Commit | Line | Data |
---|---|---|
89e5785f | 1 | /* |
f75ba50b | 2 | * Cadence MACB/GEM Ethernet Controller driver |
89e5785f HS |
3 | * |
4 | * Copyright (C) 2004-2006 Atmel Corporation | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | */ | |
10 | ||
c220f8cd | 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
89e5785f | 12 | #include <linux/clk.h> |
653e92a9 | 13 | #include <linux/crc32.h> |
89e5785f HS |
14 | #include <linux/module.h> |
15 | #include <linux/moduleparam.h> | |
16 | #include <linux/kernel.h> | |
17 | #include <linux/types.h> | |
909a8583 | 18 | #include <linux/circ_buf.h> |
89e5785f HS |
19 | #include <linux/slab.h> |
20 | #include <linux/init.h> | |
60fe716f | 21 | #include <linux/io.h> |
2dbfdbb9 | 22 | #include <linux/gpio.h> |
270c499f | 23 | #include <linux/gpio/consumer.h> |
a6b7a407 | 24 | #include <linux/interrupt.h> |
89e5785f HS |
25 | #include <linux/netdevice.h> |
26 | #include <linux/etherdevice.h> | |
89e5785f | 27 | #include <linux/dma-mapping.h> |
84e0cdb0 | 28 | #include <linux/platform_data/macb.h> |
89e5785f | 29 | #include <linux/platform_device.h> |
6c36a707 | 30 | #include <linux/phy.h> |
b17471f5 | 31 | #include <linux/of.h> |
fb97a846 | 32 | #include <linux/of_device.h> |
270c499f | 33 | #include <linux/of_gpio.h> |
148cbb53 | 34 | #include <linux/of_mdio.h> |
fb97a846 | 35 | #include <linux/of_net.h> |
1629dd4f RO |
36 | #include <linux/ip.h> |
37 | #include <linux/udp.h> | |
38 | #include <linux/tcp.h> | |
89e5785f HS |
39 | #include "macb.h" |
40 | ||
1b44791a | 41 | #define MACB_RX_BUFFER_SIZE 128 |
1b44791a | 42 | #define RX_BUFFER_MULTIPLE 64 /* bytes */ |
8441bb33 | 43 | |
b410d13e | 44 | #define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */ |
8441bb33 ZB |
45 | #define MIN_RX_RING_SIZE 64 |
46 | #define MAX_RX_RING_SIZE 8192 | |
dc97a89e | 47 | #define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ |
b410d13e | 48 | * (bp)->rx_ring_size) |
89e5785f | 49 | |
b410d13e | 50 | #define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */ |
8441bb33 ZB |
51 | #define MIN_TX_RING_SIZE 64 |
52 | #define MAX_TX_RING_SIZE 4096 | |
dc97a89e | 53 | #define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ |
b410d13e | 54 | * (bp)->tx_ring_size) |
89e5785f | 55 | |
909a8583 | 56 | /* Level of occupied TX descriptors below which we wake up the TX process */
b410d13e | 57 | #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
89e5785f HS |
58 | |
59 | #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ | |
60 | | MACB_BIT(ISR_ROVR)) | |
e86cd53a NF |
61 | #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ |
62 | | MACB_BIT(ISR_RLE) \ | |
63 | | MACB_BIT(TXERR)) | |
64 | #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) | |
65 | ||
1629dd4f RO |
66 | /* Max length of transmit frame must be a multiple of 8 bytes */ |
67 | #define MACB_TX_LEN_ALIGN 8 | |
68 | #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) | |
69 | #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) | |
a4c35ed3 | 70 | |
44770e11 | 71 | #define GEM_MTU_MIN_SIZE ETH_MIN_MTU |
f9c45ae0 | 72 | #define MACB_NETIF_LSO NETIF_F_TSO |
a5898ea0 | 73 | |
3e2a5e15 SP |
74 | #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) |
75 | #define MACB_WOL_ENABLED (0x1 << 1) | |
76 | ||
64ec42fe | 77 | /* Graceful stop timeouts in us. We should allow up to |
e86cd53a NF |
78 | * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) |
79 | */ | |
80 | #define MACB_HALT_TIMEOUT 1230 | |
89e5785f | 81 | |
dc97a89e | 82 | /* DMA buffer descriptors may differ in size,
7b429614 RO |
83 | * depending on the hardware configuration:
84 | * | |
85 | * 1. dma address width 32 bits: | |
86 | * word 1: 32 bit address of Data Buffer | |
87 | * word 2: control | |
88 | * | |
89 | * 2. dma address width 64 bits: | |
90 | * word 1: 32 bit address of Data Buffer | |
91 | * word 2: control | |
92 | * word 3: upper 32 bit address of Data Buffer | |
93 | * word 4: unused | |
94 | * | |
95 | * 3. dma address width 32 bits with hardware timestamping: | |
96 | * word 1: 32 bit address of Data Buffer | |
97 | * word 2: control | |
98 | * word 3: timestamp word 1 | |
99 | * word 4: timestamp word 2 | |
100 | * | |
101 | * 4. dma address width 64 bits with hardware timestamping: | |
102 | * word 1: 32 bit address of Data Buffer | |
103 | * word 2: control | |
104 | * word 3: upper 32 bit address of Data Buffer | |
105 | * word 4: unused | |
106 | * word 5: timestamp word 1 | |
107 | * word 6: timestamp word 2 | |
dc97a89e RO |
108 | */ |
109 | static unsigned int macb_dma_desc_get_size(struct macb *bp) | |
110 | { | |
7b429614 RO |
111 | #ifdef MACB_EXT_DESC |
112 | unsigned int desc_size; | |
113 | ||
114 | switch (bp->hw_dma_cap) { | |
115 | case HW_DMA_CAP_64B: | |
116 | desc_size = sizeof(struct macb_dma_desc) | |
117 | + sizeof(struct macb_dma_desc_64); | |
118 | break; | |
119 | case HW_DMA_CAP_PTP: | |
120 | desc_size = sizeof(struct macb_dma_desc) | |
121 | + sizeof(struct macb_dma_desc_ptp); | |
122 | break; | |
123 | case HW_DMA_CAP_64B_PTP: | |
124 | desc_size = sizeof(struct macb_dma_desc) | |
125 | + sizeof(struct macb_dma_desc_64) | |
126 | + sizeof(struct macb_dma_desc_ptp); | |
127 | break; | |
128 | default: | |
129 | desc_size = sizeof(struct macb_dma_desc); | |
130 | } | |
131 | return desc_size; | |
dc97a89e RO |
132 | #endif |
133 | return sizeof(struct macb_dma_desc); | |
134 | } | |
135 | ||
7b429614 | 136 | static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx) |
dc97a89e | 137 | { |
7b429614 RO |
138 | #ifdef MACB_EXT_DESC |
139 | switch (bp->hw_dma_cap) { | |
140 | case HW_DMA_CAP_64B: | |
141 | case HW_DMA_CAP_PTP: | |
142 | desc_idx <<= 1; | |
143 | break; | |
144 | case HW_DMA_CAP_64B_PTP: | |
145 | desc_idx *= 3; | |
146 | break; | |
147 | default: | |
148 | break; | |
149 | } | |
dc97a89e | 150 | #endif |
7b429614 | 151 | return desc_idx; |
dc97a89e RO |
152 | } |
153 | ||
154 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | |
155 | static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) | |
156 | { | |
7b429614 RO |
157 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) |
158 | return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc)); | |
159 | return NULL; | |
dc97a89e RO |
160 | } |
161 | #endif | |
162 | ||
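/* A worked example of how the two helpers above pair up, assuming the
 * HW_DMA_CAP_64B layout from the descriptor comment (base descriptor plus a
 * two-word macb_dma_desc_64 extension):
 *
 *   macb_dma_desc_get_size(bp)   -> sizeof(struct macb_dma_desc)
 *                                   + sizeof(struct macb_dma_desc_64)
 *   macb_adj_dma_desc_idx(bp, 5) -> 10
 *
 * Logical descriptor 5 therefore starts at basic-descriptor slot 10, which
 * is the same byte offset as 5 * macb_dma_desc_get_size(bp), so indexing the
 * ring arrays with the adjusted index stays consistent with the sizes used
 * when the rings were allocated.
 */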
55054a16 | 163 | /* Ring buffer accessors */ |
b410d13e | 164 | static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) |
55054a16 | 165 | { |
b410d13e | 166 | return index & (bp->tx_ring_size - 1); |
55054a16 HS |
167 | } |
168 | ||
02c958dd CP |
169 | static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, |
170 | unsigned int index) | |
55054a16 | 171 | { |
dc97a89e RO |
172 | index = macb_tx_ring_wrap(queue->bp, index); |
173 | index = macb_adj_dma_desc_idx(queue->bp, index); | |
174 | return &queue->tx_ring[index]; | |
55054a16 HS |
175 | } |
176 | ||
02c958dd CP |
177 | static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, |
178 | unsigned int index) | |
55054a16 | 179 | { |
b410d13e | 180 | return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)]; |
55054a16 HS |
181 | } |
182 | ||
02c958dd | 183 | static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) |
55054a16 HS |
184 | { |
185 | dma_addr_t offset; | |
186 | ||
b410d13e | 187 | offset = macb_tx_ring_wrap(queue->bp, index) * |
dc97a89e | 188 | macb_dma_desc_get_size(queue->bp); |
55054a16 | 189 | |
02c958dd | 190 | return queue->tx_ring_dma + offset; |
55054a16 HS |
191 | } |
192 | ||
b410d13e | 193 | static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index) |
55054a16 | 194 | { |
b410d13e | 195 | return index & (bp->rx_ring_size - 1); |
55054a16 HS |
196 | } |
197 | ||
ae1f2a56 | 198 | static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index) |
55054a16 | 199 | { |
ae1f2a56 RO |
200 | index = macb_rx_ring_wrap(queue->bp, index); |
201 | index = macb_adj_dma_desc_idx(queue->bp, index); | |
202 | return &queue->rx_ring[index]; | |
55054a16 HS |
203 | } |
204 | ||
ae1f2a56 | 205 | static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index) |
55054a16 | 206 | { |
ae1f2a56 RO |
207 | return queue->rx_buffers + queue->bp->rx_buffer_size * |
208 | macb_rx_ring_wrap(queue->bp, index); | |
55054a16 HS |
209 | } |
210 | ||
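/* The wrap helpers above rely on the ring sizes being powers of two (see
 * DEFAULT_RX/TX_RING_SIZE): "index & (size - 1)" is then equivalent to
 * "index % size" but cheaper. For example, with the default 512-entry ring:
 *
 *   macb_tx_ring_wrap(bp, 511) == 511
 *   macb_tx_ring_wrap(bp, 512) == 0
 *   macb_tx_ring_wrap(bp, 515) == 3
 *
 * This is why tx_head/tx_tail and the RX counterparts are free to increment
 * without ever being reset; only the wrapped value addresses the ring.
 */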
f2ce8a9e AS |
211 | /* I/O accessors */ |
212 | static u32 hw_readl_native(struct macb *bp, int offset) | |
213 | { | |
214 | return __raw_readl(bp->regs + offset); | |
215 | } | |
216 | ||
217 | static void hw_writel_native(struct macb *bp, int offset, u32 value) | |
218 | { | |
219 | __raw_writel(value, bp->regs + offset); | |
220 | } | |
221 | ||
222 | static u32 hw_readl(struct macb *bp, int offset) | |
223 | { | |
224 | return readl_relaxed(bp->regs + offset); | |
225 | } | |
226 | ||
227 | static void hw_writel(struct macb *bp, int offset, u32 value) | |
228 | { | |
229 | writel_relaxed(value, bp->regs + offset); | |
230 | } | |
231 | ||
64ec42fe | 232 | /* Find the CPU endianness by using the loopback bit of the NCR register. When
88023beb | 233 | * the CPU is big endian we need to program swapped mode for management
f2ce8a9e AS |
234 | * descriptor access. |
235 | */ | |
236 | static bool hw_is_native_io(void __iomem *addr) | |
237 | { | |
238 | u32 value = MACB_BIT(LLB); | |
239 | ||
240 | __raw_writel(value, addr + MACB_NCR); | |
241 | value = __raw_readl(addr + MACB_NCR); | |
242 | ||
243 | /* Write 0 back to disable everything */ | |
244 | __raw_writel(0, addr + MACB_NCR); | |
245 | ||
246 | return value == MACB_BIT(LLB); | |
247 | } | |
248 | ||
249 | static bool hw_is_gem(void __iomem *addr, bool native_io) | |
250 | { | |
251 | u32 id; | |
252 | ||
253 | if (native_io) | |
254 | id = __raw_readl(addr + MACB_MID); | |
255 | else | |
256 | id = readl_relaxed(addr + MACB_MID); | |
257 | ||
258 | return MACB_BFEXT(IDNUM, id) >= 0x2; | |
259 | } | |
260 | ||
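/* hw_is_gem() keys off the module identification number in the MID
 * register: an IDNUM of 0x2 or above identifies the newer GEM IP, anything
 * lower is the original 10/100-only MACB. The hot paths below use
 * macb_is_gem() instead, which tests a capability flag cached from this
 * check at probe time rather than re-reading MID on every call.
 */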
421d9df0 | 261 | static void macb_set_hwaddr(struct macb *bp) |
89e5785f HS |
262 | { |
263 | u32 bottom; | |
264 | u16 top; | |
265 | ||
266 | bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); | |
f75ba50b | 267 | macb_or_gem_writel(bp, SA1B, bottom); |
89e5785f | 268 | top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); |
f75ba50b | 269 | macb_or_gem_writel(bp, SA1T, top); |
3629a6ce JE |
270 | |
271 | /* Clear unused address register sets */ | |
272 | macb_or_gem_writel(bp, SA2B, 0); | |
273 | macb_or_gem_writel(bp, SA2T, 0); | |
274 | macb_or_gem_writel(bp, SA3B, 0); | |
275 | macb_or_gem_writel(bp, SA3T, 0); | |
276 | macb_or_gem_writel(bp, SA4B, 0); | |
277 | macb_or_gem_writel(bp, SA4T, 0); | |
89e5785f HS |
278 | } |
279 | ||
421d9df0 | 280 | static void macb_get_hwaddr(struct macb *bp) |
89e5785f | 281 | { |
d25e78aa | 282 | struct macb_platform_data *pdata; |
89e5785f HS |
283 | u32 bottom; |
284 | u16 top; | |
285 | u8 addr[6]; | |
17b8bb3e JE |
286 | int i; |
287 | ||
c607a0d9 | 288 | pdata = dev_get_platdata(&bp->pdev->dev); |
d25e78aa | 289 | |
aa50b552 | 290 | /* Check all 4 address registers for a valid address */
17b8bb3e JE |
291 | for (i = 0; i < 4; i++) { |
292 | bottom = macb_or_gem_readl(bp, SA1B + i * 8); | |
293 | top = macb_or_gem_readl(bp, SA1T + i * 8); | |
294 | ||
d25e78aa JE |
295 | if (pdata && pdata->rev_eth_addr) { |
296 | addr[5] = bottom & 0xff; | |
297 | addr[4] = (bottom >> 8) & 0xff; | |
298 | addr[3] = (bottom >> 16) & 0xff; | |
299 | addr[2] = (bottom >> 24) & 0xff; | |
300 | addr[1] = top & 0xff; | |
301 | addr[0] = (top & 0xff00) >> 8; | |
302 | } else { | |
303 | addr[0] = bottom & 0xff; | |
304 | addr[1] = (bottom >> 8) & 0xff; | |
305 | addr[2] = (bottom >> 16) & 0xff; | |
306 | addr[3] = (bottom >> 24) & 0xff; | |
307 | addr[4] = top & 0xff; | |
308 | addr[5] = (top >> 8) & 0xff; | |
309 | } | |
17b8bb3e JE |
310 | |
311 | if (is_valid_ether_addr(addr)) { | |
312 | memcpy(bp->dev->dev_addr, addr, sizeof(addr)); | |
313 | return; | |
314 | } | |
d1d5741d | 315 | } |
17b8bb3e | 316 | |
a35919e1 | 317 | dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); |
17b8bb3e | 318 | eth_hw_addr_random(bp->dev); |
89e5785f HS |
319 | } |
320 | ||
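/* Sketch of how a station address maps onto the SA1B/SA1T pair used by the
 * two helpers above (values illustrative, non-reversed layout):
 *
 *   dev_addr[6] = { 0x00, 0x04, 0x25, 0xab, 0xcd, 0xef }
 *   SA1B (bottom 4 bytes) = 0xab250400  ->  addr[3] addr[2] addr[1] addr[0]
 *   SA1T (top 2 bytes)    = 0x0000efcd  ->  addr[5] addr[4]
 *
 * macb_get_hwaddr() reverses the packing, optionally byte-swapping when the
 * platform data marks the stored address as reversed (pdata->rev_eth_addr).
 */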
6c36a707 | 321 | static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) |
89e5785f | 322 | { |
6c36a707 | 323 | struct macb *bp = bus->priv; |
89e5785f HS |
324 | int value; |
325 | ||
89e5785f HS |
326 | macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) |
327 | | MACB_BF(RW, MACB_MAN_READ) | |
6c36a707 R |
328 | | MACB_BF(PHYA, mii_id) |
329 | | MACB_BF(REGA, regnum) | |
89e5785f HS |
330 | | MACB_BF(CODE, MACB_MAN_CODE))); |
331 | ||
6c36a707 R |
332 | /* wait for end of transfer */ |
333 | while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) | |
334 | cpu_relax(); | |
89e5785f HS |
335 | |
336 | value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); | |
89e5785f HS |
337 | |
338 | return value; | |
339 | } | |
340 | ||
6c36a707 R |
341 | static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, |
342 | u16 value) | |
89e5785f | 343 | { |
6c36a707 | 344 | struct macb *bp = bus->priv; |
89e5785f HS |
345 | |
346 | macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) | |
347 | | MACB_BF(RW, MACB_MAN_WRITE) | |
6c36a707 R |
348 | | MACB_BF(PHYA, mii_id) |
349 | | MACB_BF(REGA, regnum) | |
89e5785f | 350 | | MACB_BF(CODE, MACB_MAN_CODE) |
6c36a707 | 351 | | MACB_BF(DATA, value))); |
89e5785f | 352 | |
6c36a707 R |
353 | /* wait for end of transfer */ |
354 | while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) | |
355 | cpu_relax(); | |
356 | ||
357 | return 0; | |
358 | } | |
89e5785f | 359 | |
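/* Both MDIO helpers above build a single IEEE 802.3 Clause 22 management
 * frame in the MAN register and then poll NSR.IDLE until the shift register
 * is free again. Roughly:
 *
 *   SOF  = MACB_MAN_SOF     start-of-frame pattern
 *   RW   = READ or WRITE    operation code
 *   PHYA = PHY address      (0..31)
 *   REGA = register number  (0..31)
 *   CODE = MACB_MAN_CODE    turnaround bits
 *   DATA = 16-bit value     (write data; ignored on reads)
 *
 * The busy-wait is acceptable because a Clause 22 transaction at the usual
 * 2.5 MHz MDC clock takes on the order of tens of microseconds.
 */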
e1824dfe SB |
360 | /** |
361 | * macb_set_tx_clk() - Set the TX clock rate to match a new link speed
362 | * @clk: Pointer to the clock to change
363 | * @speed: New link speed (SPEED_10/100/1000) selecting the clock rate
364 | * @dev: Pointer to the struct net_device
365 | */ | |
366 | static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) | |
367 | { | |
368 | long ferr, rate, rate_rounded; | |
369 | ||
93b31f48 CP |
370 | if (!clk) |
371 | return; | |
372 | ||
e1824dfe SB |
373 | switch (speed) { |
374 | case SPEED_10: | |
375 | rate = 2500000; | |
376 | break; | |
377 | case SPEED_100: | |
378 | rate = 25000000; | |
379 | break; | |
380 | case SPEED_1000: | |
381 | rate = 125000000; | |
382 | break; | |
383 | default: | |
9319e47c | 384 | return; |
e1824dfe SB |
385 | } |
386 | ||
387 | rate_rounded = clk_round_rate(clk, rate); | |
388 | if (rate_rounded < 0) | |
389 | return; | |
390 | ||
391 | /* RGMII allows 50 ppm frequency error. Test and warn if this limit | |
392 | * is not satisfied. | |
393 | */ | |
394 | ferr = abs(rate_rounded - rate); | |
395 | ferr = DIV_ROUND_UP(ferr, rate / 100000); | |
396 | if (ferr > 5) | |
397 | netdev_warn(dev, "unable to generate target frequency: %ld Hz\n", | |
aa50b552 | 398 | rate); |
e1824dfe SB |
399 | |
400 | if (clk_set_rate(clk, rate_rounded)) | |
401 | netdev_err(dev, "adjusting tx_clk failed.\n"); | |
402 | } | |
403 | ||
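/* Worked example of the 50 ppm check in macb_set_tx_clk(): for SPEED_1000
 * the target is 125 MHz. If clk_round_rate() can only deliver, say,
 * 124.99 MHz:
 *
 *   ferr = |124990000 - 125000000|                  = 10000
 *   ferr = DIV_ROUND_UP(10000, 125000000 / 100000)  = 8    (i.e. 80 ppm)
 *
 * 8 > 5, so the "unable to generate target frequency" warning fires, but
 * the rounded rate is still programmed - the check only warns.
 */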
6c36a707 | 404 | static void macb_handle_link_change(struct net_device *dev) |
89e5785f | 405 | { |
6c36a707 | 406 | struct macb *bp = netdev_priv(dev); |
0a91281e | 407 | struct phy_device *phydev = dev->phydev; |
6c36a707 | 408 | unsigned long flags; |
6c36a707 | 409 | int status_change = 0; |
89e5785f | 410 | |
6c36a707 R |
411 | spin_lock_irqsave(&bp->lock, flags); |
412 | ||
413 | if (phydev->link) { | |
414 | if ((bp->speed != phydev->speed) || | |
415 | (bp->duplex != phydev->duplex)) { | |
416 | u32 reg; | |
417 | ||
418 | reg = macb_readl(bp, NCFGR); | |
419 | reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); | |
140b7552 PV |
420 | if (macb_is_gem(bp)) |
421 | reg &= ~GEM_BIT(GBE); | |
6c36a707 R |
422 | |
423 | if (phydev->duplex) | |
424 | reg |= MACB_BIT(FD); | |
179956f4 | 425 | if (phydev->speed == SPEED_100) |
6c36a707 | 426 | reg |= MACB_BIT(SPD); |
e175587f NF |
427 | if (phydev->speed == SPEED_1000 && |
428 | bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) | |
140b7552 | 429 | reg |= GEM_BIT(GBE); |
6c36a707 | 430 | |
140b7552 | 431 | macb_or_gem_writel(bp, NCFGR, reg); |
6c36a707 R |
432 | |
433 | bp->speed = phydev->speed; | |
434 | bp->duplex = phydev->duplex; | |
435 | status_change = 1; | |
436 | } | |
89e5785f HS |
437 | } |
438 | ||
6c36a707 | 439 | if (phydev->link != bp->link) { |
c8f15686 | 440 | if (!phydev->link) { |
6c36a707 R |
441 | bp->speed = 0; |
442 | bp->duplex = -1; | |
443 | } | |
444 | bp->link = phydev->link; | |
89e5785f | 445 | |
6c36a707 R |
446 | status_change = 1; |
447 | } | |
89e5785f | 448 | |
6c36a707 R |
449 | spin_unlock_irqrestore(&bp->lock, flags); |
450 | ||
451 | if (status_change) { | |
03fc4721 | 452 | if (phydev->link) { |
2c29b235 JA |
453 | /* Update the TX clock rate if and only if the link is |
454 | * up and there has been a link change. | |
455 | */ | |
456 | macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); | |
457 | ||
03fc4721 | 458 | netif_carrier_on(dev); |
c220f8cd JI |
459 | netdev_info(dev, "link up (%d/%s)\n", |
460 | phydev->speed, | |
461 | phydev->duplex == DUPLEX_FULL ? | |
462 | "Full" : "Half"); | |
03fc4721 NF |
463 | } else { |
464 | netif_carrier_off(dev); | |
c220f8cd | 465 | netdev_info(dev, "link down\n"); |
03fc4721 | 466 | } |
6c36a707 | 467 | } |
89e5785f HS |
468 | } |
469 | ||
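/* Summary of the NCFGR update performed in macb_handle_link_change() above
 * when the PHY reports a new speed/duplex:
 *
 *   10 Mbit/s,  half duplex:  SPD=0, FD=0, GBE=0
 *   100 Mbit/s, full duplex:  SPD=1, FD=1, GBE=0
 *   1 Gbit/s,   full duplex:  SPD=0, FD=1, GBE=1  (gigabit-capable GEM only)
 *
 * On a link-up status change the TX clock is then retuned through
 * macb_set_tx_clk() so an RGMII interface gets 2.5/25/125 MHz to match the
 * negotiated speed.
 */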
6c36a707 R |
470 | /* based on au1000_eth.c */
471 | static int macb_mii_probe(struct net_device *dev) | |
89e5785f | 472 | { |
6c36a707 | 473 | struct macb *bp = netdev_priv(dev); |
2dbfdbb9 | 474 | struct macb_platform_data *pdata; |
7455a76f | 475 | struct phy_device *phydev; |
739de9a1 BM |
476 | struct device_node *np; |
477 | int phy_irq, ret, i; | |
478 | ||
479 | pdata = dev_get_platdata(&bp->pdev->dev); | |
480 | np = bp->pdev->dev.of_node; | |
481 | ret = 0; | |
482 | ||
483 | if (np) { | |
484 | if (of_phy_is_fixed_link(np)) { | |
739de9a1 BM |
485 | bp->phy_node = of_node_get(np); |
486 | } else { | |
2105a5d3 BM |
487 | bp->phy_node = of_parse_phandle(np, "phy-handle", 0); |
488 | /* fall back to standard PHY registration if no
489 | * phy-handle was found and no PHY was found during
490 | * DT PHY registration
739de9a1 | 491 | */ |
2105a5d3 | 492 | if (!bp->phy_node && !phy_find_first(bp->mii_bus)) { |
739de9a1 BM |
493 | for (i = 0; i < PHY_MAX_ADDR; i++) { |
494 | struct phy_device *phydev; | |
495 | ||
496 | phydev = mdiobus_scan(bp->mii_bus, i); | |
497 | if (IS_ERR(phydev) && | |
498 | PTR_ERR(phydev) != -ENODEV) { | |
499 | ret = PTR_ERR(phydev); | |
500 | break; | |
501 | } | |
502 | } | |
503 | ||
504 | if (ret) | |
505 | return -ENODEV; | |
506 | } | |
507 | } | |
508 | } | |
6c36a707 | 509 | |
dacdbb4d MG |
510 | if (bp->phy_node) { |
511 | phydev = of_phy_connect(dev, bp->phy_node, | |
512 | &macb_handle_link_change, 0, | |
513 | bp->phy_interface); | |
514 | if (!phydev) | |
515 | return -ENODEV; | |
516 | } else { | |
517 | phydev = phy_find_first(bp->mii_bus); | |
518 | if (!phydev) { | |
519 | netdev_err(dev, "no PHY found\n"); | |
520 | return -ENXIO; | |
521 | } | |
6c36a707 | 522 | |
dacdbb4d MG |
523 | if (pdata) { |
524 | if (gpio_is_valid(pdata->phy_irq_pin)) { | |
525 | ret = devm_gpio_request(&bp->pdev->dev, | |
526 | pdata->phy_irq_pin, "phy int"); | |
527 | if (!ret) { | |
528 | phy_irq = gpio_to_irq(pdata->phy_irq_pin); | |
529 | phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; | |
530 | } | |
531 | } else { | |
532 | phydev->irq = PHY_POLL; | |
ae3696c1 | 533 | } |
2dbfdbb9 | 534 | } |
6c36a707 | 535 | |
dacdbb4d MG |
536 | /* attach the mac to the phy */ |
537 | ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, | |
538 | bp->phy_interface); | |
539 | if (ret) { | |
540 | netdev_err(dev, "Could not attach to PHY\n"); | |
541 | return ret; | |
542 | } | |
6c36a707 R |
543 | } |
544 | ||
545 | /* mask with MAC supported features */ | |
e175587f | 546 | if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) |
140b7552 PV |
547 | phydev->supported &= PHY_GBIT_FEATURES; |
548 | else | |
549 | phydev->supported &= PHY_BASIC_FEATURES; | |
6c36a707 | 550 | |
222ca8e0 NS |
551 | if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) |
552 | phydev->supported &= ~SUPPORTED_1000baseT_Half; | |
553 | ||
6c36a707 R |
554 | phydev->advertising = phydev->supported; |
555 | ||
556 | bp->link = 0; | |
557 | bp->speed = 0; | |
558 | bp->duplex = -1; | |
6c36a707 R |
559 | |
560 | return 0; | |
89e5785f HS |
561 | } |
562 | ||
421d9df0 | 563 | static int macb_mii_init(struct macb *bp) |
89e5785f | 564 | { |
84e0cdb0 | 565 | struct macb_platform_data *pdata; |
148cbb53 | 566 | struct device_node *np; |
ab5f1105 | 567 | int err = -ENXIO; |
89e5785f | 568 | |
3dbda77e | 569 | /* Enable management port */ |
6c36a707 | 570 | macb_writel(bp, NCR, MACB_BIT(MPE)); |
89e5785f | 571 | |
298cf9be | 572 | bp->mii_bus = mdiobus_alloc(); |
aa50b552 | 573 | if (!bp->mii_bus) { |
298cf9be LB |
574 | err = -ENOMEM; |
575 | goto err_out; | |
576 | } | |
577 | ||
578 | bp->mii_bus->name = "MACB_mii_bus"; | |
579 | bp->mii_bus->read = &macb_mdio_read; | |
580 | bp->mii_bus->write = &macb_mdio_write; | |
98d5e57e | 581 | snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", |
aa50b552 | 582 | bp->pdev->name, bp->pdev->id); |
298cf9be | 583 | bp->mii_bus->priv = bp; |
cf669660 | 584 | bp->mii_bus->parent = &bp->pdev->dev; |
c607a0d9 | 585 | pdata = dev_get_platdata(&bp->pdev->dev); |
89e5785f | 586 | |
91523947 | 587 | dev_set_drvdata(&bp->dev->dev, bp->mii_bus); |
89e5785f | 588 | |
148cbb53 | 589 | np = bp->pdev->dev.of_node; |
ab5f1105 AF |
590 | if (np && of_phy_is_fixed_link(np)) { |
591 | if (of_phy_register_fixed_link(np) < 0) { | |
592 | dev_err(&bp->pdev->dev, | |
593 | "broken fixed-link specification %pOF\n", np); | |
594 | goto err_out_free_mdiobus; | |
595 | } | |
596 | ||
597 | err = mdiobus_register(bp->mii_bus); | |
598 | } else { | |
599 | if (pdata) | |
600 | bp->mii_bus->phy_mask = pdata->phy_mask; | |
601 | ||
602 | err = of_mdiobus_register(bp->mii_bus, np); | |
603 | } | |
dacdbb4d | 604 | |
148cbb53 | 605 | if (err) |
ab5f1105 | 606 | goto err_out_free_fixed_link; |
89e5785f | 607 | |
7daa78e3 BB |
608 | err = macb_mii_probe(bp->dev); |
609 | if (err) | |
6c36a707 | 610 | goto err_out_unregister_bus; |
89e5785f | 611 | |
6c36a707 | 612 | return 0; |
89e5785f | 613 | |
6c36a707 | 614 | err_out_unregister_bus: |
298cf9be | 615 | mdiobus_unregister(bp->mii_bus); |
ab5f1105 | 616 | err_out_free_fixed_link: |
9ce98140 MG |
617 | if (np && of_phy_is_fixed_link(np)) |
618 | of_phy_deregister_fixed_link(np); | |
739de9a1 BM |
619 | err_out_free_mdiobus: |
620 | of_node_put(bp->phy_node); | |
298cf9be | 621 | mdiobus_free(bp->mii_bus); |
6c36a707 R |
622 | err_out: |
623 | return err; | |
89e5785f HS |
624 | } |
625 | ||
626 | static void macb_update_stats(struct macb *bp) | |
627 | { | |
a494ed8e JI |
628 | u32 *p = &bp->hw_stats.macb.rx_pause_frames; |
629 | u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; | |
f2ce8a9e | 630 | int offset = MACB_PFR; |
89e5785f HS |
631 | |
632 | WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); | |
633 | ||
96ec6310 | 634 | for (; p < end; p++, offset += 4) |
7a6e0706 | 635 | *p += bp->macb_reg_readl(bp, offset); |
89e5785f HS |
636 | } |
637 | ||
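/* macb_update_stats() works because the MACB statistics registers sit in a
 * contiguous block from PFR (pause frames received) up to TPF (transmitted
 * pause frames), mirroring the consecutive u32 members of the hw_stats
 * structure. One pass of the loop, in outline:
 *
 *   p = &hw_stats.macb.rx_pause_frames, offset = MACB_PFR :  *p += reg
 *   p++, offset += 4                                      :  next counter
 *   ...                     until p reaches tx_pause_frames (MACB_TPF)
 *
 * The WARN_ON above trips if the structure and the register block ever get
 * out of step.
 */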
e86cd53a | 638 | static int macb_halt_tx(struct macb *bp) |
89e5785f | 639 | { |
e86cd53a NF |
640 | unsigned long halt_time, timeout; |
641 | u32 status; | |
89e5785f | 642 | |
e86cd53a | 643 | macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); |
89e5785f | 644 | |
e86cd53a NF |
645 | timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT); |
646 | do { | |
647 | halt_time = jiffies; | |
648 | status = macb_readl(bp, TSR); | |
649 | if (!(status & MACB_BIT(TGO))) | |
650 | return 0; | |
89e5785f | 651 | |
e86cd53a NF |
652 | usleep_range(10, 250); |
653 | } while (time_before(halt_time, timeout)); | |
bdcba151 | 654 | |
e86cd53a NF |
655 | return -ETIMEDOUT; |
656 | } | |
39eddb4c | 657 | |
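/* The MACB_HALT_TIMEOUT used by macb_halt_tx() above follows from the
 * "one frame time at 10 Mbit/s" rule stated where it is defined:
 *
 *   1518 bytes * 8 bits/byte / 10 Mbit/s = 1214.4 us, rounded up to 1230 us
 *
 * so the loop polls TSR.TGO for at most roughly one worst-case frame before
 * giving up and letting the caller complain.
 */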
a4c35ed3 CP |
658 | static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) |
659 | { | |
660 | if (tx_skb->mapping) { | |
661 | if (tx_skb->mapped_as_page) | |
662 | dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, | |
663 | tx_skb->size, DMA_TO_DEVICE); | |
664 | else | |
665 | dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, | |
666 | tx_skb->size, DMA_TO_DEVICE); | |
667 | tx_skb->mapping = 0; | |
668 | } | |
669 | ||
670 | if (tx_skb->skb) { | |
671 | dev_kfree_skb_any(tx_skb->skb); | |
672 | tx_skb->skb = NULL; | |
673 | } | |
674 | } | |
675 | ||
dc97a89e | 676 | static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) |
fff8019a | 677 | { |
fff8019a | 678 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
dc97a89e RO |
679 | struct macb_dma_desc_64 *desc_64; |
680 | ||
7b429614 | 681 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) { |
dc97a89e RO |
682 | desc_64 = macb_64b_desc(bp, desc); |
683 | desc_64->addrh = upper_32_bits(addr); | |
684 | } | |
fff8019a | 685 | #endif |
dc97a89e RO |
686 | desc->addr = lower_32_bits(addr); |
687 | } | |
688 | ||
689 | static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) | |
690 | { | |
691 | dma_addr_t addr = 0; | |
692 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | |
693 | struct macb_dma_desc_64 *desc_64; | |
694 | ||
7b429614 | 695 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) { |
dc97a89e RO |
696 | desc_64 = macb_64b_desc(bp, desc); |
697 | addr = ((u64)(desc_64->addrh) << 32); | |
698 | } | |
699 | #endif | |
700 | addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); | |
701 | return addr; | |
fff8019a HK |
702 | } |
703 | ||
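/* Minimal sketch of the address split handled by macb_set_addr() and
 * macb_get_addr() when the controller has 64-bit DMA (HW_DMA_CAP_64B); the
 * value is illustrative:
 *
 *   dma_addr_t a   = 0x0000000123456780ULL;
 *   desc->addr     = lower_32_bits(a);    // 0x23456780
 *   desc_64->addrh = upper_32_bits(a);    // 0x00000001
 *
 * On the RX side the low bits of desc->addr double as status flags
 * (RX_USED, RX_WRAP), which is why macb_get_addr() extracts the address
 * through the RX_WADDR field instead of using desc->addr as-is.
 */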
e86cd53a NF |
704 | static void macb_tx_error_task(struct work_struct *work) |
705 | { | |
02c958dd CP |
706 | struct macb_queue *queue = container_of(work, struct macb_queue, |
707 | tx_error_task); | |
708 | struct macb *bp = queue->bp; | |
e86cd53a | 709 | struct macb_tx_skb *tx_skb; |
02c958dd | 710 | struct macb_dma_desc *desc; |
e86cd53a NF |
711 | struct sk_buff *skb; |
712 | unsigned int tail; | |
02c958dd CP |
713 | unsigned long flags; |
714 | ||
715 | netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", | |
716 | (unsigned int)(queue - bp->queues), | |
717 | queue->tx_tail, queue->tx_head); | |
bdcba151 | 718 | |
02c958dd CP |
719 | /* Prevent the queue IRQ handlers from running: each of them may call |
720 | * macb_tx_interrupt(), which in turn may call netif_wake_subqueue(). | |
721 | * As explained below, we have to halt the transmission before updating | |
722 | * TBQP registers so we call netif_tx_stop_all_queues() to notify the | |
723 | * network engine about the macb/gem being halted. | |
724 | */ | |
725 | spin_lock_irqsave(&bp->lock, flags); | |
bdcba151 | 726 | |
e86cd53a | 727 | /* Make sure nobody is trying to queue up new packets */ |
02c958dd | 728 | netif_tx_stop_all_queues(bp->dev); |
d3e61457 | 729 | |
64ec42fe | 730 | /* Stop transmission now |
e86cd53a | 731 | * (in case we have just queued new packets) |
02c958dd | 732 | * macb/gem must be halted to write TBQP register |
e86cd53a NF |
733 | */ |
734 | if (macb_halt_tx(bp)) | |
735 | /* Just complain for now, reinitializing TX path can be good */ | |
736 | netdev_err(bp->dev, "BUG: halt tx timed out\n"); | |
bdcba151 | 737 | |
64ec42fe | 738 | /* Treat frames in TX queue including the ones that caused the error. |
e86cd53a NF |
739 | * Free transmit buffers in upper layer. |
740 | */ | |
02c958dd CP |
741 | for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { |
742 | u32 ctrl; | |
55054a16 | 743 | |
02c958dd | 744 | desc = macb_tx_desc(queue, tail); |
e86cd53a | 745 | ctrl = desc->ctrl; |
02c958dd | 746 | tx_skb = macb_tx_skb(queue, tail); |
e86cd53a | 747 | skb = tx_skb->skb; |
bdcba151 | 748 | |
e86cd53a | 749 | if (ctrl & MACB_BIT(TX_USED)) { |
a4c35ed3 CP |
750 | /* skb is set for the last buffer of the frame */ |
751 | while (!skb) { | |
752 | macb_tx_unmap(bp, tx_skb); | |
753 | tail++; | |
02c958dd | 754 | tx_skb = macb_tx_skb(queue, tail); |
a4c35ed3 CP |
755 | skb = tx_skb->skb; |
756 | } | |
757 | ||
758 | /* ctrl still refers to the first buffer descriptor | |
759 | * since it's the only one written back by the hardware | |
760 | */ | |
761 | if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) { | |
762 | netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", | |
b410d13e ZB |
763 | macb_tx_ring_wrap(bp, tail), |
764 | skb->data); | |
5f1d3a5c | 765 | bp->dev->stats.tx_packets++; |
512286bb | 766 | queue->stats.tx_packets++; |
5f1d3a5c | 767 | bp->dev->stats.tx_bytes += skb->len; |
512286bb | 768 | queue->stats.tx_bytes += skb->len; |
a4c35ed3 | 769 | } |
e86cd53a | 770 | } else { |
64ec42fe MF |
771 | /* "Buffers exhausted mid-frame" errors may only happen |
772 | * if the driver is buggy, so complain loudly about | |
773 | * those. Statistics are updated by hardware. | |
e86cd53a NF |
774 | */ |
775 | if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) | |
776 | netdev_err(bp->dev, | |
777 | "BUG: TX buffers exhausted mid-frame\n"); | |
39eddb4c | 778 | |
e86cd53a NF |
779 | desc->ctrl = ctrl | MACB_BIT(TX_USED); |
780 | } | |
781 | ||
a4c35ed3 | 782 | macb_tx_unmap(bp, tx_skb); |
89e5785f HS |
783 | } |
784 | ||
02c958dd CP |
785 | /* Set end of TX queue */ |
786 | desc = macb_tx_desc(queue, 0); | |
dc97a89e | 787 | macb_set_addr(bp, desc, 0); |
02c958dd CP |
788 | desc->ctrl = MACB_BIT(TX_USED); |
789 | ||
e86cd53a NF |
790 | /* Make descriptor updates visible to hardware */ |
791 | wmb(); | |
792 | ||
793 | /* Reinitialize the TX desc queue */ | |
dc97a89e | 794 | queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); |
fff8019a | 795 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
7b429614 | 796 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) |
dc97a89e | 797 | queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); |
fff8019a | 798 | #endif |
e86cd53a | 799 | /* Make TX ring reflect state of hardware */ |
02c958dd CP |
800 | queue->tx_head = 0; |
801 | queue->tx_tail = 0; | |
e86cd53a NF |
802 | |
803 | /* Housework before enabling TX IRQ */ | |
804 | macb_writel(bp, TSR, macb_readl(bp, TSR)); | |
02c958dd CP |
805 | queue_writel(queue, IER, MACB_TX_INT_FLAGS); |
806 | ||
807 | /* Now we are ready to start transmission again */ | |
808 | netif_tx_start_all_queues(bp->dev); | |
809 | macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); | |
810 | ||
811 | spin_unlock_irqrestore(&bp->lock, flags); | |
e86cd53a NF |
812 | } |
813 | ||
02c958dd | 814 | static void macb_tx_interrupt(struct macb_queue *queue) |
e86cd53a NF |
815 | { |
816 | unsigned int tail; | |
817 | unsigned int head; | |
818 | u32 status; | |
02c958dd CP |
819 | struct macb *bp = queue->bp; |
820 | u16 queue_index = queue - bp->queues; | |
e86cd53a NF |
821 | |
822 | status = macb_readl(bp, TSR); | |
823 | macb_writel(bp, TSR, status); | |
824 | ||
581df9e1 | 825 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
02c958dd | 826 | queue_writel(queue, ISR, MACB_BIT(TCOMP)); |
749a2b66 | 827 | |
e86cd53a | 828 | netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", |
aa50b552 | 829 | (unsigned long)status); |
89e5785f | 830 | |
02c958dd CP |
831 | head = queue->tx_head; |
832 | for (tail = queue->tx_tail; tail != head; tail++) { | |
55054a16 HS |
833 | struct macb_tx_skb *tx_skb; |
834 | struct sk_buff *skb; | |
835 | struct macb_dma_desc *desc; | |
836 | u32 ctrl; | |
89e5785f | 837 | |
02c958dd | 838 | desc = macb_tx_desc(queue, tail); |
89e5785f | 839 | |
03dbe05f | 840 | /* Make hw descriptor updates visible to CPU */ |
89e5785f | 841 | rmb(); |
03dbe05f | 842 | |
55054a16 | 843 | ctrl = desc->ctrl; |
89e5785f | 844 | |
a4c35ed3 CP |
845 | /* TX_USED bit is only set by hardware on the very first buffer |
846 | * descriptor of the transmitted frame. | |
847 | */ | |
55054a16 | 848 | if (!(ctrl & MACB_BIT(TX_USED))) |
89e5785f HS |
849 | break; |
850 | ||
a4c35ed3 CP |
851 | /* Process all buffers of the current transmitted frame */ |
852 | for (;; tail++) { | |
02c958dd | 853 | tx_skb = macb_tx_skb(queue, tail); |
a4c35ed3 CP |
854 | skb = tx_skb->skb; |
855 | ||
856 | /* First, update TX stats if needed */ | |
857 | if (skb) { | |
ab91f0a9 RO |
858 | if (gem_ptp_do_txstamp(queue, skb, desc) == 0) { |
859 | /* skb now belongs to timestamp buffer | |
860 | * and will be removed later | |
861 | */ | |
862 | tx_skb->skb = NULL; | |
863 | } | |
a4c35ed3 | 864 | netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", |
b410d13e ZB |
865 | macb_tx_ring_wrap(bp, tail), |
866 | skb->data); | |
5f1d3a5c | 867 | bp->dev->stats.tx_packets++; |
512286bb | 868 | queue->stats.tx_packets++; |
5f1d3a5c | 869 | bp->dev->stats.tx_bytes += skb->len; |
512286bb | 870 | queue->stats.tx_bytes += skb->len; |
a4c35ed3 | 871 | } |
55054a16 | 872 | |
a4c35ed3 CP |
873 | /* Now we can safely release resources */ |
874 | macb_tx_unmap(bp, tx_skb); | |
875 | ||
876 | /* skb is set only for the last buffer of the frame. | |
877 | * WARNING: at this point skb has been freed by | |
878 | * macb_tx_unmap(). | |
879 | */ | |
880 | if (skb) | |
881 | break; | |
882 | } | |
89e5785f HS |
883 | } |
884 | ||
02c958dd CP |
885 | queue->tx_tail = tail; |
886 | if (__netif_subqueue_stopped(bp->dev, queue_index) && | |
887 | CIRC_CNT(queue->tx_head, queue->tx_tail, | |
b410d13e | 888 | bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) |
02c958dd | 889 | netif_wake_subqueue(bp->dev, queue_index); |
89e5785f HS |
890 | } |
891 | ||
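/* Example of the wake-up test at the end of macb_tx_interrupt(), assuming
 * the default 512-entry TX ring:
 *
 *   MACB_TX_WAKEUP_THRESH(bp)        = 3 * 512 / 4 = 384
 *   CIRC_CNT(tx_head, tx_tail, 512)  = descriptors still in flight
 *
 * A stopped subqueue is only woken once no more than 384 descriptors remain
 * queued, which leaves the start_xmit path a reasonable chunk of free ring
 * before it can be throttled again.
 */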
ae1f2a56 | 892 | static void gem_rx_refill(struct macb_queue *queue) |
4df95131 NF |
893 | { |
894 | unsigned int entry; | |
895 | struct sk_buff *skb; | |
4df95131 | 896 | dma_addr_t paddr; |
ae1f2a56 | 897 | struct macb *bp = queue->bp; |
dc97a89e | 898 | struct macb_dma_desc *desc; |
4df95131 | 899 | |
ae1f2a56 RO |
900 | while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail, |
901 | bp->rx_ring_size) > 0) { | |
902 | entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); | |
4df95131 NF |
903 | |
904 | /* Make hw descriptor updates visible to CPU */ | |
905 | rmb(); | |
906 | ||
ae1f2a56 RO |
907 | queue->rx_prepared_head++; |
908 | desc = macb_rx_desc(queue, entry); | |
4df95131 | 909 | |
ae1f2a56 | 910 | if (!queue->rx_skbuff[entry]) { |
4df95131 NF |
911 | /* allocate sk_buff for this free entry in ring */ |
912 | skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); | |
aa50b552 | 913 | if (unlikely(!skb)) { |
4df95131 NF |
914 | netdev_err(bp->dev, |
915 | "Unable to allocate sk_buff\n"); | |
916 | break; | |
917 | } | |
4df95131 NF |
918 | |
919 | /* now fill corresponding descriptor entry */ | |
920 | paddr = dma_map_single(&bp->pdev->dev, skb->data, | |
64ec42fe MF |
921 | bp->rx_buffer_size, |
922 | DMA_FROM_DEVICE); | |
92030908 SB |
923 | if (dma_mapping_error(&bp->pdev->dev, paddr)) { |
924 | dev_kfree_skb(skb); | |
925 | break; | |
926 | } | |
927 | ||
ae1f2a56 | 928 | queue->rx_skbuff[entry] = skb; |
4df95131 | 929 | |
b410d13e | 930 | if (entry == bp->rx_ring_size - 1) |
4df95131 | 931 | paddr |= MACB_BIT(RX_WRAP); |
dc97a89e RO |
932 | macb_set_addr(bp, desc, paddr); |
933 | desc->ctrl = 0; | |
4df95131 NF |
934 | |
935 | /* properly align Ethernet header */ | |
936 | skb_reserve(skb, NET_IP_ALIGN); | |
d4c216c5 | 937 | } else { |
dc97a89e RO |
938 | desc->addr &= ~MACB_BIT(RX_USED); |
939 | desc->ctrl = 0; | |
4df95131 NF |
940 | } |
941 | } | |
942 | ||
943 | /* Make descriptor updates visible to hardware */ | |
944 | wmb(); | |
945 | ||
ae1f2a56 RO |
946 | netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", |
947 | queue, queue->rx_prepared_head, queue->rx_tail); | |
4df95131 NF |
948 | } |
949 | ||
950 | /* Mark DMA descriptors from begin up to and not including end as unused */ | |
ae1f2a56 | 951 | static void discard_partial_frame(struct macb_queue *queue, unsigned int begin, |
4df95131 NF |
952 | unsigned int end) |
953 | { | |
954 | unsigned int frag; | |
955 | ||
956 | for (frag = begin; frag != end; frag++) { | |
ae1f2a56 | 957 | struct macb_dma_desc *desc = macb_rx_desc(queue, frag); |
64ec42fe | 958 | |
4df95131 NF |
959 | desc->addr &= ~MACB_BIT(RX_USED); |
960 | } | |
961 | ||
962 | /* Make descriptor updates visible to hardware */ | |
963 | wmb(); | |
964 | ||
64ec42fe | 965 | /* When this happens, the hardware stats registers for
4df95131 NF |
966 | * whatever caused this are updated, so we don't have to record
967 | * anything. | |
968 | */ | |
969 | } | |
970 | ||
ae1f2a56 | 971 | static int gem_rx(struct macb_queue *queue, int budget) |
4df95131 | 972 | { |
ae1f2a56 | 973 | struct macb *bp = queue->bp; |
4df95131 NF |
974 | unsigned int len; |
975 | unsigned int entry; | |
976 | struct sk_buff *skb; | |
977 | struct macb_dma_desc *desc; | |
978 | int count = 0; | |
979 | ||
980 | while (count < budget) { | |
fff8019a HK |
981 | u32 ctrl; |
982 | dma_addr_t addr; | |
983 | bool rxused; | |
4df95131 | 984 | |
ae1f2a56 RO |
985 | entry = macb_rx_ring_wrap(bp, queue->rx_tail); |
986 | desc = macb_rx_desc(queue, entry); | |
4df95131 NF |
987 | |
988 | /* Make hw descriptor updates visible to CPU */ | |
989 | rmb(); | |
990 | ||
fff8019a | 991 | rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; |
dc97a89e | 992 | addr = macb_get_addr(bp, desc); |
4df95131 NF |
993 | ctrl = desc->ctrl; |
994 | ||
fff8019a | 995 | if (!rxused) |
4df95131 NF |
996 | break; |
997 | ||
ae1f2a56 | 998 | queue->rx_tail++; |
4df95131 NF |
999 | count++; |
1000 | ||
1001 | if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { | |
1002 | netdev_err(bp->dev, | |
1003 | "not whole frame pointed by descriptor\n"); | |
5f1d3a5c | 1004 | bp->dev->stats.rx_dropped++; |
512286bb | 1005 | queue->stats.rx_dropped++; |
4df95131 NF |
1006 | break; |
1007 | } | |
ae1f2a56 | 1008 | skb = queue->rx_skbuff[entry]; |
4df95131 NF |
1009 | if (unlikely(!skb)) { |
1010 | netdev_err(bp->dev, | |
1011 | "inconsistent Rx descriptor chain\n"); | |
5f1d3a5c | 1012 | bp->dev->stats.rx_dropped++; |
512286bb | 1013 | queue->stats.rx_dropped++; |
4df95131 NF |
1014 | break; |
1015 | } | |
1016 | /* now everything is ready for receiving packet */ | |
ae1f2a56 | 1017 | queue->rx_skbuff[entry] = NULL; |
98b5a0f4 | 1018 | len = ctrl & bp->rx_frm_len_mask; |
4df95131 NF |
1019 | |
1020 | netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); | |
1021 | ||
1022 | skb_put(skb, len); | |
4df95131 | 1023 | dma_unmap_single(&bp->pdev->dev, addr, |
48330e08 | 1024 | bp->rx_buffer_size, DMA_FROM_DEVICE); |
4df95131 NF |
1025 | |
1026 | skb->protocol = eth_type_trans(skb, bp->dev); | |
1027 | skb_checksum_none_assert(skb); | |
924ec53c CP |
1028 | if (bp->dev->features & NETIF_F_RXCSUM && |
1029 | !(bp->dev->flags & IFF_PROMISC) && | |
1030 | GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK) | |
1031 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
4df95131 | 1032 | |
5f1d3a5c | 1033 | bp->dev->stats.rx_packets++; |
512286bb | 1034 | queue->stats.rx_packets++; |
5f1d3a5c | 1035 | bp->dev->stats.rx_bytes += skb->len; |
512286bb | 1036 | queue->stats.rx_bytes += skb->len; |
4df95131 | 1037 | |
ab91f0a9 RO |
1038 | gem_ptp_do_rxstamp(bp, skb, desc); |
1039 | ||
4df95131 NF |
1040 | #if defined(DEBUG) && defined(VERBOSE_DEBUG) |
1041 | netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", | |
1042 | skb->len, skb->csum); | |
1043 | print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, | |
51f83014 | 1044 | skb_mac_header(skb), 16, true); |
4df95131 NF |
1045 | print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, |
1046 | skb->data, 32, true); | |
1047 | #endif | |
1048 | ||
1049 | netif_receive_skb(skb); | |
1050 | } | |
1051 | ||
ae1f2a56 | 1052 | gem_rx_refill(queue); |
4df95131 NF |
1053 | |
1054 | return count; | |
1055 | } | |
1056 | ||
ae1f2a56 | 1057 | static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag, |
89e5785f HS |
1058 | unsigned int last_frag) |
1059 | { | |
1060 | unsigned int len; | |
1061 | unsigned int frag; | |
29bc2e1e | 1062 | unsigned int offset; |
89e5785f | 1063 | struct sk_buff *skb; |
55054a16 | 1064 | struct macb_dma_desc *desc; |
ae1f2a56 | 1065 | struct macb *bp = queue->bp; |
89e5785f | 1066 | |
ae1f2a56 | 1067 | desc = macb_rx_desc(queue, last_frag); |
98b5a0f4 | 1068 | len = desc->ctrl & bp->rx_frm_len_mask; |
89e5785f | 1069 | |
a268adb1 | 1070 | netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", |
b410d13e ZB |
1071 | macb_rx_ring_wrap(bp, first_frag), |
1072 | macb_rx_ring_wrap(bp, last_frag), len); | |
89e5785f | 1073 | |
64ec42fe | 1074 | /* The ethernet header starts NET_IP_ALIGN bytes into the |
29bc2e1e HS |
1075 | * first buffer. Since the header is 14 bytes, this makes the |
1076 | * payload word-aligned. | |
1077 | * | |
1078 | * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy | |
1079 | * the two padding bytes into the skb so that we avoid hitting | |
1080 | * the slowpath in memcpy(), and pull them off afterwards. | |
1081 | */ | |
1082 | skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); | |
89e5785f | 1083 | if (!skb) { |
5f1d3a5c | 1084 | bp->dev->stats.rx_dropped++; |
55054a16 | 1085 | for (frag = first_frag; ; frag++) { |
ae1f2a56 | 1086 | desc = macb_rx_desc(queue, frag); |
55054a16 | 1087 | desc->addr &= ~MACB_BIT(RX_USED); |
89e5785f HS |
1088 | if (frag == last_frag) |
1089 | break; | |
1090 | } | |
03dbe05f HS |
1091 | |
1092 | /* Make descriptor updates visible to hardware */ | |
89e5785f | 1093 | wmb(); |
03dbe05f | 1094 | |
89e5785f HS |
1095 | return 1; |
1096 | } | |
1097 | ||
29bc2e1e HS |
1098 | offset = 0; |
1099 | len += NET_IP_ALIGN; | |
bc8acf2c | 1100 | skb_checksum_none_assert(skb); |
89e5785f HS |
1101 | skb_put(skb, len); |
1102 | ||
55054a16 | 1103 | for (frag = first_frag; ; frag++) { |
1b44791a | 1104 | unsigned int frag_len = bp->rx_buffer_size; |
89e5785f HS |
1105 | |
1106 | if (offset + frag_len > len) { | |
9ba723b0 CP |
1107 | if (unlikely(frag != last_frag)) { |
1108 | dev_kfree_skb_any(skb); | |
1109 | return -1; | |
1110 | } | |
89e5785f HS |
1111 | frag_len = len - offset; |
1112 | } | |
27d7ff46 | 1113 | skb_copy_to_linear_data_offset(skb, offset, |
ae1f2a56 | 1114 | macb_rx_buffer(queue, frag), |
aa50b552 | 1115 | frag_len); |
1b44791a | 1116 | offset += bp->rx_buffer_size; |
ae1f2a56 | 1117 | desc = macb_rx_desc(queue, frag); |
55054a16 | 1118 | desc->addr &= ~MACB_BIT(RX_USED); |
89e5785f HS |
1119 | |
1120 | if (frag == last_frag) | |
1121 | break; | |
1122 | } | |
1123 | ||
03dbe05f HS |
1124 | /* Make descriptor updates visible to hardware */ |
1125 | wmb(); | |
1126 | ||
29bc2e1e | 1127 | __skb_pull(skb, NET_IP_ALIGN); |
89e5785f HS |
1128 | skb->protocol = eth_type_trans(skb, bp->dev); |
1129 | ||
5f1d3a5c TK |
1130 | bp->dev->stats.rx_packets++; |
1131 | bp->dev->stats.rx_bytes += skb->len; | |
a268adb1 | 1132 | netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", |
aa50b552 | 1133 | skb->len, skb->csum); |
89e5785f HS |
1134 | netif_receive_skb(skb); |
1135 | ||
1136 | return 0; | |
1137 | } | |
1138 | ||
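/* Why macb_rx_frame() allocates len + NET_IP_ALIGN and copies from offset
 * zero: the controller places the frame NET_IP_ALIGN (typically 2) bytes
 * into the first buffer, so copying those padding bytes too keeps source
 * and destination of the memcpy() identically aligned, and the trailing
 * __skb_pull() then restores the expected layout:
 *
 *   skb->data: [2 pad][14-byte Ethernet header][IP header ...]
 *                      ^ starts at offset 2, so the IP header is 4-byte
 *                        aligned once the pad is pulled
 *
 * GEM takes the simpler route and just skb_reserve()s NET_IP_ALIGN per
 * buffer in gem_rx_refill().
 */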
ae1f2a56 | 1139 | static inline void macb_init_rx_ring(struct macb_queue *queue) |
9ba723b0 | 1140 | { |
ae1f2a56 | 1141 | struct macb *bp = queue->bp; |
9ba723b0 | 1142 | dma_addr_t addr; |
dc97a89e | 1143 | struct macb_dma_desc *desc = NULL; |
9ba723b0 CP |
1144 | int i; |
1145 | ||
ae1f2a56 | 1146 | addr = queue->rx_buffers_dma; |
b410d13e | 1147 | for (i = 0; i < bp->rx_ring_size; i++) { |
ae1f2a56 | 1148 | desc = macb_rx_desc(queue, i); |
dc97a89e RO |
1149 | macb_set_addr(bp, desc, addr); |
1150 | desc->ctrl = 0; | |
9ba723b0 CP |
1151 | addr += bp->rx_buffer_size; |
1152 | } | |
dc97a89e | 1153 | desc->addr |= MACB_BIT(RX_WRAP); |
ae1f2a56 | 1154 | queue->rx_tail = 0; |
9ba723b0 CP |
1155 | } |
1156 | ||
ae1f2a56 | 1157 | static int macb_rx(struct macb_queue *queue, int budget) |
89e5785f | 1158 | { |
ae1f2a56 | 1159 | struct macb *bp = queue->bp; |
9ba723b0 | 1160 | bool reset_rx_queue = false; |
89e5785f | 1161 | int received = 0; |
55054a16 | 1162 | unsigned int tail; |
89e5785f HS |
1163 | int first_frag = -1; |
1164 | ||
ae1f2a56 RO |
1165 | for (tail = queue->rx_tail; budget > 0; tail++) { |
1166 | struct macb_dma_desc *desc = macb_rx_desc(queue, tail); | |
dc97a89e | 1167 | u32 ctrl; |
89e5785f | 1168 | |
03dbe05f | 1169 | /* Make hw descriptor updates visible to CPU */ |
89e5785f | 1170 | rmb(); |
03dbe05f | 1171 | |
55054a16 | 1172 | ctrl = desc->ctrl; |
89e5785f | 1173 | |
dc97a89e | 1174 | if (!(desc->addr & MACB_BIT(RX_USED))) |
89e5785f HS |
1175 | break; |
1176 | ||
1177 | if (ctrl & MACB_BIT(RX_SOF)) { | |
1178 | if (first_frag != -1) | |
ae1f2a56 | 1179 | discard_partial_frame(queue, first_frag, tail); |
89e5785f HS |
1180 | first_frag = tail; |
1181 | } | |
1182 | ||
1183 | if (ctrl & MACB_BIT(RX_EOF)) { | |
1184 | int dropped; | |
9ba723b0 CP |
1185 | |
1186 | if (unlikely(first_frag == -1)) { | |
1187 | reset_rx_queue = true; | |
1188 | continue; | |
1189 | } | |
89e5785f | 1190 | |
ae1f2a56 | 1191 | dropped = macb_rx_frame(queue, first_frag, tail); |
89e5785f | 1192 | first_frag = -1; |
9ba723b0 CP |
1193 | if (unlikely(dropped < 0)) { |
1194 | reset_rx_queue = true; | |
1195 | continue; | |
1196 | } | |
89e5785f HS |
1197 | if (!dropped) { |
1198 | received++; | |
1199 | budget--; | |
1200 | } | |
1201 | } | |
1202 | } | |
1203 | ||
9ba723b0 CP |
1204 | if (unlikely(reset_rx_queue)) { |
1205 | unsigned long flags; | |
1206 | u32 ctrl; | |
1207 | ||
1208 | netdev_err(bp->dev, "RX queue corruption: reset it\n"); | |
1209 | ||
1210 | spin_lock_irqsave(&bp->lock, flags); | |
1211 | ||
1212 | ctrl = macb_readl(bp, NCR); | |
1213 | macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); | |
1214 | ||
ae1f2a56 RO |
1215 | macb_init_rx_ring(queue); |
1216 | queue_writel(queue, RBQP, queue->rx_ring_dma); | |
9ba723b0 CP |
1217 | |
1218 | macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); | |
1219 | ||
1220 | spin_unlock_irqrestore(&bp->lock, flags); | |
1221 | return received; | |
1222 | } | |
1223 | ||
89e5785f | 1224 | if (first_frag != -1) |
ae1f2a56 | 1225 | queue->rx_tail = first_frag; |
89e5785f | 1226 | else |
ae1f2a56 | 1227 | queue->rx_tail = tail; |
89e5785f HS |
1228 | |
1229 | return received; | |
1230 | } | |
1231 | ||
bea3348e | 1232 | static int macb_poll(struct napi_struct *napi, int budget) |
89e5785f | 1233 | { |
ae1f2a56 RO |
1234 | struct macb_queue *queue = container_of(napi, struct macb_queue, napi); |
1235 | struct macb *bp = queue->bp; | |
bea3348e | 1236 | int work_done; |
89e5785f HS |
1237 | u32 status; |
1238 | ||
1239 | status = macb_readl(bp, RSR); | |
1240 | macb_writel(bp, RSR, status); | |
1241 | ||
a268adb1 | 1242 | netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", |
aa50b552 | 1243 | (unsigned long)status, budget); |
89e5785f | 1244 | |
ae1f2a56 | 1245 | work_done = bp->macbgem_ops.mog_rx(queue, budget); |
b336369c | 1246 | if (work_done < budget) { |
6ad20165 | 1247 | napi_complete_done(napi, work_done); |
89e5785f | 1248 | |
8770e91a NF |
1249 | /* Packets received while interrupts were disabled */ |
1250 | status = macb_readl(bp, RSR); | |
504ad98d | 1251 | if (status) { |
02f7a34f | 1252 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
ae1f2a56 | 1253 | queue_writel(queue, ISR, MACB_BIT(RCOMP)); |
8770e91a | 1254 | napi_reschedule(napi); |
02f7a34f | 1255 | } else { |
ae1f2a56 | 1256 | queue_writel(queue, IER, MACB_RX_INT_FLAGS); |
02f7a34f | 1257 | } |
b336369c | 1258 | } |
89e5785f HS |
1259 | |
1260 | /* TODO: Handle errors */ | |
1261 | ||
bea3348e | 1262 | return work_done; |
89e5785f HS |
1263 | } |
1264 | ||
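/* The RSR re-check after napi_complete_done() above closes a small race: a
 * frame arriving after the ring was drained but before RX interrupts are
 * re-enabled would otherwise sit unprocessed until the next interrupt. In
 * outline:
 *
 *   if (work_done < budget) {
 *           napi_complete_done(napi, work_done);
 *           if (macb_readl(bp, RSR))              // something slipped in
 *                   napi_reschedule(napi);
 *           else
 *                   queue_writel(queue, IER, MACB_RX_INT_FLAGS);
 *   }
 *
 * When the budget is exhausted NAPI keeps polling with RX interrupts still
 * masked, which is the intended behaviour under sustained load.
 */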
032dc41b HK |
1265 | static void macb_hresp_error_task(unsigned long data) |
1266 | { | |
1267 | struct macb *bp = (struct macb *)data; | |
1268 | struct net_device *dev = bp->dev; | |
1269 | struct macb_queue *queue = bp->queues; | |
1270 | unsigned int q; | |
1271 | u32 ctrl; | |
1272 | ||
1273 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { | |
1274 | queue_writel(queue, IDR, MACB_RX_INT_FLAGS | | |
1275 | MACB_TX_INT_FLAGS | | |
1276 | MACB_BIT(HRESP)); | |
1277 | } | |
1278 | ctrl = macb_readl(bp, NCR); | |
1279 | ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); | |
1280 | macb_writel(bp, NCR, ctrl); | |
1281 | ||
1282 | netif_tx_stop_all_queues(dev); | |
1283 | netif_carrier_off(dev); | |
1284 | ||
1285 | bp->macbgem_ops.mog_init_rings(bp); | |
1286 | ||
1287 | /* Initialize TX and RX buffers */ | |
1288 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { | |
1289 | queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); | |
1290 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | |
1291 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) | |
1292 | queue_writel(queue, RBQPH, | |
1293 | upper_32_bits(queue->rx_ring_dma)); | |
1294 | #endif | |
1295 | queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); | |
1296 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | |
1297 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) | |
1298 | queue_writel(queue, TBQPH, | |
1299 | upper_32_bits(queue->tx_ring_dma)); | |
1300 | #endif | |
1301 | ||
1302 | /* Enable interrupts */ | |
1303 | queue_writel(queue, IER, | |
1304 | MACB_RX_INT_FLAGS | | |
1305 | MACB_TX_INT_FLAGS | | |
1306 | MACB_BIT(HRESP)); | |
1307 | } | |
1308 | ||
1309 | ctrl |= MACB_BIT(RE) | MACB_BIT(TE); | |
1310 | macb_writel(bp, NCR, ctrl); | |
1311 | ||
1312 | netif_carrier_on(dev); | |
1313 | netif_tx_start_all_queues(dev); | |
1314 | } | |
1315 | ||
89e5785f HS |
1316 | static irqreturn_t macb_interrupt(int irq, void *dev_id) |
1317 | { | |
02c958dd CP |
1318 | struct macb_queue *queue = dev_id; |
1319 | struct macb *bp = queue->bp; | |
1320 | struct net_device *dev = bp->dev; | |
bfbb92c4 | 1321 | u32 status, ctrl; |
89e5785f | 1322 | |
02c958dd | 1323 | status = queue_readl(queue, ISR); |
89e5785f HS |
1324 | |
1325 | if (unlikely(!status)) | |
1326 | return IRQ_NONE; | |
1327 | ||
1328 | spin_lock(&bp->lock); | |
1329 | ||
1330 | while (status) { | |
89e5785f HS |
1331 | /* close possible race with dev_close */ |
1332 | if (unlikely(!netif_running(dev))) { | |
02c958dd | 1333 | queue_writel(queue, IDR, -1); |
24468374 NS |
1334 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
1335 | queue_writel(queue, ISR, -1); | |
89e5785f HS |
1336 | break; |
1337 | } | |
1338 | ||
02c958dd CP |
1339 | netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", |
1340 | (unsigned int)(queue - bp->queues), | |
1341 | (unsigned long)status); | |
a268adb1 | 1342 | |
89e5785f | 1343 | if (status & MACB_RX_INT_FLAGS) { |
64ec42fe | 1344 | /* There's no point taking any more interrupts |
b336369c JH |
1345 | * until we have processed the buffers. The |
1346 | * scheduling call may fail if the poll routine | |
1347 | * is already scheduled, so disable interrupts | |
1348 | * now. | |
1349 | */ | |
02c958dd | 1350 | queue_writel(queue, IDR, MACB_RX_INT_FLAGS); |
581df9e1 | 1351 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
02c958dd | 1352 | queue_writel(queue, ISR, MACB_BIT(RCOMP)); |
b336369c | 1353 | |
ae1f2a56 | 1354 | if (napi_schedule_prep(&queue->napi)) { |
a268adb1 | 1355 | netdev_vdbg(bp->dev, "scheduling RX softirq\n"); |
ae1f2a56 | 1356 | __napi_schedule(&queue->napi); |
89e5785f HS |
1357 | } |
1358 | } | |
1359 | ||
e86cd53a | 1360 | if (unlikely(status & (MACB_TX_ERR_FLAGS))) { |
02c958dd CP |
1361 | queue_writel(queue, IDR, MACB_TX_INT_FLAGS); |
1362 | schedule_work(&queue->tx_error_task); | |
6a027b70 SB |
1363 | |
1364 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | |
02c958dd | 1365 | queue_writel(queue, ISR, MACB_TX_ERR_FLAGS); |
6a027b70 | 1366 | |
e86cd53a NF |
1367 | break; |
1368 | } | |
1369 | ||
1370 | if (status & MACB_BIT(TCOMP)) | |
02c958dd | 1371 | macb_tx_interrupt(queue); |
89e5785f | 1372 | |
64ec42fe | 1373 | /* Link change detection isn't possible with RMII, so we'll |
89e5785f HS |
1374 | * add that if/when we get our hands on a full-blown MII PHY. |
1375 | */ | |
1376 | ||
86b5e7de NS |
1377 | /* There is a hardware issue under heavy load where DMA can |
1378 | * stop, this causes endless "used buffer descriptor read" | |
1379 | * interrupts but it can be cleared by re-enabling RX. See | |
1380 | * the at91 manual, section 41.3.1 or the Zynq manual | |
1381 | * section 16.7.4 for details. | |
1382 | */ | |
bfbb92c4 NS |
1383 | if (status & MACB_BIT(RXUBR)) { |
1384 | ctrl = macb_readl(bp, NCR); | |
1385 | macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); | |
ffac0e96 | 1386 | wmb(); |
bfbb92c4 NS |
1387 | macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); |
1388 | ||
1389 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | |
ba504994 | 1390 | queue_writel(queue, ISR, MACB_BIT(RXUBR)); |
bfbb92c4 NS |
1391 | } |
1392 | ||
b19f7f71 AS |
1393 | if (status & MACB_BIT(ISR_ROVR)) { |
1394 | /* We missed at least one packet */ | |
f75ba50b JI |
1395 | if (macb_is_gem(bp)) |
1396 | bp->hw_stats.gem.rx_overruns++; | |
1397 | else | |
1398 | bp->hw_stats.macb.rx_overruns++; | |
6a027b70 SB |
1399 | |
1400 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | |
02c958dd | 1401 | queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); |
b19f7f71 AS |
1402 | } |
1403 | ||
89e5785f | 1404 | if (status & MACB_BIT(HRESP)) { |
032dc41b | 1405 | tasklet_schedule(&bp->hresp_err_tasklet); |
c220f8cd | 1406 | netdev_err(dev, "DMA bus error: HRESP not OK\n"); |
6a027b70 SB |
1407 | |
1408 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | |
02c958dd | 1409 | queue_writel(queue, ISR, MACB_BIT(HRESP)); |
89e5785f | 1410 | } |
02c958dd | 1411 | status = queue_readl(queue, ISR); |
89e5785f HS |
1412 | } |
1413 | ||
1414 | spin_unlock(&bp->lock); | |
1415 | ||
1416 | return IRQ_HANDLED; | |
1417 | } | |
1418 | ||
6e8cf5c0 | 1419 | #ifdef CONFIG_NET_POLL_CONTROLLER |
64ec42fe | 1420 | /* Polling receive - used by netconsole and other diagnostic tools |
6e8cf5c0 TP |
1421 | * to allow network i/o with interrupts disabled. |
1422 | */ | |
1423 | static void macb_poll_controller(struct net_device *dev) | |
1424 | { | |
02c958dd CP |
1425 | struct macb *bp = netdev_priv(dev); |
1426 | struct macb_queue *queue; | |
6e8cf5c0 | 1427 | unsigned long flags; |
02c958dd | 1428 | unsigned int q; |
6e8cf5c0 TP |
1429 | |
1430 | local_irq_save(flags); | |
02c958dd CP |
1431 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) |
1432 | macb_interrupt(dev->irq, queue); | |
6e8cf5c0 TP |
1433 | local_irq_restore(flags); |
1434 | } | |
1435 | #endif | |
1436 | ||
a4c35ed3 | 1437 | static unsigned int macb_tx_map(struct macb *bp, |
02c958dd | 1438 | struct macb_queue *queue, |
1629dd4f RO |
1439 | struct sk_buff *skb, |
1440 | unsigned int hdrlen) | |
89e5785f | 1441 | { |
89e5785f | 1442 | dma_addr_t mapping; |
02c958dd | 1443 | unsigned int len, entry, i, tx_head = queue->tx_head; |
a4c35ed3 | 1444 | struct macb_tx_skb *tx_skb = NULL; |
55054a16 | 1445 | struct macb_dma_desc *desc; |
a4c35ed3 CP |
1446 | unsigned int offset, size, count = 0; |
1447 | unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; | |
1629dd4f RO |
1448 | unsigned int eof = 1, mss_mfs = 0; |
1449 | u32 ctrl, lso_ctrl = 0, seq_ctrl = 0; | |
1450 | ||
1451 | /* LSO */ | |
1452 | if (skb_shinfo(skb)->gso_size != 0) { | |
1453 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) | |
1454 | /* UDP - UFO */ | |
1455 | lso_ctrl = MACB_LSO_UFO_ENABLE; | |
1456 | else | |
1457 | /* TCP - TSO */ | |
1458 | lso_ctrl = MACB_LSO_TSO_ENABLE; | |
1459 | } | |
a4c35ed3 CP |
1460 | |
1461 | /* First, map non-paged data */ | |
1462 | len = skb_headlen(skb); | |
1629dd4f RO |
1463 | |
1464 | /* first buffer length */ | |
1465 | size = hdrlen; | |
1466 | ||
a4c35ed3 CP |
1467 | offset = 0; |
1468 | while (len) { | |
b410d13e | 1469 | entry = macb_tx_ring_wrap(bp, tx_head); |
02c958dd | 1470 | tx_skb = &queue->tx_skb[entry]; |
a4c35ed3 CP |
1471 | |
1472 | mapping = dma_map_single(&bp->pdev->dev, | |
1473 | skb->data + offset, | |
1474 | size, DMA_TO_DEVICE); | |
1475 | if (dma_mapping_error(&bp->pdev->dev, mapping)) | |
1476 | goto dma_error; | |
1477 | ||
1478 | /* Save info to properly release resources */ | |
1479 | tx_skb->skb = NULL; | |
1480 | tx_skb->mapping = mapping; | |
1481 | tx_skb->size = size; | |
1482 | tx_skb->mapped_as_page = false; | |
1483 | ||
1484 | len -= size; | |
1485 | offset += size; | |
1486 | count++; | |
1487 | tx_head++; | |
1629dd4f RO |
1488 | |
1489 | size = min(len, bp->max_tx_length); | |
a4c35ed3 CP |
1490 | } |
1491 | ||
1492 | /* Then, map paged data from fragments */ | |
1493 | for (f = 0; f < nr_frags; f++) { | |
1494 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; | |
1495 | ||
1496 | len = skb_frag_size(frag); | |
1497 | offset = 0; | |
1498 | while (len) { | |
1499 | size = min(len, bp->max_tx_length); | |
b410d13e | 1500 | entry = macb_tx_ring_wrap(bp, tx_head); |
02c958dd | 1501 | tx_skb = &queue->tx_skb[entry]; |
a4c35ed3 CP |
1502 | |
1503 | mapping = skb_frag_dma_map(&bp->pdev->dev, frag, | |
1504 | offset, size, DMA_TO_DEVICE); | |
1505 | if (dma_mapping_error(&bp->pdev->dev, mapping)) | |
1506 | goto dma_error; | |
1507 | ||
1508 | /* Save info to properly release resources */ | |
1509 | tx_skb->skb = NULL; | |
1510 | tx_skb->mapping = mapping; | |
1511 | tx_skb->size = size; | |
1512 | tx_skb->mapped_as_page = true; | |
1513 | ||
1514 | len -= size; | |
1515 | offset += size; | |
1516 | count++; | |
1517 | tx_head++; | |
1518 | } | |
1519 | } | |
1520 | ||
1521 | /* Should never happen */ | |
aa50b552 | 1522 | if (unlikely(!tx_skb)) { |
a4c35ed3 CP |
1523 | netdev_err(bp->dev, "BUG! empty skb!\n"); |
1524 | return 0; | |
1525 | } | |
1526 | ||
1527 | /* This is the last buffer of the frame: save socket buffer */ | |
1528 | tx_skb->skb = skb; | |
1529 | ||
1530 | /* Update TX ring: update buffer descriptors in reverse order | |
1531 | * to avoid a race condition |
1532 | */ | |
1533 | ||
1534 | /* Set 'TX_USED' bit in buffer descriptor at tx_head position | |
1535 | * to mark the end of the TX queue |
1536 | */ | |
1537 | i = tx_head; | |
b410d13e | 1538 | entry = macb_tx_ring_wrap(bp, i); |
a4c35ed3 | 1539 | ctrl = MACB_BIT(TX_USED); |
dc97a89e | 1540 | desc = macb_tx_desc(queue, entry); |
a4c35ed3 CP |
1541 | desc->ctrl = ctrl; |
1542 | ||
1629dd4f RO |
1543 | if (lso_ctrl) { |
1544 | if (lso_ctrl == MACB_LSO_UFO_ENABLE) | |
1545 | /* include header and FCS in value given to h/w */ | |
1546 | mss_mfs = skb_shinfo(skb)->gso_size + | |
1547 | skb_transport_offset(skb) + | |
1548 | ETH_FCS_LEN; | |
1549 | else /* TSO */ { | |
1550 | mss_mfs = skb_shinfo(skb)->gso_size; | |
1551 | /* TCP Sequence Number Source Select | |
1552 | * can be set only for TSO | |
1553 | */ | |
1554 | seq_ctrl = 0; | |
1555 | } | |
1556 | } | |
1557 | ||
a4c35ed3 CP |
1558 | do { |
1559 | i--; | |
b410d13e | 1560 | entry = macb_tx_ring_wrap(bp, i); |
02c958dd | 1561 | tx_skb = &queue->tx_skb[entry]; |
dc97a89e | 1562 | desc = macb_tx_desc(queue, entry); |
a4c35ed3 CP |
1563 | |
1564 | ctrl = (u32)tx_skb->size; | |
1565 | if (eof) { | |
1566 | ctrl |= MACB_BIT(TX_LAST); | |
1567 | eof = 0; | |
1568 | } | |
b410d13e | 1569 | if (unlikely(entry == (bp->tx_ring_size - 1))) |
a4c35ed3 CP |
1570 | ctrl |= MACB_BIT(TX_WRAP); |
1571 | ||
1629dd4f RO |
1572 | /* First descriptor is header descriptor */ |
1573 | if (i == queue->tx_head) { | |
1574 | ctrl |= MACB_BF(TX_LSO, lso_ctrl); | |
1575 | ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl); | |
653e92a9 CB |
1576 | if ((bp->dev->features & NETIF_F_HW_CSUM) && |
1577 | skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl) | |
1578 | ctrl |= MACB_BIT(TX_NOCRC); | |
1629dd4f RO |
1579 | } else |
1580 | /* Only set MSS/MFS on payload descriptors | |
1581 | * (second or later descriptor) | |
1582 | */ | |
1583 | ctrl |= MACB_BF(MSS_MFS, mss_mfs); | |
1584 | ||
a4c35ed3 | 1585 | /* Set TX buffer descriptor */ |
dc97a89e | 1586 | macb_set_addr(bp, desc, tx_skb->mapping); |
a4c35ed3 CP |
1587 | /* desc->addr must be visible to hardware before clearing |
1588 | * 'TX_USED' bit in desc->ctrl. | |
1589 | */ | |
1590 | wmb(); | |
1591 | desc->ctrl = ctrl; | |
02c958dd | 1592 | } while (i != queue->tx_head); |
a4c35ed3 | 1593 | |
02c958dd | 1594 | queue->tx_head = tx_head; |
a4c35ed3 CP |
1595 | |
1596 | return count; | |
1597 | ||
1598 | dma_error: | |
1599 | netdev_err(bp->dev, "TX DMA map failed\n"); | |
1600 | ||
02c958dd CP |
1601 | for (i = queue->tx_head; i != tx_head; i++) { |
1602 | tx_skb = macb_tx_skb(queue, i); | |
a4c35ed3 CP |
1603 | |
1604 | macb_tx_unmap(bp, tx_skb); | |
1605 | } | |
1606 | ||
1607 | return 0; | |
1608 | } | |
1609 | ||
1629dd4f RO |
1610 | static netdev_features_t macb_features_check(struct sk_buff *skb, |
1611 | struct net_device *dev, | |
1612 | netdev_features_t features) | |
1613 | { | |
1614 | unsigned int nr_frags, f; | |
1615 | unsigned int hdrlen; | |
1616 | ||
1617 | /* Validate LSO compatibility */ | |
1618 | ||
1619 | /* there is only one buffer */ | |
1620 | if (!skb_is_nonlinear(skb)) | |
1621 | return features; | |
1622 | ||
1623 | /* length of header */ | |
1624 | hdrlen = skb_transport_offset(skb); | |
1625 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | |
1626 | hdrlen += tcp_hdrlen(skb); | |
1627 | ||
1628 | /* For LSO: | |
1629 | * When software supplies two or more payload buffers, all payload buffers |
1630 | * apart from the last must be a multiple of 8 bytes in size. | |
1631 | */ | |
1632 | if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN)) | |
1633 | return features & ~MACB_NETIF_LSO; | |
1634 | ||
1635 | nr_frags = skb_shinfo(skb)->nr_frags; | |
1636 | /* No need to check last fragment */ | |
1637 | nr_frags--; | |
1638 | for (f = 0; f < nr_frags; f++) { | |
1639 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; | |
1640 | ||
1641 | if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN)) | |
1642 | return features & ~MACB_NETIF_LSO; | |
1643 | } | |
1644 | return features; | |
1645 | } | |
1646 | ||
007e4ba3 HB |
1647 | static inline int macb_clear_csum(struct sk_buff *skb) |
1648 | { | |
1649 | /* no change for packets without checksum offloading */ | |
1650 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
1651 | return 0; | |
1652 | ||
1653 | /* make sure we can modify the header */ | |
1654 | if (unlikely(skb_cow_head(skb, 0))) | |
1655 | return -1; | |
1656 | ||
1657 | /* initialize checksum field | |
1658 | * This is required - at least for Zynq, which otherwise calculates | |
1659 | * wrong UDP header checksums for UDP packets with UDP data len <=2 | |
1660 | */ | |
1661 | *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; | |
1662 | return 0; | |
1663 | } | |
1664 | ||
653e92a9 CB |
1665 | static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) |
1666 | { | |
1667 | bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb); | |
1668 | int padlen = ETH_ZLEN - (*skb)->len; | |
1669 | int headroom = skb_headroom(*skb); | |
1670 | int tailroom = skb_tailroom(*skb); | |
1671 | struct sk_buff *nskb; | |
1672 | u32 fcs; | |
1673 | ||
1674 | if (!(ndev->features & NETIF_F_HW_CSUM) || | |
1675 | (*skb)->ip_summed == CHECKSUM_PARTIAL || |
1676 | skb_shinfo(*skb)->gso_size) /* Not available for GSO */ | |
1677 | return 0; | |
1678 | ||
1679 | if (padlen <= 0) { | |
1680 | /* FCS could be appended to tailroom. */ |
1681 | if (tailroom >= ETH_FCS_LEN) | |
1682 | goto add_fcs; | |
1683 | /* FCS could be appended by moving data to headroom. */ |
1684 | else if (!cloned && headroom + tailroom >= ETH_FCS_LEN) | |
1685 | padlen = 0; | |
1686 | /* No room for FCS, need to reallocate skb. */ | |
1687 | else | |
1688 | padlen = ETH_FCS_LEN - tailroom; | |
1689 | } else { | |
1690 | /* Add room for FCS. */ | |
1691 | padlen += ETH_FCS_LEN; | |
1692 | } | |
1693 | ||
1694 | if (!cloned && headroom + tailroom >= padlen) { | |
1695 | (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); | |
1696 | skb_set_tail_pointer(*skb, (*skb)->len); | |
1697 | } else { | |
1698 | nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); | |
1699 | if (!nskb) | |
1700 | return -ENOMEM; | |
1701 | ||
1702 | dev_kfree_skb_any(*skb); | |
1703 | *skb = nskb; | |
1704 | } | |
1705 | ||
1706 | if (padlen) { | |
1707 | if (padlen >= ETH_FCS_LEN) | |
1708 | skb_put_zero(*skb, padlen - ETH_FCS_LEN); | |
1709 | else | |
1710 | skb_trim(*skb, ETH_FCS_LEN - padlen); | |
1711 | } | |
1712 | ||
1713 | add_fcs: | |
1714 | /* append FCS to the packet */ |
1715 | fcs = crc32_le(~0, (*skb)->data, (*skb)->len); | |
1716 | fcs = ~fcs; | |
1717 | ||
1718 | skb_put_u8(*skb, fcs & 0xff); | |
1719 | skb_put_u8(*skb, (fcs >> 8) & 0xff); | |
1720 | skb_put_u8(*skb, (fcs >> 16) & 0xff); | |
1721 | skb_put_u8(*skb, (fcs >> 24) & 0xff); | |
1722 | ||
1723 | return 0; | |
1724 | } | |
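The FCS computed above is the standard Ethernet CRC-32: crc32_le() seeded with all ones over the (already padded) frame, bit-complemented, and appended low byte first, which is why the driver emits fcs & 0xff before the higher bytes. Below is a minimal user-space sketch of the same calculation; the bitwise loop is only an illustration standing in for the kernel's table-driven crc32_le(), and the all-zero frame contents are a placeholder.

    #include <stdint.h>
    #include <stdio.h>

    /* Bit-by-bit equivalent of crc32_le(~0, data, len), for illustration only
     * (reflected polynomial 0xEDB88320).
     */
    static uint32_t crc32_le_sketch(uint32_t crc, const uint8_t *data, size_t len)
    {
    	for (size_t i = 0; i < len; i++) {
    		crc ^= data[i];
    		for (int bit = 0; bit < 8; bit++)
    			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
    	}
    	return crc;
    }

    int main(void)
    {
    	uint8_t frame[64] = { 0 };	/* 60-byte padded frame, placeholder payload */
    	uint32_t fcs = ~crc32_le_sketch(~0u, frame, 60);

    	/* Append low byte first, as macb_pad_and_fcs() does with skb_put_u8() */
    	frame[60] = fcs & 0xff;
    	frame[61] = (fcs >> 8) & 0xff;
    	frame[62] = (fcs >> 16) & 0xff;
    	frame[63] = (fcs >> 24) & 0xff;

    	printf("FCS = 0x%08x\n", fcs);
    	return 0;
    }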
1725 | ||
d1c38957 | 1726 | static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) |
a4c35ed3 | 1727 | { |
02c958dd | 1728 | u16 queue_index = skb_get_queue_mapping(skb); |
a4c35ed3 | 1729 | struct macb *bp = netdev_priv(dev); |
02c958dd | 1730 | struct macb_queue *queue = &bp->queues[queue_index]; |
4871953c | 1731 | unsigned long flags; |
1629dd4f RO |
1732 | unsigned int desc_cnt, nr_frags, frag_size, f; |
1733 | unsigned int hdrlen; | |
1734 | bool is_lso, is_udp = 0; | |
d1c38957 | 1735 | netdev_tx_t ret = NETDEV_TX_OK; |
1629dd4f | 1736 | |
33729f25 CB |
1737 | if (macb_clear_csum(skb)) { |
1738 | dev_kfree_skb_any(skb); | |
1739 | return ret; | |
1740 | } | |
1741 | ||
653e92a9 CB |
1742 | if (macb_pad_and_fcs(&skb, dev)) { |
1743 | dev_kfree_skb_any(skb); | |
1744 | return ret; | |
1745 | } | |
1746 | ||
1629dd4f RO |
1747 | is_lso = (skb_shinfo(skb)->gso_size != 0); |
1748 | ||
1749 | if (is_lso) { | |
1750 | is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP); | |
1751 | ||
1752 | /* length of headers */ | |
1753 | if (is_udp) | |
1754 | /* only queue eth + ip headers separately for UDP */ | |
1755 | hdrlen = skb_transport_offset(skb); | |
1756 | else | |
1757 | hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
1758 | if (skb_headlen(skb) < hdrlen) { | |
1759 | netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); | |
1760 | /* if this is required, would need to copy to single buffer */ | |
1761 | return NETDEV_TX_BUSY; | |
1762 | } | |
1763 | } else | |
1764 | hdrlen = min(skb_headlen(skb), bp->max_tx_length); | |
89e5785f | 1765 | |
a268adb1 HS |
1766 | #if defined(DEBUG) && defined(VERBOSE_DEBUG) |
1767 | netdev_vdbg(bp->dev, | |
aa50b552 MF |
1768 | "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", |
1769 | queue_index, skb->len, skb->head, skb->data, | |
1770 | skb_tail_pointer(skb), skb_end_pointer(skb)); | |
c220f8cd JI |
1771 | print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, |
1772 | skb->data, 16, true); | |
89e5785f HS |
1773 | #endif |
1774 | ||
a4c35ed3 CP |
1775 | /* Count how many TX buffer descriptors are needed to send this |
1776 | * socket buffer: skb fragments of jumbo frames may need to be | |
aa50b552 | 1777 | * split into many buffer descriptors. |
a4c35ed3 | 1778 | */ |
1629dd4f RO |
1779 | if (is_lso && (skb_headlen(skb) > hdrlen)) |
1780 | /* extra header descriptor if also payload in first buffer */ | |
1781 | desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; | |
1782 | else | |
1783 | desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); | |
a4c35ed3 CP |
1784 | nr_frags = skb_shinfo(skb)->nr_frags; |
1785 | for (f = 0; f < nr_frags; f++) { | |
1786 | frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); | |
1629dd4f | 1787 | desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); |
a4c35ed3 CP |
1788 | } |
1789 | ||
4871953c | 1790 | spin_lock_irqsave(&bp->lock, flags); |
89e5785f HS |
1791 | |
1792 | /* This is a hard error, log it. */ | |
b410d13e | 1793 | if (CIRC_SPACE(queue->tx_head, queue->tx_tail, |
1629dd4f | 1794 | bp->tx_ring_size) < desc_cnt) { |
02c958dd | 1795 | netif_stop_subqueue(dev, queue_index); |
4871953c | 1796 | spin_unlock_irqrestore(&bp->lock, flags); |
c220f8cd | 1797 | netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", |
02c958dd | 1798 | queue->tx_head, queue->tx_tail); |
5b548140 | 1799 | return NETDEV_TX_BUSY; |
89e5785f HS |
1800 | } |
1801 | ||
a4c35ed3 | 1802 | /* Map socket buffer for DMA transfer */ |
1629dd4f | 1803 | if (!macb_tx_map(bp, queue, skb, hdrlen)) { |
c88b5b6a | 1804 | dev_kfree_skb_any(skb); |
92030908 SB |
1805 | goto unlock; |
1806 | } | |
55054a16 | 1807 | |
03dbe05f | 1808 | /* Make newly initialized descriptor visible to hardware */ |
89e5785f | 1809 | wmb(); |
e072092f RC |
1810 | skb_tx_timestamp(skb); |
1811 | ||
89e5785f HS |
1812 | macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); |
1813 | ||
b410d13e | 1814 | if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) |
02c958dd | 1815 | netif_stop_subqueue(dev, queue_index); |
89e5785f | 1816 | |
92030908 | 1817 | unlock: |
4871953c | 1818 | spin_unlock_irqrestore(&bp->lock, flags); |
89e5785f | 1819 | |
d1c38957 | 1820 | return ret; |
89e5785f HS |
1821 | } |
1822 | ||
4df95131 | 1823 | static void macb_init_rx_buffer_size(struct macb *bp, size_t size) |
1b44791a NF |
1824 | { |
1825 | if (!macb_is_gem(bp)) { | |
1826 | bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; | |
1827 | } else { | |
4df95131 | 1828 | bp->rx_buffer_size = size; |
1b44791a | 1829 | |
1b44791a | 1830 | if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { |
4df95131 | 1831 | netdev_dbg(bp->dev, |
aa50b552 MF |
1832 | "RX buffer must be multiple of %d bytes, expanding\n", |
1833 | RX_BUFFER_MULTIPLE); | |
1b44791a | 1834 | bp->rx_buffer_size = |
4df95131 | 1835 | roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); |
1b44791a | 1836 | } |
1b44791a | 1837 | } |
4df95131 | 1838 | |
5b5e0928 | 1839 | netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", |
4df95131 | 1840 | bp->dev->mtu, bp->rx_buffer_size); |
1b44791a NF |
1841 | } |
1842 | ||
4df95131 NF |
1843 | static void gem_free_rx_buffers(struct macb *bp) |
1844 | { | |
1845 | struct sk_buff *skb; | |
1846 | struct macb_dma_desc *desc; | |
ae1f2a56 | 1847 | struct macb_queue *queue; |
4df95131 | 1848 | dma_addr_t addr; |
ae1f2a56 | 1849 | unsigned int q; |
4df95131 NF |
1850 | int i; |
1851 | ||
ae1f2a56 RO |
1852 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
1853 | if (!queue->rx_skbuff) | |
1854 | continue; | |
4df95131 | 1855 | |
ae1f2a56 RO |
1856 | for (i = 0; i < bp->rx_ring_size; i++) { |
1857 | skb = queue->rx_skbuff[i]; | |
4df95131 | 1858 | |
ae1f2a56 RO |
1859 | if (!skb) |
1860 | continue; | |
4df95131 | 1861 | |
ae1f2a56 RO |
1862 | desc = macb_rx_desc(queue, i); |
1863 | addr = macb_get_addr(bp, desc); | |
dc97a89e | 1864 | |
ae1f2a56 RO |
1865 | dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, |
1866 | DMA_FROM_DEVICE); | |
1867 | dev_kfree_skb_any(skb); | |
1868 | skb = NULL; | |
1869 | } | |
4df95131 | 1870 | |
ae1f2a56 RO |
1871 | kfree(queue->rx_skbuff); |
1872 | queue->rx_skbuff = NULL; | |
1873 | } | |
4df95131 NF |
1874 | } |
1875 | ||
1876 | static void macb_free_rx_buffers(struct macb *bp) | |
1877 | { | |
ae1f2a56 RO |
1878 | struct macb_queue *queue = &bp->queues[0]; |
1879 | ||
1880 | if (queue->rx_buffers) { | |
4df95131 | 1881 | dma_free_coherent(&bp->pdev->dev, |
b410d13e | 1882 | bp->rx_ring_size * bp->rx_buffer_size, |
ae1f2a56 RO |
1883 | queue->rx_buffers, queue->rx_buffers_dma); |
1884 | queue->rx_buffers = NULL; | |
4df95131 NF |
1885 | } |
1886 | } | |
1b44791a | 1887 | |
89e5785f HS |
1888 | static void macb_free_consistent(struct macb *bp) |
1889 | { | |
02c958dd CP |
1890 | struct macb_queue *queue; |
1891 | unsigned int q; | |
404cd086 | 1892 | int size; |
02c958dd | 1893 | |
4df95131 | 1894 | bp->macbgem_ops.mog_free_rx_buffers(bp); |
02c958dd CP |
1895 | |
1896 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { | |
1897 | kfree(queue->tx_skb); | |
1898 | queue->tx_skb = NULL; | |
1899 | if (queue->tx_ring) { | |
404cd086 HK |
1900 | size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; |
1901 | dma_free_coherent(&bp->pdev->dev, size, | |
02c958dd CP |
1902 | queue->tx_ring, queue->tx_ring_dma); |
1903 | queue->tx_ring = NULL; | |
1904 | } | |
e50b770e | 1905 | if (queue->rx_ring) { |
404cd086 HK |
1906 | size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; |
1907 | dma_free_coherent(&bp->pdev->dev, size, | |
e50b770e HK |
1908 | queue->rx_ring, queue->rx_ring_dma); |
1909 | queue->rx_ring = NULL; | |
1910 | } | |
89e5785f | 1911 | } |
4df95131 NF |
1912 | } |
1913 | ||
1914 | static int gem_alloc_rx_buffers(struct macb *bp) | |
1915 | { | |
ae1f2a56 RO |
1916 | struct macb_queue *queue; |
1917 | unsigned int q; | |
4df95131 NF |
1918 | int size; |
1919 | ||
ae1f2a56 RO |
1920 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
1921 | size = bp->rx_ring_size * sizeof(struct sk_buff *); | |
1922 | queue->rx_skbuff = kzalloc(size, GFP_KERNEL); | |
1923 | if (!queue->rx_skbuff) | |
1924 | return -ENOMEM; | |
1925 | else | |
1926 | netdev_dbg(bp->dev, | |
1927 | "Allocated %d RX struct sk_buff entries at %p\n", | |
1928 | bp->rx_ring_size, queue->rx_skbuff); | |
1929 | } | |
4df95131 NF |
1930 | return 0; |
1931 | } | |
1932 | ||
1933 | static int macb_alloc_rx_buffers(struct macb *bp) | |
1934 | { | |
ae1f2a56 | 1935 | struct macb_queue *queue = &bp->queues[0]; |
4df95131 NF |
1936 | int size; |
1937 | ||
b410d13e | 1938 | size = bp->rx_ring_size * bp->rx_buffer_size; |
ae1f2a56 RO |
1939 | queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, |
1940 | &queue->rx_buffers_dma, GFP_KERNEL); | |
1941 | if (!queue->rx_buffers) | |
4df95131 | 1942 | return -ENOMEM; |
64ec42fe MF |
1943 | |
1944 | netdev_dbg(bp->dev, | |
1945 | "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", | |
ae1f2a56 | 1946 | size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers); |
4df95131 | 1947 | return 0; |
89e5785f HS |
1948 | } |
1949 | ||
1950 | static int macb_alloc_consistent(struct macb *bp) | |
1951 | { | |
02c958dd CP |
1952 | struct macb_queue *queue; |
1953 | unsigned int q; | |
89e5785f HS |
1954 | int size; |
1955 | ||
02c958dd | 1956 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
404cd086 | 1957 | size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; |
02c958dd CP |
1958 | queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, |
1959 | &queue->tx_ring_dma, | |
1960 | GFP_KERNEL); | |
1961 | if (!queue->tx_ring) | |
1962 | goto out_err; | |
1963 | netdev_dbg(bp->dev, | |
1964 | "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n", | |
1965 | q, size, (unsigned long)queue->tx_ring_dma, | |
1966 | queue->tx_ring); | |
1967 | ||
b410d13e | 1968 | size = bp->tx_ring_size * sizeof(struct macb_tx_skb); |
02c958dd CP |
1969 | queue->tx_skb = kmalloc(size, GFP_KERNEL); |
1970 | if (!queue->tx_skb) | |
1971 | goto out_err; | |
89e5785f | 1972 | |
404cd086 | 1973 | size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; |
ae1f2a56 RO |
1974 | queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, |
1975 | &queue->rx_ring_dma, GFP_KERNEL); | |
1976 | if (!queue->rx_ring) | |
1977 | goto out_err; | |
1978 | netdev_dbg(bp->dev, | |
1979 | "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", | |
1980 | size, (unsigned long)queue->rx_ring_dma, queue->rx_ring); | |
1981 | } | |
4df95131 | 1982 | if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) |
89e5785f | 1983 | goto out_err; |
89e5785f HS |
1984 | |
1985 | return 0; | |
1986 | ||
1987 | out_err: | |
1988 | macb_free_consistent(bp); | |
1989 | return -ENOMEM; | |
1990 | } | |
1991 | ||
4df95131 NF |
1992 | static void gem_init_rings(struct macb *bp) |
1993 | { | |
02c958dd | 1994 | struct macb_queue *queue; |
dc97a89e | 1995 | struct macb_dma_desc *desc = NULL; |
02c958dd | 1996 | unsigned int q; |
4df95131 NF |
1997 | int i; |
1998 | ||
02c958dd | 1999 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
b410d13e | 2000 | for (i = 0; i < bp->tx_ring_size; i++) { |
dc97a89e RO |
2001 | desc = macb_tx_desc(queue, i); |
2002 | macb_set_addr(bp, desc, 0); | |
2003 | desc->ctrl = MACB_BIT(TX_USED); | |
02c958dd | 2004 | } |
dc97a89e | 2005 | desc->ctrl |= MACB_BIT(TX_WRAP); |
02c958dd CP |
2006 | queue->tx_head = 0; |
2007 | queue->tx_tail = 0; | |
4df95131 | 2008 | |
ae1f2a56 RO |
2009 | queue->rx_tail = 0; |
2010 | queue->rx_prepared_head = 0; | |
2011 | ||
2012 | gem_rx_refill(queue); | |
2013 | } | |
4df95131 | 2014 | |
4df95131 NF |
2015 | } |
2016 | ||
89e5785f HS |
2017 | static void macb_init_rings(struct macb *bp) |
2018 | { | |
2019 | int i; | |
dc97a89e | 2020 | struct macb_dma_desc *desc = NULL; |
89e5785f | 2021 | |
ae1f2a56 | 2022 | macb_init_rx_ring(&bp->queues[0]); |
89e5785f | 2023 | |
b410d13e | 2024 | for (i = 0; i < bp->tx_ring_size; i++) { |
dc97a89e RO |
2025 | desc = macb_tx_desc(&bp->queues[0], i); |
2026 | macb_set_addr(bp, desc, 0); | |
2027 | desc->ctrl = MACB_BIT(TX_USED); | |
89e5785f | 2028 | } |
21d3515c BS |
2029 | bp->queues[0].tx_head = 0; |
2030 | bp->queues[0].tx_tail = 0; | |
dc97a89e | 2031 | desc->ctrl |= MACB_BIT(TX_WRAP); |
89e5785f HS |
2032 | } |
2033 | ||
2034 | static void macb_reset_hw(struct macb *bp) | |
2035 | { | |
02c958dd CP |
2036 | struct macb_queue *queue; |
2037 | unsigned int q; | |
0da70f80 | 2038 | u32 ctrl = macb_readl(bp, NCR); |
02c958dd | 2039 | |
64ec42fe | 2040 | /* Disable RX and TX (XXX: Should we halt the transmission |
89e5785f HS |
2041 | * more gracefully?) |
2042 | */ | |
0da70f80 | 2043 | ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE)); |
89e5785f HS |
2044 | |
2045 | /* Clear the stats registers (XXX: Update stats first?) */ | |
0da70f80 AH |
2046 | ctrl |= MACB_BIT(CLRSTAT); |
2047 | ||
2048 | macb_writel(bp, NCR, ctrl); | |
89e5785f HS |
2049 | |
2050 | /* Clear all status flags */ | |
95ebcea6 JE |
2051 | macb_writel(bp, TSR, -1); |
2052 | macb_writel(bp, RSR, -1); | |
89e5785f HS |
2053 | |
2054 | /* Disable all interrupts */ | |
02c958dd CP |
2055 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
2056 | queue_writel(queue, IDR, -1); | |
2057 | queue_readl(queue, ISR); | |
24468374 NS |
2058 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
2059 | queue_writel(queue, ISR, -1); | |
02c958dd | 2060 | } |
89e5785f HS |
2061 | } |
2062 | ||
70c9f3d4 JI |
2063 | static u32 gem_mdc_clk_div(struct macb *bp) |
2064 | { | |
2065 | u32 config; | |
2066 | unsigned long pclk_hz = clk_get_rate(bp->pclk); | |
2067 | ||
2068 | if (pclk_hz <= 20000000) | |
2069 | config = GEM_BF(CLK, GEM_CLK_DIV8); | |
2070 | else if (pclk_hz <= 40000000) | |
2071 | config = GEM_BF(CLK, GEM_CLK_DIV16); | |
2072 | else if (pclk_hz <= 80000000) | |
2073 | config = GEM_BF(CLK, GEM_CLK_DIV32); | |
2074 | else if (pclk_hz <= 120000000) | |
2075 | config = GEM_BF(CLK, GEM_CLK_DIV48); | |
2076 | else if (pclk_hz <= 160000000) | |
2077 | config = GEM_BF(CLK, GEM_CLK_DIV64); | |
2078 | else | |
2079 | config = GEM_BF(CLK, GEM_CLK_DIV96); | |
2080 | ||
2081 | return config; | |
2082 | } | |
2083 | ||
2084 | static u32 macb_mdc_clk_div(struct macb *bp) | |
2085 | { | |
2086 | u32 config; | |
2087 | unsigned long pclk_hz; | |
2088 | ||
2089 | if (macb_is_gem(bp)) | |
2090 | return gem_mdc_clk_div(bp); | |
2091 | ||
2092 | pclk_hz = clk_get_rate(bp->pclk); | |
2093 | if (pclk_hz <= 20000000) | |
2094 | config = MACB_BF(CLK, MACB_CLK_DIV8); | |
2095 | else if (pclk_hz <= 40000000) | |
2096 | config = MACB_BF(CLK, MACB_CLK_DIV16); | |
2097 | else if (pclk_hz <= 80000000) | |
2098 | config = MACB_BF(CLK, MACB_CLK_DIV32); | |
2099 | else | |
2100 | config = MACB_BF(CLK, MACB_CLK_DIV64); | |
2101 | ||
2102 | return config; | |
2103 | } | |
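Both dividers above pick the smallest divisor that keeps the management clock within spec: MDC is roughly pclk divided by the selected value, and the breakpoints keep it at or below the traditional 2.5 MHz IEEE 802.3 MDC limit for the pclk ranges the tables anticipate. A small sketch of the resulting frequency for the MACB (non-GEM) table, assuming pclk is the only input:

    #include <stdio.h>

    /* Illustration only: MDC frequency implied by macb_mdc_clk_div() above. */
    static unsigned long macb_mdc_hz_sketch(unsigned long pclk_hz)
    {
    	unsigned int div;

    	if (pclk_hz <= 20000000)
    		div = 8;
    	else if (pclk_hz <= 40000000)
    		div = 16;
    	else if (pclk_hz <= 80000000)
    		div = 32;
    	else
    		div = 64;

    	return pclk_hz / div;
    }

    int main(void)
    {
    	/* e.g. a 133 MHz pclk selects DIV64 -> ~2.08 MHz MDC */
    	printf("%lu Hz\n", macb_mdc_hz_sketch(133000000UL));
    	return 0;
    }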
2104 | ||
64ec42fe | 2105 | /* Get the DMA bus width field of the network configuration register that we |
757a03c6 JI |
2106 | * should program. We find the width by decoding the design configuration |
2107 | * register to find the maximum supported data bus width. | |
2108 | */ | |
2109 | static u32 macb_dbw(struct macb *bp) | |
2110 | { | |
2111 | if (!macb_is_gem(bp)) | |
2112 | return 0; | |
2113 | ||
2114 | switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { | |
2115 | case 4: | |
2116 | return GEM_BF(DBW, GEM_DBW128); | |
2117 | case 2: | |
2118 | return GEM_BF(DBW, GEM_DBW64); | |
2119 | case 1: | |
2120 | default: | |
2121 | return GEM_BF(DBW, GEM_DBW32); | |
2122 | } | |
2123 | } | |
2124 | ||
64ec42fe | 2125 | /* Configure the receive DMA engine |
b3e3bd71 | 2126 | * - use the correct receive buffer size |
e175587f | 2127 | * - set best burst length for DMA operations |
b3e3bd71 NF |
2128 | * (if not supported by FIFO, it will fall back to default) |
2129 | * - set both rx/tx packet buffers to full memory size | |
2130 | * These are configurable parameters for GEM. | |
0116da4f JI |
2131 | */ |
2132 | static void macb_configure_dma(struct macb *bp) | |
2133 | { | |
ae1f2a56 RO |
2134 | struct macb_queue *queue; |
2135 | u32 buffer_size; | |
2136 | unsigned int q; | |
0116da4f JI |
2137 | u32 dmacfg; |
2138 | ||
ae1f2a56 | 2139 | buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; |
0116da4f JI |
2140 | if (macb_is_gem(bp)) { |
2141 | dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); | |
ae1f2a56 RO |
2142 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
2143 | if (q) | |
2144 | queue_writel(queue, RBQS, buffer_size); | |
2145 | else | |
2146 | dmacfg |= GEM_BF(RXBS, buffer_size); | |
2147 | } | |
e175587f NF |
2148 | if (bp->dma_burst_length) |
2149 | dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); | |
b3e3bd71 | 2150 | dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); |
a50dad35 | 2151 | dmacfg &= ~GEM_BIT(ENDIA_PKT); |
62f6924c | 2152 | |
f2ce8a9e | 2153 | if (bp->native_io) |
62f6924c AC |
2154 | dmacfg &= ~GEM_BIT(ENDIA_DESC); |
2155 | else | |
2156 | dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ | |
2157 | ||
85ff3d87 CP |
2158 | if (bp->dev->features & NETIF_F_HW_CSUM) |
2159 | dmacfg |= GEM_BIT(TXCOEN); | |
2160 | else | |
2161 | dmacfg &= ~GEM_BIT(TXCOEN); | |
fff8019a HK |
2162 | |
2163 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | |
7b429614 | 2164 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) |
dc97a89e | 2165 | dmacfg |= GEM_BIT(ADDR64); |
7b429614 RO |
2166 | #endif |
2167 | #ifdef CONFIG_MACB_USE_HWSTAMP | |
2168 | if (bp->hw_dma_cap & HW_DMA_CAP_PTP) | |
2169 | dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT); | |
fff8019a | 2170 | #endif |
e175587f NF |
2171 | netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", |
2172 | dmacfg); | |
0116da4f JI |
2173 | gem_writel(bp, DMACFG, dmacfg); |
2174 | } | |
2175 | } | |
2176 | ||
89e5785f HS |
2177 | static void macb_init_hw(struct macb *bp) |
2178 | { | |
02c958dd CP |
2179 | struct macb_queue *queue; |
2180 | unsigned int q; | |
2181 | ||
89e5785f HS |
2182 | u32 config; |
2183 | ||
2184 | macb_reset_hw(bp); | |
314bccc4 | 2185 | macb_set_hwaddr(bp); |
89e5785f | 2186 | |
70c9f3d4 | 2187 | config = macb_mdc_clk_div(bp); |
022be25c PCK |
2188 | if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) |
2189 | config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); | |
29bc2e1e | 2190 | config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ |
89e5785f HS |
2191 | config |= MACB_BIT(PAE); /* PAuse Enable */ |
2192 | config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ | |
a104a6b3 | 2193 | if (bp->caps & MACB_CAPS_JUMBO) |
98b5a0f4 HK |
2194 | config |= MACB_BIT(JFRAME); /* Enable jumbo frames */ |
2195 | else | |
2196 | config |= MACB_BIT(BIG); /* Receive oversized frames */ | |
89e5785f HS |
2197 | if (bp->dev->flags & IFF_PROMISC) |
2198 | config |= MACB_BIT(CAF); /* Copy All Frames */ | |
924ec53c CP |
2199 | else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) |
2200 | config |= GEM_BIT(RXCOEN); | |
89e5785f HS |
2201 | if (!(bp->dev->flags & IFF_BROADCAST)) |
2202 | config |= MACB_BIT(NBC); /* No BroadCast */ | |
757a03c6 | 2203 | config |= macb_dbw(bp); |
89e5785f | 2204 | macb_writel(bp, NCFGR, config); |
a104a6b3 | 2205 | if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) |
98b5a0f4 | 2206 | gem_writel(bp, JML, bp->jumbo_max_len); |
26cdfb49 VD |
2207 | bp->speed = SPEED_10; |
2208 | bp->duplex = DUPLEX_HALF; | |
98b5a0f4 | 2209 | bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; |
a104a6b3 | 2210 | if (bp->caps & MACB_CAPS_JUMBO) |
98b5a0f4 | 2211 | bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; |
89e5785f | 2212 | |
0116da4f JI |
2213 | macb_configure_dma(bp); |
2214 | ||
89e5785f | 2215 | /* Initialize TX and RX buffers */ |
ae1f2a56 RO |
2216 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
2217 | queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); | |
fff8019a | 2218 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
ae1f2a56 RO |
2219 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) |
2220 | queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma)); | |
fff8019a | 2221 | #endif |
dc97a89e | 2222 | queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); |
fff8019a | 2223 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
7b429614 | 2224 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) |
dc97a89e | 2225 | queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); |
fff8019a | 2226 | #endif |
02c958dd CP |
2227 | |
2228 | /* Enable interrupts */ | |
2229 | queue_writel(queue, IER, | |
2230 | MACB_RX_INT_FLAGS | | |
2231 | MACB_TX_INT_FLAGS | | |
2232 | MACB_BIT(HRESP)); | |
2233 | } | |
89e5785f HS |
2234 | |
2235 | /* Enable TX and RX */ | |
0da70f80 | 2236 | macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); |
89e5785f HS |
2237 | } |
2238 | ||
64ec42fe | 2239 | /* The hash address register is 64 bits long and takes up two |
446ebd01 PV |
2240 | * locations in the memory map. The least significant bits are stored |
2241 | * in EMAC_HSL and the most significant bits in EMAC_HSH. | |
2242 | * | |
2243 | * The unicast hash enable and the multicast hash enable bits in the | |
2244 | * network configuration register enable the reception of hash matched | |
2245 | * frames. The destination address is reduced to a 6 bit index into | |
2246 | * the 64 bit hash register using the following hash function. The | |
2247 | * hash function is an exclusive or of every sixth bit of the | |
2248 | * destination address. | |
2249 | * | |
2250 | * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] | |
2251 | * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] | |
2252 | * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] | |
2253 | * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] | |
2254 | * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] | |
2255 | * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] | |
2256 | * | |
2257 | * da[0] represents the least significant bit of the first byte | |
2258 | * received, that is, the multicast/unicast indicator, and da[47] | |
2259 | * represents the most significant bit of the last byte received. If | |
2260 | * the hash index, hi[n], points to a bit that is set in the hash | |
2261 | * register then the frame will be matched according to whether the | |
2262 | * frame is multicast or unicast. A multicast match will be signalled | |
2263 | * if the multicast hash enable bit is set, da[0] is 1 and the hash | |
2264 | * index points to a bit set in the hash register. A unicast match | |
2265 | * will be signalled if the unicast hash enable bit is set, da[0] is 0 | |
2266 | * and the hash index points to a bit set in the hash register. To | |
2267 | * receive all multicast frames, the hash register should be set with | |
2268 | * all ones and the multicast hash enable bit should be set in the | |
2269 | * network configuration register. | |
2270 | */ | |
2271 | ||
2272 | static inline int hash_bit_value(int bitnr, __u8 *addr) | |
2273 | { | |
2274 | if (addr[bitnr / 8] & (1 << (bitnr % 8))) | |
2275 | return 1; | |
2276 | return 0; | |
2277 | } | |
2278 | ||
64ec42fe | 2279 | /* Return the hash index value for the specified address. */ |
446ebd01 PV |
2280 | static int hash_get_index(__u8 *addr) |
2281 | { | |
2282 | int i, j, bitval; | |
2283 | int hash_index = 0; | |
2284 | ||
2285 | for (j = 0; j < 6; j++) { | |
2286 | for (i = 0, bitval = 0; i < 8; i++) | |
2fa45e22 | 2287 | bitval ^= hash_bit_value(i * 6 + j, addr); |
446ebd01 PV |
2288 | |
2289 | hash_index |= (bitval << j); | |
2290 | } | |
2291 | ||
2292 | return hash_index; | |
2293 | } | |
2294 | ||
64ec42fe | 2295 | /* Add multicast addresses to the internal multicast-hash table. */ |
446ebd01 PV |
2296 | static void macb_sethashtable(struct net_device *dev) |
2297 | { | |
22bedad3 | 2298 | struct netdev_hw_addr *ha; |
446ebd01 | 2299 | unsigned long mc_filter[2]; |
f9dcbcc9 | 2300 | unsigned int bitnr; |
446ebd01 PV |
2301 | struct macb *bp = netdev_priv(dev); |
2302 | ||
aa50b552 MF |
2303 | mc_filter[0] = 0; |
2304 | mc_filter[1] = 0; | |
446ebd01 | 2305 | |
22bedad3 JP |
2306 | netdev_for_each_mc_addr(ha, dev) { |
2307 | bitnr = hash_get_index(ha->addr); | |
446ebd01 PV |
2308 | mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); |
2309 | } | |
2310 | ||
f75ba50b JI |
2311 | macb_or_gem_writel(bp, HRB, mc_filter[0]); |
2312 | macb_or_gem_writel(bp, HRT, mc_filter[1]); | |
446ebd01 PV |
2313 | } |
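Putting the two helpers together: the index is a 6-bit XOR fold of the 48-bit destination address, and the resulting bit lands in HRB (indexes 0-31) or HRT (32-63), exactly as macb_sethashtable() computes mc_filter[]. A stand-alone sketch follows, with an example multicast address chosen purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-alone rendition of hash_get_index(): XOR every sixth bit of the
     * destination address into a 6-bit index (da[0] is the LSB of the first byte).
     */
    static int hash_index_sketch(const uint8_t *addr)
    {
    	int index = 0;

    	for (int j = 0; j < 6; j++) {
    		int bitval = 0;

    		for (int i = 0; i < 8; i++)
    			bitval ^= (addr[(i * 6 + j) / 8] >> ((i * 6 + j) % 8)) & 1;
    		index |= bitval << j;
    	}
    	return index;
    }

    int main(void)
    {
    	/* Example: the mapped MAC address of IPv4 multicast group 224.0.0.1 */
    	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    	int idx = hash_index_sketch(mcast);

    	printf("hash index %d -> %s bit %d\n",
    	       idx, idx < 32 ? "HRB" : "HRT", idx & 31);
    	return 0;
    }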
2314 | ||
64ec42fe | 2315 | /* Enable/Disable promiscuous and multicast modes. */ |
421d9df0 | 2316 | static void macb_set_rx_mode(struct net_device *dev) |
446ebd01 PV |
2317 | { |
2318 | unsigned long cfg; | |
2319 | struct macb *bp = netdev_priv(dev); | |
2320 | ||
2321 | cfg = macb_readl(bp, NCFGR); | |
2322 | ||
924ec53c | 2323 | if (dev->flags & IFF_PROMISC) { |
446ebd01 PV |
2324 | /* Enable promiscuous mode */ |
2325 | cfg |= MACB_BIT(CAF); | |
924ec53c CP |
2326 | |
2327 | /* Disable RX checksum offload */ | |
2328 | if (macb_is_gem(bp)) | |
2329 | cfg &= ~GEM_BIT(RXCOEN); | |
2330 | } else { | |
2331 | /* Disable promiscuous mode */ | |
446ebd01 PV |
2332 | cfg &= ~MACB_BIT(CAF); |
2333 | ||
924ec53c CP |
2334 | /* Enable RX checksum offload only if requested */ |
2335 | if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) | |
2336 | cfg |= GEM_BIT(RXCOEN); | |
2337 | } | |
2338 | ||
446ebd01 PV |
2339 | if (dev->flags & IFF_ALLMULTI) { |
2340 | /* Enable all multicast mode */ | |
f75ba50b JI |
2341 | macb_or_gem_writel(bp, HRB, -1); |
2342 | macb_or_gem_writel(bp, HRT, -1); | |
446ebd01 | 2343 | cfg |= MACB_BIT(NCFGR_MTI); |
4cd24eaf | 2344 | } else if (!netdev_mc_empty(dev)) { |
446ebd01 PV |
2345 | /* Enable specific multicasts */ |
2346 | macb_sethashtable(dev); | |
2347 | cfg |= MACB_BIT(NCFGR_MTI); | |
2348 | } else if (dev->flags & (~IFF_ALLMULTI)) { | |
2349 | /* Disable all multicast mode */ | |
f75ba50b JI |
2350 | macb_or_gem_writel(bp, HRB, 0); |
2351 | macb_or_gem_writel(bp, HRT, 0); | |
446ebd01 PV |
2352 | cfg &= ~MACB_BIT(NCFGR_MTI); |
2353 | } | |
2354 | ||
2355 | macb_writel(bp, NCFGR, cfg); | |
2356 | } | |
2357 | ||
89e5785f HS |
2358 | static int macb_open(struct net_device *dev) |
2359 | { | |
2360 | struct macb *bp = netdev_priv(dev); | |
4df95131 | 2361 | size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; |
ae1f2a56 RO |
2362 | struct macb_queue *queue; |
2363 | unsigned int q; | |
89e5785f HS |
2364 | int err; |
2365 | ||
c220f8cd | 2366 | netdev_dbg(bp->dev, "open\n"); |
89e5785f | 2367 | |
03fc4721 NF |
2368 | /* carrier starts down */ |
2369 | netif_carrier_off(dev); | |
2370 | ||
6c36a707 | 2371 | /* if the PHY is not yet registered, retry later */ |
0a91281e | 2372 | if (!dev->phydev) |
6c36a707 | 2373 | return -EAGAIN; |
1b44791a NF |
2374 | |
2375 | /* RX buffers initialization */ | |
4df95131 | 2376 | macb_init_rx_buffer_size(bp, bufsz); |
6c36a707 | 2377 | |
89e5785f HS |
2378 | err = macb_alloc_consistent(bp); |
2379 | if (err) { | |
c220f8cd JI |
2380 | netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", |
2381 | err); | |
89e5785f HS |
2382 | return err; |
2383 | } | |
2384 | ||
4df95131 | 2385 | bp->macbgem_ops.mog_init_rings(bp); |
89e5785f | 2386 | macb_init_hw(bp); |
89e5785f | 2387 | |
ae1f2a56 RO |
2388 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) |
2389 | napi_enable(&queue->napi); | |
2390 | ||
6c36a707 | 2391 | /* schedule a link state check */ |
0a91281e | 2392 | phy_start(dev->phydev); |
89e5785f | 2393 | |
02c958dd | 2394 | netif_tx_start_all_queues(dev); |
89e5785f | 2395 | |
c2594d80 AP |
2396 | if (bp->ptp_info) |
2397 | bp->ptp_info->ptp_init(dev); | |
2398 | ||
89e5785f HS |
2399 | return 0; |
2400 | } | |
2401 | ||
2402 | static int macb_close(struct net_device *dev) | |
2403 | { | |
2404 | struct macb *bp = netdev_priv(dev); | |
ae1f2a56 | 2405 | struct macb_queue *queue; |
89e5785f | 2406 | unsigned long flags; |
ae1f2a56 | 2407 | unsigned int q; |
89e5785f | 2408 | |
02c958dd | 2409 | netif_tx_stop_all_queues(dev); |
ae1f2a56 RO |
2410 | |
2411 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) | |
2412 | napi_disable(&queue->napi); | |
89e5785f | 2413 | |
0a91281e PR |
2414 | if (dev->phydev) |
2415 | phy_stop(dev->phydev); | |
6c36a707 | 2416 | |
89e5785f HS |
2417 | spin_lock_irqsave(&bp->lock, flags); |
2418 | macb_reset_hw(bp); | |
2419 | netif_carrier_off(dev); | |
2420 | spin_unlock_irqrestore(&bp->lock, flags); | |
2421 | ||
2422 | macb_free_consistent(bp); | |
2423 | ||
c2594d80 AP |
2424 | if (bp->ptp_info) |
2425 | bp->ptp_info->ptp_remove(dev); | |
2426 | ||
89e5785f HS |
2427 | return 0; |
2428 | } | |
2429 | ||
a5898ea0 HK |
2430 | static int macb_change_mtu(struct net_device *dev, int new_mtu) |
2431 | { | |
a5898ea0 HK |
2432 | if (netif_running(dev)) |
2433 | return -EBUSY; | |
2434 | ||
a5898ea0 HK |
2435 | dev->mtu = new_mtu; |
2436 | ||
2437 | return 0; | |
2438 | } | |
2439 | ||
a494ed8e JI |
2440 | static void gem_update_stats(struct macb *bp) |
2441 | { | |
512286bb RO |
2442 | struct macb_queue *queue; |
2443 | unsigned int i, q, idx; | |
2444 | unsigned long *stat; | |
2445 | ||
a494ed8e | 2446 | u32 *p = &bp->hw_stats.gem.tx_octets_31_0; |
a494ed8e | 2447 | |
3ff13f1c XH |
2448 | for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { |
2449 | u32 offset = gem_statistics[i].offset; | |
7a6e0706 | 2450 | u64 val = bp->macb_reg_readl(bp, offset); |
3ff13f1c XH |
2451 | |
2452 | bp->ethtool_stats[i] += val; | |
2453 | *p += val; | |
2454 | ||
2455 | if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { | |
2456 | /* Add GEM_OCTTXH, GEM_OCTRXH */ | |
7a6e0706 | 2457 | val = bp->macb_reg_readl(bp, offset + 4); |
2fa45e22 | 2458 | bp->ethtool_stats[i] += ((u64)val) << 32; |
3ff13f1c XH |
2459 | *(++p) += val; |
2460 | } | |
2461 | } | |
512286bb RO |
2462 | |
2463 | idx = GEM_STATS_LEN; | |
2464 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) | |
2465 | for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat) | |
2466 | bp->ethtool_stats[idx++] = *stat; | |
a494ed8e JI |
2467 | } |
2468 | ||
2469 | static struct net_device_stats *gem_get_stats(struct macb *bp) | |
2470 | { | |
2471 | struct gem_stats *hwstat = &bp->hw_stats.gem; | |
5f1d3a5c | 2472 | struct net_device_stats *nstat = &bp->dev->stats; |
a494ed8e JI |
2473 | |
2474 | gem_update_stats(bp); | |
2475 | ||
2476 | nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + | |
2477 | hwstat->rx_alignment_errors + | |
2478 | hwstat->rx_resource_errors + | |
2479 | hwstat->rx_overruns + | |
2480 | hwstat->rx_oversize_frames + | |
2481 | hwstat->rx_jabbers + | |
2482 | hwstat->rx_undersized_frames + | |
2483 | hwstat->rx_length_field_frame_errors); | |
2484 | nstat->tx_errors = (hwstat->tx_late_collisions + | |
2485 | hwstat->tx_excessive_collisions + | |
2486 | hwstat->tx_underrun + | |
2487 | hwstat->tx_carrier_sense_errors); | |
2488 | nstat->multicast = hwstat->rx_multicast_frames; | |
2489 | nstat->collisions = (hwstat->tx_single_collision_frames + | |
2490 | hwstat->tx_multiple_collision_frames + | |
2491 | hwstat->tx_excessive_collisions); | |
2492 | nstat->rx_length_errors = (hwstat->rx_oversize_frames + | |
2493 | hwstat->rx_jabbers + | |
2494 | hwstat->rx_undersized_frames + | |
2495 | hwstat->rx_length_field_frame_errors); | |
2496 | nstat->rx_over_errors = hwstat->rx_resource_errors; | |
2497 | nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; | |
2498 | nstat->rx_frame_errors = hwstat->rx_alignment_errors; | |
2499 | nstat->rx_fifo_errors = hwstat->rx_overruns; | |
2500 | nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; | |
2501 | nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; | |
2502 | nstat->tx_fifo_errors = hwstat->tx_underrun; | |
2503 | ||
2504 | return nstat; | |
2505 | } | |
2506 | ||
3ff13f1c XH |
2507 | static void gem_get_ethtool_stats(struct net_device *dev, |
2508 | struct ethtool_stats *stats, u64 *data) | |
2509 | { | |
2510 | struct macb *bp; | |
2511 | ||
2512 | bp = netdev_priv(dev); | |
2513 | gem_update_stats(bp); | |
512286bb RO |
2514 | memcpy(data, &bp->ethtool_stats, sizeof(u64) |
2515 | * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES)); | |
3ff13f1c XH |
2516 | } |
2517 | ||
2518 | static int gem_get_sset_count(struct net_device *dev, int sset) | |
2519 | { | |
512286bb RO |
2520 | struct macb *bp = netdev_priv(dev); |
2521 | ||
3ff13f1c XH |
2522 | switch (sset) { |
2523 | case ETH_SS_STATS: | |
512286bb | 2524 | return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; |
3ff13f1c XH |
2525 | default: |
2526 | return -EOPNOTSUPP; | |
2527 | } | |
2528 | } | |
2529 | ||
2530 | static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) | |
2531 | { | |
512286bb RO |
2532 | char stat_string[ETH_GSTRING_LEN]; |
2533 | struct macb *bp = netdev_priv(dev); | |
2534 | struct macb_queue *queue; | |
8bcbf82f | 2535 | unsigned int i; |
512286bb | 2536 | unsigned int q; |
3ff13f1c XH |
2537 | |
2538 | switch (sset) { | |
2539 | case ETH_SS_STATS: | |
2540 | for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN) | |
2541 | memcpy(p, gem_statistics[i].stat_string, | |
2542 | ETH_GSTRING_LEN); | |
512286bb RO |
2543 | |
2544 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { | |
2545 | for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) { | |
2546 | snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s", | |
2547 | q, queue_statistics[i].stat_string); | |
2548 | memcpy(p, stat_string, ETH_GSTRING_LEN); | |
2549 | } | |
2550 | } | |
3ff13f1c XH |
2551 | break; |
2552 | } | |
2553 | } | |
2554 | ||
421d9df0 | 2555 | static struct net_device_stats *macb_get_stats(struct net_device *dev) |
89e5785f HS |
2556 | { |
2557 | struct macb *bp = netdev_priv(dev); | |
5f1d3a5c | 2558 | struct net_device_stats *nstat = &bp->dev->stats; |
a494ed8e JI |
2559 | struct macb_stats *hwstat = &bp->hw_stats.macb; |
2560 | ||
2561 | if (macb_is_gem(bp)) | |
2562 | return gem_get_stats(bp); | |
89e5785f | 2563 | |
6c36a707 R |
2564 | /* read stats from hardware */ |
2565 | macb_update_stats(bp); | |
2566 | ||
89e5785f HS |
2567 | /* Convert HW stats into netdevice stats */ |
2568 | nstat->rx_errors = (hwstat->rx_fcs_errors + | |
2569 | hwstat->rx_align_errors + | |
2570 | hwstat->rx_resource_errors + | |
2571 | hwstat->rx_overruns + | |
2572 | hwstat->rx_oversize_pkts + | |
2573 | hwstat->rx_jabbers + | |
2574 | hwstat->rx_undersize_pkts + | |
89e5785f HS |
2575 | hwstat->rx_length_mismatch); |
2576 | nstat->tx_errors = (hwstat->tx_late_cols + | |
2577 | hwstat->tx_excessive_cols + | |
2578 | hwstat->tx_underruns + | |
716723c2 WS |
2579 | hwstat->tx_carrier_errors + |
2580 | hwstat->sqe_test_errors); | |
89e5785f HS |
2581 | nstat->collisions = (hwstat->tx_single_cols + |
2582 | hwstat->tx_multiple_cols + | |
2583 | hwstat->tx_excessive_cols); | |
2584 | nstat->rx_length_errors = (hwstat->rx_oversize_pkts + | |
2585 | hwstat->rx_jabbers + | |
2586 | hwstat->rx_undersize_pkts + | |
2587 | hwstat->rx_length_mismatch); | |
b19f7f71 AS |
2588 | nstat->rx_over_errors = hwstat->rx_resource_errors + |
2589 | hwstat->rx_overruns; | |
89e5785f HS |
2590 | nstat->rx_crc_errors = hwstat->rx_fcs_errors; |
2591 | nstat->rx_frame_errors = hwstat->rx_align_errors; | |
2592 | nstat->rx_fifo_errors = hwstat->rx_overruns; | |
2593 | /* XXX: What does "missed" mean? */ | |
2594 | nstat->tx_aborted_errors = hwstat->tx_excessive_cols; | |
2595 | nstat->tx_carrier_errors = hwstat->tx_carrier_errors; | |
2596 | nstat->tx_fifo_errors = hwstat->tx_underruns; | |
2597 | /* Don't know about heartbeat or window errors... */ | |
2598 | ||
2599 | return nstat; | |
2600 | } | |
2601 | ||
d1d1b53d NF |
2602 | static int macb_get_regs_len(struct net_device *netdev) |
2603 | { | |
2604 | return MACB_GREGS_NBR * sizeof(u32); | |
2605 | } | |
2606 | ||
2607 | static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, | |
2608 | void *p) | |
2609 | { | |
2610 | struct macb *bp = netdev_priv(dev); | |
2611 | unsigned int tail, head; | |
2612 | u32 *regs_buff = p; | |
2613 | ||
2614 | regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) | |
2615 | | MACB_GREGS_VERSION; | |
2616 | ||
b410d13e ZB |
2617 | tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); |
2618 | head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); | |
d1d1b53d NF |
2619 | |
2620 | regs_buff[0] = macb_readl(bp, NCR); | |
2621 | regs_buff[1] = macb_or_gem_readl(bp, NCFGR); | |
2622 | regs_buff[2] = macb_readl(bp, NSR); | |
2623 | regs_buff[3] = macb_readl(bp, TSR); | |
2624 | regs_buff[4] = macb_readl(bp, RBQP); | |
2625 | regs_buff[5] = macb_readl(bp, TBQP); | |
2626 | regs_buff[6] = macb_readl(bp, RSR); | |
2627 | regs_buff[7] = macb_readl(bp, IMR); | |
2628 | ||
2629 | regs_buff[8] = tail; | |
2630 | regs_buff[9] = head; | |
02c958dd CP |
2631 | regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); |
2632 | regs_buff[11] = macb_tx_dma(&bp->queues[0], head); | |
d1d1b53d | 2633 | |
ce721a70 NA |
2634 | if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) |
2635 | regs_buff[12] = macb_or_gem_readl(bp, USRIO); | |
64ec42fe | 2636 | if (macb_is_gem(bp)) |
d1d1b53d | 2637 | regs_buff[13] = gem_readl(bp, DMACFG); |
d1d1b53d NF |
2638 | } |
2639 | ||
3e2a5e15 SP |
2640 | static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) |
2641 | { | |
2642 | struct macb *bp = netdev_priv(netdev); | |
2643 | ||
2644 | wol->supported = 0; | |
2645 | wol->wolopts = 0; | |
2646 | ||
2647 | if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { | |
2648 | wol->supported = WAKE_MAGIC; | |
2649 | ||
2650 | if (bp->wol & MACB_WOL_ENABLED) | |
2651 | wol->wolopts |= WAKE_MAGIC; | |
2652 | } | |
2653 | } | |
2654 | ||
2655 | static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |
2656 | { | |
2657 | struct macb *bp = netdev_priv(netdev); | |
2658 | ||
2659 | if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || | |
2660 | (wol->wolopts & ~WAKE_MAGIC)) | |
2661 | return -EOPNOTSUPP; | |
2662 | ||
2663 | if (wol->wolopts & WAKE_MAGIC) | |
2664 | bp->wol |= MACB_WOL_ENABLED; | |
2665 | else | |
2666 | bp->wol &= ~MACB_WOL_ENABLED; | |
2667 | ||
2668 | device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); | |
2669 | ||
2670 | return 0; | |
2671 | } | |
2672 | ||
8441bb33 ZB |
2673 | static void macb_get_ringparam(struct net_device *netdev, |
2674 | struct ethtool_ringparam *ring) | |
2675 | { | |
2676 | struct macb *bp = netdev_priv(netdev); | |
2677 | ||
2678 | ring->rx_max_pending = MAX_RX_RING_SIZE; | |
2679 | ring->tx_max_pending = MAX_TX_RING_SIZE; | |
2680 | ||
2681 | ring->rx_pending = bp->rx_ring_size; | |
2682 | ring->tx_pending = bp->tx_ring_size; | |
2683 | } | |
2684 | ||
2685 | static int macb_set_ringparam(struct net_device *netdev, | |
2686 | struct ethtool_ringparam *ring) | |
2687 | { | |
2688 | struct macb *bp = netdev_priv(netdev); | |
2689 | u32 new_rx_size, new_tx_size; | |
2690 | unsigned int reset = 0; | |
2691 | ||
2692 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | |
2693 | return -EINVAL; | |
2694 | ||
2695 | new_rx_size = clamp_t(u32, ring->rx_pending, | |
2696 | MIN_RX_RING_SIZE, MAX_RX_RING_SIZE); | |
2697 | new_rx_size = roundup_pow_of_two(new_rx_size); | |
2698 | ||
2699 | new_tx_size = clamp_t(u32, ring->tx_pending, | |
2700 | MIN_TX_RING_SIZE, MAX_TX_RING_SIZE); | |
2701 | new_tx_size = roundup_pow_of_two(new_tx_size); | |
2702 | ||
2703 | if ((new_tx_size == bp->tx_ring_size) && | |
2704 | (new_rx_size == bp->rx_ring_size)) { | |
2705 | /* nothing to do */ | |
2706 | return 0; | |
2707 | } | |
2708 | ||
2709 | if (netif_running(bp->dev)) { | |
2710 | reset = 1; | |
2711 | macb_close(bp->dev); | |
2712 | } | |
2713 | ||
2714 | bp->rx_ring_size = new_rx_size; | |
2715 | bp->tx_ring_size = new_tx_size; | |
2716 | ||
2717 | if (reset) | |
2718 | macb_open(bp->dev); | |
2719 | ||
2720 | return 0; | |
2721 | } | |
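The requested sizes are first clamped to the MIN/MAX bounds above and then rounded up to the next power of two, so a request for, say, 100 RX descriptors (e.g. via ethtool -G) actually installs 128. A tiny sketch of that normalization, using plain C in place of the kernel's clamp_t() and roundup_pow_of_two() helpers:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the normalization in macb_set_ringparam(): clamp, then round up
     * to a power of two (the MIN bounds are already powers of two).
     */
    static uint32_t ring_size_sketch(uint32_t requested, uint32_t min, uint32_t max)
    {
    	uint32_t size = requested < min ? min : (requested > max ? max : requested);
    	uint32_t pow2 = min;

    	while (pow2 < size)
    		pow2 <<= 1;
    	return pow2;
    }

    int main(void)
    {
    	/* e.g. a request for 100 RX descriptors becomes 128 */
    	printf("%u\n", ring_size_sketch(100, 64, 8192));
    	return 0;
    }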
2722 | ||
ab91f0a9 RO |
2723 | #ifdef CONFIG_MACB_USE_HWSTAMP |
2724 | static unsigned int gem_get_tsu_rate(struct macb *bp) | |
2725 | { | |
2726 | struct clk *tsu_clk; | |
2727 | unsigned int tsu_rate; | |
2728 | ||
2729 | tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); | |
2730 | if (!IS_ERR(tsu_clk)) | |
2731 | tsu_rate = clk_get_rate(tsu_clk); | |
2732 | /* try pclk instead */ | |
2733 | else if (!IS_ERR(bp->pclk)) { | |
2734 | tsu_clk = bp->pclk; | |
2735 | tsu_rate = clk_get_rate(tsu_clk); | |
2736 | } else | |
2737 | return -ENOTSUPP; | |
2738 | return tsu_rate; | |
2739 | } | |
2740 | ||
2741 | static s32 gem_get_ptp_max_adj(void) | |
2742 | { | |
2743 | return 64000000; | |
2744 | } | |
2745 | ||
2746 | static int gem_get_ts_info(struct net_device *dev, | |
2747 | struct ethtool_ts_info *info) | |
2748 | { | |
2749 | struct macb *bp = netdev_priv(dev); | |
2750 | ||
2751 | if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { | |
2752 | ethtool_op_get_ts_info(dev, info); | |
2753 | return 0; | |
2754 | } | |
2755 | ||
2756 | info->so_timestamping = | |
2757 | SOF_TIMESTAMPING_TX_SOFTWARE | | |
2758 | SOF_TIMESTAMPING_RX_SOFTWARE | | |
2759 | SOF_TIMESTAMPING_SOFTWARE | | |
2760 | SOF_TIMESTAMPING_TX_HARDWARE | | |
2761 | SOF_TIMESTAMPING_RX_HARDWARE | | |
2762 | SOF_TIMESTAMPING_RAW_HARDWARE; | |
2763 | info->tx_types = | |
2764 | (1 << HWTSTAMP_TX_ONESTEP_SYNC) | | |
2765 | (1 << HWTSTAMP_TX_OFF) | | |
2766 | (1 << HWTSTAMP_TX_ON); | |
2767 | info->rx_filters = | |
2768 | (1 << HWTSTAMP_FILTER_NONE) | | |
2769 | (1 << HWTSTAMP_FILTER_ALL); | |
2770 | ||
2771 | info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1; | |
2772 | ||
2773 | return 0; | |
2774 | } | |
2775 | ||
2776 | static struct macb_ptp_info gem_ptp_info = { | |
2777 | .ptp_init = gem_ptp_init, | |
2778 | .ptp_remove = gem_ptp_remove, | |
2779 | .get_ptp_max_adj = gem_get_ptp_max_adj, | |
2780 | .get_tsu_rate = gem_get_tsu_rate, | |
2781 | .get_ts_info = gem_get_ts_info, | |
2782 | .get_hwtst = gem_get_hwtst, | |
2783 | .set_hwtst = gem_set_hwtst, | |
2784 | }; | |
2785 | #endif | |
2786 | ||
c2594d80 AP |
2787 | static int macb_get_ts_info(struct net_device *netdev, |
2788 | struct ethtool_ts_info *info) | |
2789 | { | |
2790 | struct macb *bp = netdev_priv(netdev); | |
2791 | ||
2792 | if (bp->ptp_info) | |
2793 | return bp->ptp_info->get_ts_info(netdev, info); | |
2794 | ||
2795 | return ethtool_op_get_ts_info(netdev, info); | |
2796 | } | |
2797 | ||
ae8223de RO |
2798 | static void gem_enable_flow_filters(struct macb *bp, bool enable) |
2799 | { | |
2800 | struct ethtool_rx_fs_item *item; | |
2801 | u32 t2_scr; | |
2802 | int num_t2_scr; | |
2803 | ||
2804 | num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); | |
2805 | ||
2806 | list_for_each_entry(item, &bp->rx_fs_list.list, list) { | |
2807 | struct ethtool_rx_flow_spec *fs = &item->fs; | |
2808 | struct ethtool_tcpip4_spec *tp4sp_m; | |
2809 | ||
2810 | if (fs->location >= num_t2_scr) | |
2811 | continue; | |
2812 | ||
2813 | t2_scr = gem_readl_n(bp, SCRT2, fs->location); | |
2814 | ||
2815 | /* enable/disable screener regs for the flow entry */ | |
2816 | t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr); | |
2817 | ||
2818 | /* only enable fields with no masking */ | |
2819 | tp4sp_m = &(fs->m_u.tcp_ip4_spec); | |
2820 | ||
2821 | if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF)) | |
2822 | t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr); | |
2823 | else | |
2824 | t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr); | |
2825 | ||
2826 | if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF)) | |
2827 | t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr); | |
2828 | else | |
2829 | t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr); | |
2830 | ||
2831 | if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF))) | |
2832 | t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr); | |
2833 | else | |
2834 | t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr); | |
2835 | ||
2836 | gem_writel_n(bp, SCRT2, fs->location, t2_scr); | |
2837 | } | |
2838 | } | |
2839 | ||
2840 | static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) | |
2841 | { | |
2842 | struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m; | |
2843 | uint16_t index = fs->location; | |
2844 | u32 w0, w1, t2_scr; | |
2845 | bool cmp_a = false; | |
2846 | bool cmp_b = false; | |
2847 | bool cmp_c = false; | |
2848 | ||
2849 | tp4sp_v = &(fs->h_u.tcp_ip4_spec); | |
2850 | tp4sp_m = &(fs->m_u.tcp_ip4_spec); | |
2851 | ||
2852 | /* ignore field if any masking set */ | |
2853 | if (tp4sp_m->ip4src == 0xFFFFFFFF) { | |
2854 | /* 1st compare reg - IP source address */ | |
2855 | w0 = 0; | |
2856 | w1 = 0; | |
2857 | w0 = tp4sp_v->ip4src; | |
2858 | w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ | |
2859 | w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); | |
2860 | w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1); | |
2861 | gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0); | |
2862 | gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1); | |
2863 | cmp_a = true; | |
2864 | } | |
2865 | ||
2866 | /* ignore field if any masking set */ | |
2867 | if (tp4sp_m->ip4dst == 0xFFFFFFFF) { | |
2868 | /* 2nd compare reg - IP destination address */ | |
2869 | w0 = 0; | |
2870 | w1 = 0; | |
2871 | w0 = tp4sp_v->ip4dst; | |
2872 | w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ | |
2873 | w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1); | |
2874 | w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1); | |
2875 | gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0); | |
2876 | gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1); | |
2877 | cmp_b = true; | |
2878 | } | |
2879 | ||
2880 | /* ignore both port fields if masking set in both */ | |
2881 | if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) { | |
2882 | /* 3rd compare reg - source port, destination port */ | |
2883 | w0 = 0; | |
2884 | w1 = 0; | |
2885 | w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1); | |
2886 | if (tp4sp_m->psrc == tp4sp_m->pdst) { | |
2887 | w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0); | |
2888 | w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); | |
2889 | w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */ | |
2890 | w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); | |
2891 | } else { | |
2892 | /* only one port definition */ | |
2893 | w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */ | |
2894 | w0 = GEM_BFINS(T2MASK, 0xFFFF, w0); | |
2895 | if (tp4sp_m->psrc == 0xFFFF) { /* src port */ | |
2896 | w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0); | |
2897 | w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1); | |
2898 | } else { /* dst port */ | |
2899 | w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0); | |
2900 | w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1); | |
2901 | } | |
2902 | } | |
2903 | gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0); | |
2904 | gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1); | |
2905 | cmp_c = true; | |
2906 | } | |
2907 | ||
2908 | t2_scr = 0; | |
2909 | t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr); | |
2910 | t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr); | |
2911 | if (cmp_a) | |
2912 | t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr); | |
2913 | if (cmp_b) | |
2914 | t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr); | |
2915 | if (cmp_c) | |
2916 | t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr); | |
2917 | gem_writel_n(bp, SCRT2, index, t2_scr); | |
2918 | } | |
2919 | ||
2920 | static int gem_add_flow_filter(struct net_device *netdev, | |
2921 | struct ethtool_rxnfc *cmd) | |
2922 | { | |
2923 | struct macb *bp = netdev_priv(netdev); | |
2924 | struct ethtool_rx_flow_spec *fs = &cmd->fs; | |
2925 | struct ethtool_rx_fs_item *item, *newfs; | |
7038cdb7 | 2926 | unsigned long flags; |
ae8223de RO |
2927 | int ret = -EINVAL; |
2928 | bool added = false; | |
2929 | ||
cc1674ee | 2930 | newfs = kmalloc(sizeof(*newfs), GFP_KERNEL); |
ae8223de RO |
2931 | if (newfs == NULL) |
2932 | return -ENOMEM; | |
2933 | memcpy(&newfs->fs, fs, sizeof(newfs->fs)); | |
2934 | ||
2935 | netdev_dbg(netdev, | |
2936 | "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", | |
2937 | fs->flow_type, (int)fs->ring_cookie, fs->location, | |
2938 | htonl(fs->h_u.tcp_ip4_spec.ip4src), | |
2939 | htonl(fs->h_u.tcp_ip4_spec.ip4dst), | |
2940 | htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst)); | |
2941 | ||
7038cdb7 JC |
2942 | spin_lock_irqsave(&bp->rx_fs_lock, flags); |
2943 | ||
ae8223de | 2944 | /* find correct place to add in list */ |
a3da8adc JC |
2945 | list_for_each_entry(item, &bp->rx_fs_list.list, list) { |
2946 | if (item->fs.location > newfs->fs.location) { | |
2947 | list_add_tail(&newfs->list, &item->list); | |
2948 | added = true; | |
2949 | break; | |
2950 | } else if (item->fs.location == fs->location) { | |
2951 | netdev_err(netdev, "Rule not added: location %d not free!\n", | |
2952 | fs->location); | |
2953 | ret = -EBUSY; | |
2954 | goto err; | |
ae8223de | 2955 | } |
ae8223de | 2956 | } |
a3da8adc JC |
2957 | if (!added) |
2958 | list_add_tail(&newfs->list, &bp->rx_fs_list.list); | |
ae8223de RO |
2959 | |
2960 | gem_prog_cmp_regs(bp, fs); | |
2961 | bp->rx_fs_list.count++; | |
2962 | /* enable filtering if NTUPLE on */ | |
2963 | if (netdev->features & NETIF_F_NTUPLE) | |
2964 | gem_enable_flow_filters(bp, 1); | |
2965 | ||
7038cdb7 | 2966 | spin_unlock_irqrestore(&bp->rx_fs_lock, flags); |
ae8223de RO |
2967 | return 0; |
2968 | ||
2969 | err: | |
7038cdb7 | 2970 | spin_unlock_irqrestore(&bp->rx_fs_lock, flags); |
ae8223de RO |
2971 | kfree(newfs); |
2972 | return ret; | |
2973 | } | |
2974 | ||
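/* Editor's note: with NETIF_F_NTUPLE enabled, rules like the one added above
 * are typically installed from userspace along the lines of
 *	ethtool -N eth0 flow-type tcp4 src-ip 192.168.0.1 dst-ip 192.168.0.2 \
 *		src-port 5001 dst-port 80 action 1 loc 0
 * where "action" lands in fs->ring_cookie (the RX queue) and "loc" in
 * fs->location (checked against bp->max_tuples in gem_set_rxnfc()). The
 * command and interface name are illustrative; exact syntax depends on the
 * ethtool version.
 */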
2975 | static int gem_del_flow_filter(struct net_device *netdev, | |
2976 | struct ethtool_rxnfc *cmd) | |
2977 | { | |
2978 | struct macb *bp = netdev_priv(netdev); | |
2979 | struct ethtool_rx_fs_item *item; | |
2980 | struct ethtool_rx_flow_spec *fs; | |
7038cdb7 JC |
2981 | unsigned long flags; |
2982 | ||
2983 | spin_lock_irqsave(&bp->rx_fs_lock, flags); | |
ae8223de | 2984 | |
ae8223de RO |
2985 | list_for_each_entry(item, &bp->rx_fs_list.list, list) { |
2986 | if (item->fs.location == cmd->fs.location) { | |
2987 | /* disable screener regs for the flow entry */ | |
2988 | fs = &(item->fs); | |
2989 | netdev_dbg(netdev, | |
2990 | "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n", | |
2991 | fs->flow_type, (int)fs->ring_cookie, fs->location, | |
2992 | htonl(fs->h_u.tcp_ip4_spec.ip4src), | |
2993 | htonl(fs->h_u.tcp_ip4_spec.ip4dst), | |
2994 | htons(fs->h_u.tcp_ip4_spec.psrc), | |
2995 | htons(fs->h_u.tcp_ip4_spec.pdst)); | |
2996 | ||
2997 | gem_writel_n(bp, SCRT2, fs->location, 0); | |
2998 | ||
2999 | list_del(&item->list); | |
ae8223de | 3000 | bp->rx_fs_list.count--; |
7038cdb7 JC |
3001 | spin_unlock_irqrestore(&bp->rx_fs_lock, flags); |
3002 | kfree(item); | |
ae8223de RO |
3003 | return 0; |
3004 | } | |
3005 | } | |
3006 | ||
7038cdb7 | 3007 | spin_unlock_irqrestore(&bp->rx_fs_lock, flags); |
ae8223de RO |
3008 | return -EINVAL; |
3009 | } | |
3010 | ||
3011 | static int gem_get_flow_entry(struct net_device *netdev, | |
3012 | struct ethtool_rxnfc *cmd) | |
3013 | { | |
3014 | struct macb *bp = netdev_priv(netdev); | |
3015 | struct ethtool_rx_fs_item *item; | |
3016 | ||
3017 | list_for_each_entry(item, &bp->rx_fs_list.list, list) { | |
3018 | if (item->fs.location == cmd->fs.location) { | |
3019 | memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs)); | |
3020 | return 0; | |
3021 | } | |
3022 | } | |
3023 | return -EINVAL; | |
3024 | } | |
3025 | ||
3026 | static int gem_get_all_flow_entries(struct net_device *netdev, | |
3027 | struct ethtool_rxnfc *cmd, u32 *rule_locs) | |
3028 | { | |
3029 | struct macb *bp = netdev_priv(netdev); | |
3030 | struct ethtool_rx_fs_item *item; | |
3031 | uint32_t cnt = 0; | |
3032 | ||
3033 | list_for_each_entry(item, &bp->rx_fs_list.list, list) { | |
3034 | if (cnt == cmd->rule_cnt) | |
3035 | return -EMSGSIZE; | |
3036 | rule_locs[cnt] = item->fs.location; | |
3037 | cnt++; | |
3038 | } | |
3039 | cmd->data = bp->max_tuples; | |
3040 | cmd->rule_cnt = cnt; | |
3041 | ||
3042 | return 0; | |
3043 | } | |
3044 | ||
3045 | static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, | |
3046 | u32 *rule_locs) | |
3047 | { | |
3048 | struct macb *bp = netdev_priv(netdev); | |
3049 | int ret = 0; | |
3050 | ||
3051 | switch (cmd->cmd) { | |
3052 | case ETHTOOL_GRXRINGS: | |
3053 | cmd->data = bp->num_queues; | |
3054 | break; | |
3055 | case ETHTOOL_GRXCLSRLCNT: | |
3056 | cmd->rule_cnt = bp->rx_fs_list.count; | |
3057 | break; | |
3058 | case ETHTOOL_GRXCLSRULE: | |
3059 | ret = gem_get_flow_entry(netdev, cmd); | |
3060 | break; | |
3061 | case ETHTOOL_GRXCLSRLALL: | |
3062 | ret = gem_get_all_flow_entries(netdev, cmd, rule_locs); | |
3063 | break; | |
3064 | default: | |
3065 | netdev_err(netdev, | |
3066 | "Command parameter %d is not supported\n", cmd->cmd); | |
3067 | ret = -EOPNOTSUPP; | |
3068 | } | |
3069 | ||
3070 | return ret; | |
3071 | } | |
3072 | ||
3073 | static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) | |
3074 | { | |
3075 | struct macb *bp = netdev_priv(netdev); | |
ae8223de RO |
3076 | int ret; |
3077 | ||
ae8223de RO |
3078 | switch (cmd->cmd) { |
3079 | case ETHTOOL_SRXCLSRLINS: | |
3080 | if ((cmd->fs.location >= bp->max_tuples) | |
3081 | || (cmd->fs.ring_cookie >= bp->num_queues)) { | |
3082 | ret = -EINVAL; | |
3083 | break; | |
3084 | } | |
3085 | ret = gem_add_flow_filter(netdev, cmd); | |
3086 | break; | |
3087 | case ETHTOOL_SRXCLSRLDEL: | |
3088 | ret = gem_del_flow_filter(netdev, cmd); | |
3089 | break; | |
3090 | default: | |
3091 | netdev_err(netdev, | |
3092 | "Command parameter %d is not supported\n", cmd->cmd); | |
3093 | ret = -EOPNOTSUPP; | |
3094 | } | |
3095 | ||
ae8223de RO |
3096 | return ret; |
3097 | } | |
3098 | ||
421d9df0 | 3099 | static const struct ethtool_ops macb_ethtool_ops = { |
d1d1b53d NF |
3100 | .get_regs_len = macb_get_regs_len, |
3101 | .get_regs = macb_get_regs, | |
89e5785f | 3102 | .get_link = ethtool_op_get_link, |
17f393e8 | 3103 | .get_ts_info = ethtool_op_get_ts_info, |
3e2a5e15 SP |
3104 | .get_wol = macb_get_wol, |
3105 | .set_wol = macb_set_wol, | |
176275a2 PR |
3106 | .get_link_ksettings = phy_ethtool_get_link_ksettings, |
3107 | .set_link_ksettings = phy_ethtool_set_link_ksettings, | |
8441bb33 ZB |
3108 | .get_ringparam = macb_get_ringparam, |
3109 | .set_ringparam = macb_set_ringparam, | |
8cd5a56c | 3110 | }; |
8cd5a56c | 3111 | |
8093b1c3 | 3112 | static const struct ethtool_ops gem_ethtool_ops = { |
8cd5a56c XH |
3113 | .get_regs_len = macb_get_regs_len, |
3114 | .get_regs = macb_get_regs, | |
3115 | .get_link = ethtool_op_get_link, | |
c2594d80 | 3116 | .get_ts_info = macb_get_ts_info, |
3ff13f1c XH |
3117 | .get_ethtool_stats = gem_get_ethtool_stats, |
3118 | .get_strings = gem_get_ethtool_strings, | |
3119 | .get_sset_count = gem_get_sset_count, | |
176275a2 PR |
3120 | .get_link_ksettings = phy_ethtool_get_link_ksettings, |
3121 | .set_link_ksettings = phy_ethtool_set_link_ksettings, | |
8441bb33 ZB |
3122 | .get_ringparam = macb_get_ringparam, |
3123 | .set_ringparam = macb_set_ringparam, | |
ae8223de RO |
3124 | .get_rxnfc = gem_get_rxnfc, |
3125 | .set_rxnfc = gem_set_rxnfc, | |
89e5785f HS |
3126 | }; |
3127 | ||
421d9df0 | 3128 | static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
89e5785f | 3129 | { |
0a91281e | 3130 | struct phy_device *phydev = dev->phydev; |
c2594d80 | 3131 | struct macb *bp = netdev_priv(dev); |
89e5785f HS |
3132 | |
3133 | if (!netif_running(dev)) | |
3134 | return -EINVAL; | |
3135 | ||
6c36a707 R |
3136 | if (!phydev) |
3137 | return -ENODEV; | |
89e5785f | 3138 | |
c2594d80 AP |
3139 | if (!bp->ptp_info) |
3140 | return phy_mii_ioctl(phydev, rq, cmd); | |
3141 | ||
3142 | switch (cmd) { | |
3143 | case SIOCSHWTSTAMP: | |
3144 | return bp->ptp_info->set_hwtst(dev, rq, cmd); | |
3145 | case SIOCGHWTSTAMP: | |
3146 | return bp->ptp_info->get_hwtst(dev, rq); | |
3147 | default: | |
3148 | return phy_mii_ioctl(phydev, rq, cmd); | |
3149 | } | |
89e5785f HS |
3150 | } |
3151 | ||
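/* Editor's example (not driver code): how userspace typically reaches the
 * SIOCSHWTSTAMP branch handled above; the request is forwarded to
 * bp->ptp_info->set_hwtst() when the GEM exposes a TSU. "eth0" and the
 * filter choice are placeholders.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int example_enable_hw_timestamping(int sock)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* sock is any AF_INET socket descriptor */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}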
85ff3d87 CP |
3152 | static int macb_set_features(struct net_device *netdev, |
3153 | netdev_features_t features) | |
3154 | { | |
3155 | struct macb *bp = netdev_priv(netdev); | |
3156 | netdev_features_t changed = features ^ netdev->features; | |
3157 | ||
3158 | /* TX checksum offload */ | |
3159 | if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) { | |
3160 | u32 dmacfg; | |
3161 | ||
3162 | dmacfg = gem_readl(bp, DMACFG); | |
3163 | if (features & NETIF_F_HW_CSUM) | |
3164 | dmacfg |= GEM_BIT(TXCOEN); | |
3165 | else | |
3166 | dmacfg &= ~GEM_BIT(TXCOEN); | |
3167 | gem_writel(bp, DMACFG, dmacfg); | |
3168 | } | |
3169 | ||
924ec53c CP |
3170 | /* RX checksum offload */ |
3171 | if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) { | |
3172 | u32 netcfg; | |
3173 | ||
3174 | netcfg = gem_readl(bp, NCFGR); | |
3175 | if (features & NETIF_F_RXCSUM && | |
3176 | !(netdev->flags & IFF_PROMISC)) | |
3177 | netcfg |= GEM_BIT(RXCOEN); | |
3178 | else | |
3179 | netcfg &= ~GEM_BIT(RXCOEN); | |
3180 | gem_writel(bp, NCFGR, netcfg); | |
3181 | } | |
3182 | ||
ae8223de RO |
3183 | /* RX Flow Filters */ |
3184 | if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) { | |
3185 | bool turn_on = features & NETIF_F_NTUPLE; | |
3186 | ||
3187 | gem_enable_flow_filters(bp, turn_on); | |
3188 | } | |
85ff3d87 CP |
3189 | return 0; |
3190 | } | |
3191 | ||
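/* Editor's note: these ndo_set_features toggles map onto the ethtool offload
 * switches, e.g. "ethtool -K eth0 tx on rx on ntuple on" (illustrative
 * syntax); flipping NETIF_F_NTUPLE here is what enables or disables the
 * type-2 screeners programmed by gem_prog_cmp_regs().
 */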
5f1fa992 AB |
3192 | static const struct net_device_ops macb_netdev_ops = { |
3193 | .ndo_open = macb_open, | |
3194 | .ndo_stop = macb_close, | |
3195 | .ndo_start_xmit = macb_start_xmit, | |
afc4b13d | 3196 | .ndo_set_rx_mode = macb_set_rx_mode, |
5f1fa992 AB |
3197 | .ndo_get_stats = macb_get_stats, |
3198 | .ndo_do_ioctl = macb_ioctl, | |
3199 | .ndo_validate_addr = eth_validate_addr, | |
a5898ea0 | 3200 | .ndo_change_mtu = macb_change_mtu, |
5f1fa992 | 3201 | .ndo_set_mac_address = eth_mac_addr, |
6e8cf5c0 TP |
3202 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3203 | .ndo_poll_controller = macb_poll_controller, | |
3204 | #endif | |
85ff3d87 | 3205 | .ndo_set_features = macb_set_features, |
1629dd4f | 3206 | .ndo_features_check = macb_features_check, |
5f1fa992 AB |
3207 | }; |
3208 | ||
64ec42fe | 3209 | /* Configure peripheral capabilities according to device tree |
e175587f NF |
3210 | * and integration options used |
3211 | */ | |
64ec42fe MF |
3212 | static void macb_configure_caps(struct macb *bp, |
3213 | const struct macb_config *dt_conf) | |
e175587f NF |
3214 | { |
3215 | u32 dcfg; | |
e175587f | 3216 | |
f6970505 NF |
3217 | if (dt_conf) |
3218 | bp->caps = dt_conf->caps; | |
3219 | ||
f2ce8a9e | 3220 | if (hw_is_gem(bp->regs, bp->native_io)) { |
e175587f NF |
3221 | bp->caps |= MACB_CAPS_MACB_IS_GEM; |
3222 | ||
e175587f NF |
3223 | dcfg = gem_readl(bp, DCFG1); |
3224 | if (GEM_BFEXT(IRQCOR, dcfg) == 0) | |
3225 | bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; | |
3226 | dcfg = gem_readl(bp, DCFG2); | |
3227 | if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) | |
3228 | bp->caps |= MACB_CAPS_FIFO_MODE; | |
ab91f0a9 RO |
3229 | #ifdef CONFIG_MACB_USE_HWSTAMP |
3230 | if (gem_has_ptp(bp)) { | |
7b429614 RO |
3231 | if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) |
3232 | pr_err("GEM doesn't support hardware ptp.\n"); | |
ab91f0a9 | 3233 | else { |
7b429614 | 3234 | bp->hw_dma_cap |= HW_DMA_CAP_PTP; |
ab91f0a9 RO |
3235 | bp->ptp_info = &gem_ptp_info; |
3236 | } | |
7b429614 | 3237 | } |
ab91f0a9 | 3238 | #endif |
e175587f NF |
3239 | } |
3240 | ||
a35919e1 | 3241 | dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); |
e175587f NF |
3242 | } |
3243 | ||
02c958dd | 3244 | static void macb_probe_queues(void __iomem *mem, |
f2ce8a9e | 3245 | bool native_io, |
02c958dd CP |
3246 | unsigned int *queue_mask, |
3247 | unsigned int *num_queues) | |
3248 | { | |
3249 | unsigned int hw_q; | |
02c958dd CP |
3250 | |
3251 | *queue_mask = 0x1; | |
3252 | *num_queues = 1; | |
3253 | ||
da120112 NF |
3254 | /* is it macb or gem? | |
3255 | * | |
3256 | * We need to read directly from the hardware here because | |
3257 | * we are early in the probe process and don't have the | |
3258 | * MACB_CAPS_MACB_IS_GEM flag set yet | |
3259 | */ | |
f2ce8a9e | 3260 | if (!hw_is_gem(mem, native_io)) |
02c958dd CP |
3261 | return; |
3262 | ||
3263 | /* bit 0 is never set but queue 0 always exists */ | |
a50dad35 AC |
3264 | *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff; |
3265 | ||
02c958dd CP |
3266 | *queue_mask |= 0x1; |
3267 | ||
3268 | for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) | |
3269 | if (*queue_mask & (1 << hw_q)) | |
3270 | (*num_queues)++; | |
3271 | } | |
3272 | ||
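/* Editor's worked example: on a GEM whose DCFG6 low byte reads 0x0e, the code
 * above yields queue_mask = 0x0e | 0x1 = 0x0f and num_queues = 4 (queue 0
 * plus hardware queues 1-3); on a plain MACB the defaults of 0x1 and 1 are
 * kept.
 */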
c69618b3 | 3273 | static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, |
aead88bd | 3274 | struct clk **hclk, struct clk **tx_clk, |
3275 | struct clk **rx_clk) | |
89e5785f | 3276 | { |
83a77e9e | 3277 | struct macb_platform_data *pdata; |
421d9df0 | 3278 | int err; |
89e5785f | 3279 | |
83a77e9e BF |
3280 | pdata = dev_get_platdata(&pdev->dev); |
3281 | if (pdata) { | |
3282 | *pclk = pdata->pclk; | |
3283 | *hclk = pdata->hclk; | |
3284 | } else { | |
3285 | *pclk = devm_clk_get(&pdev->dev, "pclk"); | |
3286 | *hclk = devm_clk_get(&pdev->dev, "hclk"); | |
3287 | } | |
3288 | ||
c69618b3 NF |
3289 | if (IS_ERR(*pclk)) { |
3290 | err = PTR_ERR(*pclk); | |
b48e0bab | 3291 | dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); |
421d9df0 | 3292 | return err; |
0cc8674f | 3293 | } |
461845db | 3294 | |
c69618b3 NF |
3295 | if (IS_ERR(*hclk)) { |
3296 | err = PTR_ERR(*hclk); | |
b48e0bab | 3297 | dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); |
421d9df0 | 3298 | return err; |
b48e0bab SB |
3299 | } |
3300 | ||
c69618b3 NF |
3301 | *tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); |
3302 | if (IS_ERR(*tx_clk)) | |
3303 | *tx_clk = NULL; | |
e1824dfe | 3304 | |
aead88bd | 3305 | *rx_clk = devm_clk_get(&pdev->dev, "rx_clk"); |
3306 | if (IS_ERR(*rx_clk)) | |
3307 | *rx_clk = NULL; | |
3308 | ||
c69618b3 | 3309 | err = clk_prepare_enable(*pclk); |
b48e0bab SB |
3310 | if (err) { |
3311 | dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); | |
421d9df0 | 3312 | return err; |
b48e0bab SB |
3313 | } |
3314 | ||
c69618b3 | 3315 | err = clk_prepare_enable(*hclk); |
b48e0bab SB |
3316 | if (err) { |
3317 | dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err); | |
421d9df0 | 3318 | goto err_disable_pclk; |
89e5785f | 3319 | } |
89e5785f | 3320 | |
c69618b3 | 3321 | err = clk_prepare_enable(*tx_clk); |
93b31f48 CP |
3322 | if (err) { |
3323 | dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); | |
421d9df0 | 3324 | goto err_disable_hclk; |
e1824dfe SB |
3325 | } |
3326 | ||
aead88bd | 3327 | err = clk_prepare_enable(*rx_clk); |
3328 | if (err) { | |
3329 | dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); | |
3330 | goto err_disable_txclk; | |
3331 | } | |
3332 | ||
c69618b3 NF |
3333 | return 0; |
3334 | ||
aead88bd | 3335 | err_disable_txclk: |
3336 | clk_disable_unprepare(*tx_clk); | |
3337 | ||
c69618b3 NF |
3338 | err_disable_hclk: |
3339 | clk_disable_unprepare(*hclk); | |
3340 | ||
3341 | err_disable_pclk: | |
3342 | clk_disable_unprepare(*pclk); | |
3343 | ||
3344 | return err; | |
3345 | } | |
3346 | ||
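/* Editor's note: "pclk" and "hclk" are treated as mandatory here, while
 * "tx_clk" and "rx_clk" are optional: a failed lookup is folded to NULL, and
 * clk_prepare_enable(NULL) is a no-op, so device trees that only provide the
 * two bus clocks still probe successfully.
 */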
3347 | static int macb_init(struct platform_device *pdev) | |
3348 | { | |
3349 | struct net_device *dev = platform_get_drvdata(pdev); | |
3350 | unsigned int hw_q, q; | |
3351 | struct macb *bp = netdev_priv(dev); | |
3352 | struct macb_queue *queue; | |
3353 | int err; | |
ae8223de | 3354 | u32 val, reg; |
c69618b3 | 3355 | |
b410d13e ZB |
3356 | bp->tx_ring_size = DEFAULT_TX_RING_SIZE; |
3357 | bp->rx_ring_size = DEFAULT_RX_RING_SIZE; | |
3358 | ||
02c958dd CP |
3359 | /* set the queue register mapping once for all: queue0 has a special |
3360 | * register mapping but we don't want to test the queue index then | |
3361 | * compute the corresponding register offset at run time. | |
3362 | */ | |
cf250de0 | 3363 | for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { |
bfa0914a | 3364 | if (!(bp->queue_mask & (1 << hw_q))) |
02c958dd CP |
3365 | continue; |
3366 | ||
cf250de0 | 3367 | queue = &bp->queues[q]; |
02c958dd | 3368 | queue->bp = bp; |
ae1f2a56 | 3369 | netif_napi_add(dev, &queue->napi, macb_poll, 64); |
02c958dd CP |
3370 | if (hw_q) { |
3371 | queue->ISR = GEM_ISR(hw_q - 1); | |
3372 | queue->IER = GEM_IER(hw_q - 1); | |
3373 | queue->IDR = GEM_IDR(hw_q - 1); | |
3374 | queue->IMR = GEM_IMR(hw_q - 1); | |
3375 | queue->TBQP = GEM_TBQP(hw_q - 1); | |
ae1f2a56 RO |
3376 | queue->RBQP = GEM_RBQP(hw_q - 1); |
3377 | queue->RBQS = GEM_RBQS(hw_q - 1); | |
fff8019a | 3378 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
ae1f2a56 | 3379 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) { |
dc97a89e | 3380 | queue->TBQPH = GEM_TBQPH(hw_q - 1); |
ae1f2a56 RO |
3381 | queue->RBQPH = GEM_RBQPH(hw_q - 1); |
3382 | } | |
fff8019a | 3383 | #endif |
02c958dd CP |
3384 | } else { |
3385 | /* queue0 uses legacy registers */ | |
3386 | queue->ISR = MACB_ISR; | |
3387 | queue->IER = MACB_IER; | |
3388 | queue->IDR = MACB_IDR; | |
3389 | queue->IMR = MACB_IMR; | |
3390 | queue->TBQP = MACB_TBQP; | |
ae1f2a56 | 3391 | queue->RBQP = MACB_RBQP; |
fff8019a | 3392 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
ae1f2a56 | 3393 | if (bp->hw_dma_cap & HW_DMA_CAP_64B) { |
dc97a89e | 3394 | queue->TBQPH = MACB_TBQPH; |
ae1f2a56 RO |
3395 | queue->RBQPH = MACB_RBQPH; |
3396 | } | |
fff8019a | 3397 | #endif |
02c958dd CP |
3398 | } |
3399 | ||
3400 | /* get irq: here we use the linux queue index, not the hardware | |
3401 | * queue index. the queue irq definitions in the device tree | |
3402 | * must remove the optional gaps that could exist in the | |
3403 | * hardware queue mask. | |
3404 | */ | |
cf250de0 | 3405 | queue->irq = platform_get_irq(pdev, q); |
02c958dd | 3406 | err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, |
20488239 | 3407 | IRQF_SHARED, dev->name, queue); |
02c958dd CP |
3408 | if (err) { |
3409 | dev_err(&pdev->dev, | |
3410 | "Unable to request IRQ %d (error %d)\n", | |
3411 | queue->irq, err); | |
c69618b3 | 3412 | return err; |
02c958dd CP |
3413 | } |
3414 | ||
3415 | INIT_WORK(&queue->tx_error_task, macb_tx_error_task); | |
cf250de0 | 3416 | q++; |
89e5785f HS |
3417 | } |
3418 | ||
5f1fa992 | 3419 | dev->netdev_ops = &macb_netdev_ops; |
89e5785f | 3420 | |
4df95131 NF |
3421 | /* setup appropriate routines according to adapter type */ | |
3422 | if (macb_is_gem(bp)) { | |
a4c35ed3 | 3423 | bp->max_tx_length = GEM_MAX_TX_LEN; |
4df95131 NF |
3424 | bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; |
3425 | bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; | |
3426 | bp->macbgem_ops.mog_init_rings = gem_init_rings; | |
3427 | bp->macbgem_ops.mog_rx = gem_rx; | |
8cd5a56c | 3428 | dev->ethtool_ops = &gem_ethtool_ops; |
4df95131 | 3429 | } else { |
a4c35ed3 | 3430 | bp->max_tx_length = MACB_MAX_TX_LEN; |
4df95131 NF |
3431 | bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; |
3432 | bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; | |
3433 | bp->macbgem_ops.mog_init_rings = macb_init_rings; | |
3434 | bp->macbgem_ops.mog_rx = macb_rx; | |
8cd5a56c | 3435 | dev->ethtool_ops = &macb_ethtool_ops; |
4df95131 NF |
3436 | } |
3437 | ||
a4c35ed3 CP |
3438 | /* Set features */ |
3439 | dev->hw_features = NETIF_F_SG; | |
1629dd4f RO |
3440 | |
3441 | /* Check LSO capability */ | |
3442 | if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) | |
3443 | dev->hw_features |= MACB_NETIF_LSO; | |
3444 | ||
85ff3d87 CP |
3445 | /* Checksum offload is only available on gem with packet buffer */ |
3446 | if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) | |
924ec53c | 3447 | dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; |
a4c35ed3 CP |
3448 | if (bp->caps & MACB_CAPS_SG_DISABLED) |
3449 | dev->hw_features &= ~NETIF_F_SG; | |
3450 | dev->features = dev->hw_features; | |
3451 | ||
ae8223de RO |
3452 | /* Check RX Flow Filters support. |
3453 | * Max Rx flows set by availability of screeners & compare regs: | |
3454 | * each 4-tuple define requires 1 T2 screener reg + 3 compare regs | |
3455 | */ | |
3456 | reg = gem_readl(bp, DCFG8); | |
3457 | bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), | |
3458 | GEM_BFEXT(T2SCR, reg)); | |
3459 | if (bp->max_tuples > 0) { | |
3460 | /* also needs one ethtype match to check IPv4 */ | |
3461 | if (GEM_BFEXT(SCR2ETH, reg) > 0) { | |
3462 | /* program this reg now */ | |
3463 | reg = 0; | |
3464 | reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg); | |
3465 | gem_writel_n(bp, ETHT, SCRT2_ETHT, reg); | |
3466 | /* Filtering is supported in hw but left disabled until userspace enables NETIF_F_NTUPLE */ | |
3467 | dev->hw_features |= NETIF_F_NTUPLE; | |
3468 | /* init Rx flow definitions */ | |
3469 | INIT_LIST_HEAD(&bp->rx_fs_list.list); | |
3470 | bp->rx_fs_list.count = 0; | |
3471 | spin_lock_init(&bp->rx_fs_lock); | |
3472 | } else | |
3473 | bp->max_tuples = 0; | |
3474 | } | |
3475 | ||
ce721a70 NA |
3476 | if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { |
3477 | val = 0; | |
3478 | if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) | |
3479 | val = GEM_BIT(RGMII); | |
3480 | else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && | |
6bdaa5e9 | 3481 | (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) |
ce721a70 | 3482 | val = MACB_BIT(RMII); |
6bdaa5e9 | 3483 | else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) |
ce721a70 | 3484 | val = MACB_BIT(MII); |
421d9df0 | 3485 | |
ce721a70 NA |
3486 | if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) |
3487 | val |= MACB_BIT(CLKEN); | |
421d9df0 | 3488 | |
ce721a70 NA |
3489 | macb_or_gem_writel(bp, USRIO, val); |
3490 | } | |
421d9df0 | 3491 | |
89e5785f | 3492 | /* Set MII management clock divider */ |
421d9df0 CP |
3493 | val = macb_mdc_clk_div(bp); |
3494 | val |= macb_dbw(bp); | |
022be25c PCK |
3495 | if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) |
3496 | val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); | |
421d9df0 CP |
3497 | macb_writel(bp, NCFGR, val); |
3498 | ||
3499 | return 0; | |
421d9df0 CP |
3500 | } |
3501 | ||
3502 | #if defined(CONFIG_OF) | |
3503 | /* 1518 rounded up */ | |
3504 | #define AT91ETHER_MAX_RBUFF_SZ 0x600 | |
3505 | /* max number of receive buffers */ | |
3506 | #define AT91ETHER_MAX_RX_DESCR 9 | |
3507 | ||
3508 | /* Initialize and start the Receiver and Transmit subsystems */ | |
3509 | static int at91ether_start(struct net_device *dev) | |
3510 | { | |
3511 | struct macb *lp = netdev_priv(dev); | |
ae1f2a56 | 3512 | struct macb_queue *q = &lp->queues[0]; |
dc97a89e | 3513 | struct macb_dma_desc *desc; |
421d9df0 CP |
3514 | dma_addr_t addr; |
3515 | u32 ctl; | |
3516 | int i; | |
3517 | ||
ae1f2a56 | 3518 | q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, |
421d9df0 | 3519 | (AT91ETHER_MAX_RX_DESCR * |
dc97a89e | 3520 | macb_dma_desc_get_size(lp)), |
ae1f2a56 RO |
3521 | &q->rx_ring_dma, GFP_KERNEL); |
3522 | if (!q->rx_ring) | |
421d9df0 CP |
3523 | return -ENOMEM; |
3524 | ||
ae1f2a56 | 3525 | q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, |
421d9df0 CP |
3526 | AT91ETHER_MAX_RX_DESCR * |
3527 | AT91ETHER_MAX_RBUFF_SZ, | |
ae1f2a56 RO |
3528 | &q->rx_buffers_dma, GFP_KERNEL); |
3529 | if (!q->rx_buffers) { | |
421d9df0 CP |
3530 | dma_free_coherent(&lp->pdev->dev, |
3531 | AT91ETHER_MAX_RX_DESCR * | |
dc97a89e | 3532 | macb_dma_desc_get_size(lp), |
ae1f2a56 RO |
3533 | q->rx_ring, q->rx_ring_dma); |
3534 | q->rx_ring = NULL; | |
421d9df0 CP |
3535 | return -ENOMEM; |
3536 | } | |
3537 | ||
ae1f2a56 | 3538 | addr = q->rx_buffers_dma; |
421d9df0 | 3539 | for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { |
ae1f2a56 | 3540 | desc = macb_rx_desc(q, i); |
dc97a89e RO |
3541 | macb_set_addr(lp, desc, addr); |
3542 | desc->ctrl = 0; | |
421d9df0 CP |
3543 | addr += AT91ETHER_MAX_RBUFF_SZ; |
3544 | } | |
3545 | ||
3546 | /* Set the Wrap bit on the last descriptor */ | |
dc97a89e | 3547 | desc->addr |= MACB_BIT(RX_WRAP); |
421d9df0 CP |
3548 | |
3549 | /* Reset buffer index */ | |
ae1f2a56 | 3550 | q->rx_tail = 0; |
421d9df0 CP |
3551 | |
3552 | /* Program address of descriptor list in Rx Buffer Queue register */ | |
ae1f2a56 | 3553 | macb_writel(lp, RBQP, q->rx_ring_dma); |
421d9df0 CP |
3554 | |
3555 | /* Enable Receive and Transmit */ | |
3556 | ctl = macb_readl(lp, NCR); | |
3557 | macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); | |
3558 | ||
3559 | return 0; | |
3560 | } | |
3561 | ||
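/* Editor's note: with AT91ETHER_MAX_RX_DESCR = 9 and AT91ETHER_MAX_RBUFF_SZ =
 * 0x600 (1536 bytes), at91ether_start() builds a 9-entry RX ring backed by a
 * single 13824-byte coherent buffer block; the RX_WRAP bit set on the last
 * descriptor makes the controller cycle back to entry 0.
 */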
3562 | /* Open the ethernet interface */ | |
3563 | static int at91ether_open(struct net_device *dev) | |
3564 | { | |
3565 | struct macb *lp = netdev_priv(dev); | |
3566 | u32 ctl; | |
3567 | int ret; | |
3568 | ||
3569 | /* Clear internal statistics */ | |
3570 | ctl = macb_readl(lp, NCR); | |
3571 | macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); | |
3572 | ||
3573 | macb_set_hwaddr(lp); | |
3574 | ||
3575 | ret = at91ether_start(dev); | |
3576 | if (ret) | |
3577 | return ret; | |
3578 | ||
3579 | /* Enable MAC interrupts */ | |
3580 | macb_writel(lp, IER, MACB_BIT(RCOMP) | | |
3581 | MACB_BIT(RXUBR) | | |
3582 | MACB_BIT(ISR_TUND) | | |
3583 | MACB_BIT(ISR_RLE) | | |
3584 | MACB_BIT(TCOMP) | | |
3585 | MACB_BIT(ISR_ROVR) | | |
3586 | MACB_BIT(HRESP)); | |
3587 | ||
3588 | /* schedule a link state check */ | |
0a91281e | 3589 | phy_start(dev->phydev); |
421d9df0 CP |
3590 | |
3591 | netif_start_queue(dev); | |
3592 | ||
3593 | return 0; | |
3594 | } | |
3595 | ||
3596 | /* Close the interface */ | |
3597 | static int at91ether_close(struct net_device *dev) | |
3598 | { | |
3599 | struct macb *lp = netdev_priv(dev); | |
ae1f2a56 | 3600 | struct macb_queue *q = &lp->queues[0]; |
421d9df0 CP |
3601 | u32 ctl; |
3602 | ||
3603 | /* Disable Receiver and Transmitter */ | |
3604 | ctl = macb_readl(lp, NCR); | |
3605 | macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); | |
3606 | ||
3607 | /* Disable MAC interrupts */ | |
3608 | macb_writel(lp, IDR, MACB_BIT(RCOMP) | | |
3609 | MACB_BIT(RXUBR) | | |
3610 | MACB_BIT(ISR_TUND) | | |
3611 | MACB_BIT(ISR_RLE) | | |
3612 | MACB_BIT(TCOMP) | | |
3613 | MACB_BIT(ISR_ROVR) | | |
3614 | MACB_BIT(HRESP)); | |
3615 | ||
3616 | netif_stop_queue(dev); | |
3617 | ||
3618 | dma_free_coherent(&lp->pdev->dev, | |
3619 | AT91ETHER_MAX_RX_DESCR * | |
dc97a89e | 3620 | macb_dma_desc_get_size(lp), |
ae1f2a56 RO |
3621 | q->rx_ring, q->rx_ring_dma); |
3622 | q->rx_ring = NULL; | |
421d9df0 CP |
3623 | |
3624 | dma_free_coherent(&lp->pdev->dev, | |
3625 | AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, | |
ae1f2a56 RO |
3626 | q->rx_buffers, q->rx_buffers_dma); |
3627 | q->rx_buffers = NULL; | |
421d9df0 CP |
3628 | |
3629 | return 0; | |
3630 | } | |
3631 | ||
3632 | /* Transmit packet */ | |
d1c38957 CB |
3633 | static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, |
3634 | struct net_device *dev) | |
421d9df0 CP |
3635 | { |
3636 | struct macb *lp = netdev_priv(dev); | |
3637 | ||
3638 | if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { | |
3639 | netif_stop_queue(dev); | |
3640 | ||
3641 | /* Store packet information (to free when Tx completed) */ | |
3642 | lp->skb = skb; | |
3643 | lp->skb_length = skb->len; | |
3644 | lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, | |
3645 | DMA_TO_DEVICE); | |
178c7ae9 AK |
3646 | if (dma_mapping_error(NULL, lp->skb_physaddr)) { |
3647 | dev_kfree_skb_any(skb); | |
3648 | dev->stats.tx_dropped++; | |
3649 | netdev_err(dev, "%s: DMA mapping error\n", __func__); | |
3650 | return NETDEV_TX_OK; | |
3651 | } | |
421d9df0 CP |
3652 | |
3653 | /* Set address of the data in the Transmit Address register */ | |
3654 | macb_writel(lp, TAR, lp->skb_physaddr); | |
3655 | /* Set length of the packet in the Transmit Control register */ | |
3656 | macb_writel(lp, TCR, skb->len); | |
89e5785f | 3657 | |
421d9df0 CP |
3658 | } else { |
3659 | netdev_err(dev, "%s called, but device is busy!\n", __func__); | |
3660 | return NETDEV_TX_BUSY; | |
3661 | } | |
3662 | ||
3663 | return NETDEV_TX_OK; | |
3664 | } | |
3665 | ||
3666 | /* Extract received frames from the buffer descriptors and send them to the upper layers. | |
3667 | * (Called from interrupt context) | |
3668 | */ | |
3669 | static void at91ether_rx(struct net_device *dev) | |
3670 | { | |
3671 | struct macb *lp = netdev_priv(dev); | |
ae1f2a56 | 3672 | struct macb_queue *q = &lp->queues[0]; |
dc97a89e | 3673 | struct macb_dma_desc *desc; |
421d9df0 CP |
3674 | unsigned char *p_recv; |
3675 | struct sk_buff *skb; | |
3676 | unsigned int pktlen; | |
3677 | ||
ae1f2a56 | 3678 | desc = macb_rx_desc(q, q->rx_tail); |
dc97a89e | 3679 | while (desc->addr & MACB_BIT(RX_USED)) { |
ae1f2a56 | 3680 | p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ; |
dc97a89e | 3681 | pktlen = MACB_BF(RX_FRMLEN, desc->ctrl); |
421d9df0 CP |
3682 | skb = netdev_alloc_skb(dev, pktlen + 2); |
3683 | if (skb) { | |
3684 | skb_reserve(skb, 2); | |
59ae1d12 | 3685 | skb_put_data(skb, p_recv, pktlen); |
421d9df0 CP |
3686 | |
3687 | skb->protocol = eth_type_trans(skb, dev); | |
5f1d3a5c TK |
3688 | dev->stats.rx_packets++; |
3689 | dev->stats.rx_bytes += pktlen; | |
421d9df0 CP |
3690 | netif_rx(skb); |
3691 | } else { | |
5f1d3a5c | 3692 | dev->stats.rx_dropped++; |
421d9df0 CP |
3693 | } |
3694 | ||
dc97a89e | 3695 | if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH)) |
5f1d3a5c | 3696 | dev->stats.multicast++; |
421d9df0 CP |
3697 | |
3698 | /* reset ownership bit */ | |
dc97a89e | 3699 | desc->addr &= ~MACB_BIT(RX_USED); |
421d9df0 CP |
3700 | |
3701 | /* wrap after last buffer */ | |
ae1f2a56 RO |
3702 | if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) |
3703 | q->rx_tail = 0; | |
421d9df0 | 3704 | else |
ae1f2a56 | 3705 | q->rx_tail++; |
dc97a89e | 3706 | |
ae1f2a56 | 3707 | desc = macb_rx_desc(q, q->rx_tail); |
421d9df0 CP |
3708 | } |
3709 | } | |
3710 | ||
3711 | /* MAC interrupt handler */ | |
3712 | static irqreturn_t at91ether_interrupt(int irq, void *dev_id) | |
3713 | { | |
3714 | struct net_device *dev = dev_id; | |
3715 | struct macb *lp = netdev_priv(dev); | |
3716 | u32 intstatus, ctl; | |
3717 | ||
3718 | /* MAC Interrupt Status register indicates what interrupts are pending. | |
3719 | * It is automatically cleared once read. | |
3720 | */ | |
3721 | intstatus = macb_readl(lp, ISR); | |
3722 | ||
3723 | /* Receive complete */ | |
3724 | if (intstatus & MACB_BIT(RCOMP)) | |
3725 | at91ether_rx(dev); | |
3726 | ||
3727 | /* Transmit complete */ | |
3728 | if (intstatus & MACB_BIT(TCOMP)) { | |
3729 | /* The TCOMP bit is set even if the transmission failed */ | |
3730 | if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) | |
5f1d3a5c | 3731 | dev->stats.tx_errors++; |
421d9df0 CP |
3732 | |
3733 | if (lp->skb) { | |
3734 | dev_kfree_skb_irq(lp->skb); | |
3735 | lp->skb = NULL; | |
3736 | dma_unmap_single(NULL, lp->skb_physaddr, | |
3737 | lp->skb_length, DMA_TO_DEVICE); | |
5f1d3a5c TK |
3738 | dev->stats.tx_packets++; |
3739 | dev->stats.tx_bytes += lp->skb_length; | |
421d9df0 CP |
3740 | } |
3741 | netif_wake_queue(dev); | |
3742 | } | |
3743 | ||
3744 | /* Work-around for EMAC Errata section 41.3.1 */ | |
3745 | if (intstatus & MACB_BIT(RXUBR)) { | |
3746 | ctl = macb_readl(lp, NCR); | |
3747 | macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); | |
ffac0e96 | 3748 | wmb(); |
421d9df0 CP |
3749 | macb_writel(lp, NCR, ctl | MACB_BIT(RE)); |
3750 | } | |
3751 | ||
3752 | if (intstatus & MACB_BIT(ISR_ROVR)) | |
3753 | netdev_err(dev, "ROVR error\n"); | |
3754 | ||
3755 | return IRQ_HANDLED; | |
3756 | } | |
3757 | ||
3758 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
3759 | static void at91ether_poll_controller(struct net_device *dev) | |
3760 | { | |
3761 | unsigned long flags; | |
3762 | ||
3763 | local_irq_save(flags); | |
3764 | at91ether_interrupt(dev->irq, dev); | |
3765 | local_irq_restore(flags); | |
3766 | } | |
3767 | #endif | |
3768 | ||
3769 | static const struct net_device_ops at91ether_netdev_ops = { | |
3770 | .ndo_open = at91ether_open, | |
3771 | .ndo_stop = at91ether_close, | |
3772 | .ndo_start_xmit = at91ether_start_xmit, | |
3773 | .ndo_get_stats = macb_get_stats, | |
3774 | .ndo_set_rx_mode = macb_set_rx_mode, | |
3775 | .ndo_set_mac_address = eth_mac_addr, | |
3776 | .ndo_do_ioctl = macb_ioctl, | |
3777 | .ndo_validate_addr = eth_validate_addr, | |
421d9df0 CP |
3778 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3779 | .ndo_poll_controller = at91ether_poll_controller, | |
3780 | #endif | |
3781 | }; | |
3782 | ||
c69618b3 | 3783 | static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, |
aead88bd | 3784 | struct clk **hclk, struct clk **tx_clk, |
3785 | struct clk **rx_clk) | |
421d9df0 | 3786 | { |
421d9df0 | 3787 | int err; |
421d9df0 | 3788 | |
c69618b3 NF |
3789 | *hclk = NULL; |
3790 | *tx_clk = NULL; | |
aead88bd | 3791 | *rx_clk = NULL; |
c69618b3 NF |
3792 | |
3793 | *pclk = devm_clk_get(&pdev->dev, "ether_clk"); | |
3794 | if (IS_ERR(*pclk)) | |
3795 | return PTR_ERR(*pclk); | |
421d9df0 | 3796 | |
c69618b3 | 3797 | err = clk_prepare_enable(*pclk); |
421d9df0 CP |
3798 | if (err) { |
3799 | dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); | |
3800 | return err; | |
3801 | } | |
3802 | ||
c69618b3 NF |
3803 | return 0; |
3804 | } | |
3805 | ||
3806 | static int at91ether_init(struct platform_device *pdev) | |
3807 | { | |
3808 | struct net_device *dev = platform_get_drvdata(pdev); | |
3809 | struct macb *bp = netdev_priv(dev); | |
3810 | int err; | |
3811 | u32 reg; | |
3812 | ||
fec9d3b1 AB |
3813 | bp->queues[0].bp = bp; |
3814 | ||
421d9df0 CP |
3815 | dev->netdev_ops = &at91ether_netdev_ops; |
3816 | dev->ethtool_ops = &macb_ethtool_ops; | |
3817 | ||
3818 | err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, | |
3819 | 0, dev->name, dev); | |
3820 | if (err) | |
c69618b3 | 3821 | return err; |
421d9df0 CP |
3822 | |
3823 | macb_writel(bp, NCR, 0); | |
3824 | ||
3825 | reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG); | |
3826 | if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) | |
3827 | reg |= MACB_BIT(RM9200_RMII); | |
3828 | ||
3829 | macb_writel(bp, NCFGR, reg); | |
3830 | ||
3831 | return 0; | |
421d9df0 CP |
3832 | } |
3833 | ||
3cef5c5b | 3834 | static const struct macb_config at91sam9260_config = { |
6bdaa5e9 | 3835 | .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, |
c69618b3 | 3836 | .clk_init = macb_clk_init, |
421d9df0 CP |
3837 | .init = macb_init, |
3838 | }; | |
3839 | ||
3cef5c5b | 3840 | static const struct macb_config pc302gem_config = { |
421d9df0 CP |
3841 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, |
3842 | .dma_burst_length = 16, | |
c69618b3 | 3843 | .clk_init = macb_clk_init, |
421d9df0 CP |
3844 | .init = macb_init, |
3845 | }; | |
3846 | ||
5c8fe711 | 3847 | static const struct macb_config sama5d2_config = { |
6bdaa5e9 | 3848 | .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, |
5c8fe711 CP |
3849 | .dma_burst_length = 16, |
3850 | .clk_init = macb_clk_init, | |
3851 | .init = macb_init, | |
3852 | }; | |
3853 | ||
3cef5c5b | 3854 | static const struct macb_config sama5d3_config = { |
6bdaa5e9 | 3855 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
233a1587 | 3856 | | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO, |
421d9df0 | 3857 | .dma_burst_length = 16, |
c69618b3 | 3858 | .clk_init = macb_clk_init, |
421d9df0 | 3859 | .init = macb_init, |
233a1587 | 3860 | .jumbo_max_len = 10240, |
421d9df0 CP |
3861 | }; |
3862 | ||
3cef5c5b | 3863 | static const struct macb_config sama5d4_config = { |
6bdaa5e9 | 3864 | .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, |
421d9df0 | 3865 | .dma_burst_length = 4, |
c69618b3 | 3866 | .clk_init = macb_clk_init, |
421d9df0 CP |
3867 | .init = macb_init, |
3868 | }; | |
3869 | ||
3cef5c5b | 3870 | static const struct macb_config emac_config = { |
c69618b3 | 3871 | .clk_init = at91ether_clk_init, |
421d9df0 CP |
3872 | .init = at91ether_init, |
3873 | }; | |
3874 | ||
e611b5b8 NA |
3875 | static const struct macb_config np4_config = { |
3876 | .caps = MACB_CAPS_USRIO_DISABLED, | |
3877 | .clk_init = macb_clk_init, | |
3878 | .init = macb_init, | |
3879 | }; | |
36583eb5 | 3880 | |
7b61f9c1 | 3881 | static const struct macb_config zynqmp_config = { |
ab91f0a9 RO |
3882 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | |
3883 | MACB_CAPS_JUMBO | | |
404cd086 | 3884 | MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH, |
7b61f9c1 HK |
3885 | .dma_burst_length = 16, |
3886 | .clk_init = macb_clk_init, | |
3887 | .init = macb_init, | |
98b5a0f4 | 3888 | .jumbo_max_len = 10240, |
7b61f9c1 HK |
3889 | }; |
3890 | ||
222ca8e0 | 3891 | static const struct macb_config zynq_config = { |
7baaa909 | 3892 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF, |
222ca8e0 NS |
3893 | .dma_burst_length = 16, |
3894 | .clk_init = macb_clk_init, | |
3895 | .init = macb_init, | |
3896 | }; | |
3897 | ||
421d9df0 CP |
3898 | static const struct of_device_id macb_dt_ids[] = { |
3899 | { .compatible = "cdns,at32ap7000-macb" }, | |
3900 | { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, | |
3901 | { .compatible = "cdns,macb" }, | |
e611b5b8 | 3902 | { .compatible = "cdns,np4-macb", .data = &np4_config }, |
421d9df0 CP |
3903 | { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, |
3904 | { .compatible = "cdns,gem", .data = &pc302gem_config }, | |
5c8fe711 | 3905 | { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, |
421d9df0 CP |
3906 | { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, |
3907 | { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, | |
3908 | { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, | |
3909 | { .compatible = "cdns,emac", .data = &emac_config }, | |
7b61f9c1 | 3910 | { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, |
222ca8e0 | 3911 | { .compatible = "cdns,zynq-gem", .data = &zynq_config }, |
421d9df0 CP |
3912 | { /* sentinel */ } |
3913 | }; | |
3914 | MODULE_DEVICE_TABLE(of, macb_dt_ids); | |
3915 | #endif /* CONFIG_OF */ | |
3916 | ||
83a77e9e | 3917 | static const struct macb_config default_gem_config = { |
ab91f0a9 RO |
3918 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | |
3919 | MACB_CAPS_JUMBO | | |
3920 | MACB_CAPS_GEM_HAS_PTP, | |
83a77e9e BF |
3921 | .dma_burst_length = 16, |
3922 | .clk_init = macb_clk_init, | |
3923 | .init = macb_init, | |
3924 | .jumbo_max_len = 10240, | |
3925 | }; | |
3926 | ||
421d9df0 CP |
3927 | static int macb_probe(struct platform_device *pdev) |
3928 | { | |
83a77e9e | 3929 | const struct macb_config *macb_config = &default_gem_config; |
c69618b3 | 3930 | int (*clk_init)(struct platform_device *, struct clk **, |
aead88bd | 3931 | struct clk **, struct clk **, struct clk **) |
83a77e9e BF |
3932 | = macb_config->clk_init; |
3933 | int (*init)(struct platform_device *) = macb_config->init; | |
421d9df0 | 3934 | struct device_node *np = pdev->dev.of_node; |
aead88bd | 3935 | struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; |
421d9df0 CP |
3936 | unsigned int queue_mask, num_queues; |
3937 | struct macb_platform_data *pdata; | |
f2ce8a9e | 3938 | bool native_io; |
421d9df0 CP |
3939 | struct phy_device *phydev; |
3940 | struct net_device *dev; | |
3941 | struct resource *regs; | |
3942 | void __iomem *mem; | |
3943 | const char *mac; | |
3944 | struct macb *bp; | |
404cd086 | 3945 | int err, val; |
421d9df0 | 3946 | |
f2ce8a9e AS |
3947 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
3948 | mem = devm_ioremap_resource(&pdev->dev, regs); | |
3949 | if (IS_ERR(mem)) | |
3950 | return PTR_ERR(mem); | |
3951 | ||
c69618b3 NF |
3952 | if (np) { |
3953 | const struct of_device_id *match; | |
3954 | ||
3955 | match = of_match_node(macb_dt_ids, np); | |
3956 | if (match && match->data) { | |
3957 | macb_config = match->data; | |
3958 | clk_init = macb_config->clk_init; | |
3959 | init = macb_config->init; | |
3960 | } | |
3961 | } | |
3962 | ||
aead88bd | 3963 | err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk); |
c69618b3 NF |
3964 | if (err) |
3965 | return err; | |
3966 | ||
f2ce8a9e | 3967 | native_io = hw_is_native_io(mem); |
421d9df0 | 3968 | |
f2ce8a9e | 3969 | macb_probe_queues(mem, native_io, &queue_mask, &num_queues); |
421d9df0 | 3970 | dev = alloc_etherdev_mq(sizeof(*bp), num_queues); |
c69618b3 NF |
3971 | if (!dev) { |
3972 | err = -ENOMEM; | |
3973 | goto err_disable_clocks; | |
3974 | } | |
421d9df0 CP |
3975 | |
3976 | dev->base_addr = regs->start; | |
3977 | ||
3978 | SET_NETDEV_DEV(dev, &pdev->dev); | |
3979 | ||
3980 | bp = netdev_priv(dev); | |
3981 | bp->pdev = pdev; | |
3982 | bp->dev = dev; | |
3983 | bp->regs = mem; | |
f2ce8a9e AS |
3984 | bp->native_io = native_io; |
3985 | if (native_io) { | |
7a6e0706 DM |
3986 | bp->macb_reg_readl = hw_readl_native; |
3987 | bp->macb_reg_writel = hw_writel_native; | |
f2ce8a9e | 3988 | } else { |
7a6e0706 DM |
3989 | bp->macb_reg_readl = hw_readl; |
3990 | bp->macb_reg_writel = hw_writel; | |
f2ce8a9e | 3991 | } |
421d9df0 | 3992 | bp->num_queues = num_queues; |
bfa0914a | 3993 | bp->queue_mask = queue_mask; |
c69618b3 NF |
3994 | if (macb_config) |
3995 | bp->dma_burst_length = macb_config->dma_burst_length; | |
3996 | bp->pclk = pclk; | |
3997 | bp->hclk = hclk; | |
3998 | bp->tx_clk = tx_clk; | |
aead88bd | 3999 | bp->rx_clk = rx_clk; |
f36dbe6a | 4000 | if (macb_config) |
98b5a0f4 | 4001 | bp->jumbo_max_len = macb_config->jumbo_max_len; |
98b5a0f4 | 4002 | |
3e2a5e15 | 4003 | bp->wol = 0; |
7c4a1d0c | 4004 | if (of_get_property(np, "magic-packet", NULL)) |
3e2a5e15 SP |
4005 | bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; |
4006 | device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); | |
4007 | ||
421d9df0 CP |
4008 | spin_lock_init(&bp->lock); |
4009 | ||
ad78347f | 4010 | /* setup capabilities */ |
f6970505 NF |
4011 | macb_configure_caps(bp, macb_config); |
4012 | ||
7b429614 RO |
4013 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
4014 | if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { | |
4015 | dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); | |
4016 | bp->hw_dma_cap |= HW_DMA_CAP_64B; | |
4017 | } | |
4018 | #endif | |
421d9df0 CP |
4019 | platform_set_drvdata(pdev, dev); |
4020 | ||
4021 | dev->irq = platform_get_irq(pdev, 0); | |
c69618b3 NF |
4022 | if (dev->irq < 0) { |
4023 | err = dev->irq; | |
b22ae0b4 | 4024 | goto err_out_free_netdev; |
c69618b3 | 4025 | } |
421d9df0 | 4026 | |
44770e11 JW |
4027 | /* MTU range: 68 - 1500 or 10240 */ |
4028 | dev->min_mtu = GEM_MTU_MIN_SIZE; | |
4029 | if (bp->caps & MACB_CAPS_JUMBO) | |
4030 | dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN; | |
4031 | else | |
4032 | dev->max_mtu = ETH_DATA_LEN; | |
4033 | ||
404cd086 HK |
4034 | if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { |
4035 | val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10)); | |
4036 | if (val) | |
4037 | bp->rx_bd_rd_prefetch = (2 << (val - 1)) * | |
4038 | macb_dma_desc_get_size(bp); | |
4039 | ||
4040 | val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10)); | |
4041 | if (val) | |
4042 | bp->tx_bd_rd_prefetch = (2 << (val - 1)) * | |
4043 | macb_dma_desc_get_size(bp); | |
4044 | } | |
4045 | ||
421d9df0 | 4046 | mac = of_get_mac_address(np); |
aa076e3d | 4047 | if (mac) { |
eefb52d1 | 4048 | ether_addr_copy(bp->dev->dev_addr, mac); |
aa076e3d ML |
4049 | } else { |
4050 | err = of_get_nvmem_mac_address(np, bp->dev->dev_addr); | |
4051 | if (err) { | |
4052 | if (err == -EPROBE_DEFER) | |
4053 | goto err_out_free_netdev; | |
4054 | macb_get_hwaddr(bp); | |
4055 | } | |
4056 | } | |
fb97a846 | 4057 | |
421d9df0 | 4058 | err = of_get_phy_mode(np); |
fb97a846 | 4059 | if (err < 0) { |
c607a0d9 | 4060 | pdata = dev_get_platdata(&pdev->dev); |
fb97a846 JCPV |
4061 | if (pdata && pdata->is_rmii) |
4062 | bp->phy_interface = PHY_INTERFACE_MODE_RMII; | |
4063 | else | |
4064 | bp->phy_interface = PHY_INTERFACE_MODE_MII; | |
4065 | } else { | |
4066 | bp->phy_interface = err; | |
4067 | } | |
6c36a707 | 4068 | |
421d9df0 CP |
4069 | /* IP specific init */ |
4070 | err = init(pdev); | |
4071 | if (err) | |
4072 | goto err_out_free_netdev; | |
89e5785f | 4073 | |
cf669660 FF |
4074 | err = macb_mii_init(bp); |
4075 | if (err) | |
4076 | goto err_out_free_netdev; | |
4077 | ||
0a91281e | 4078 | phydev = dev->phydev; |
cf669660 FF |
4079 | |
4080 | netif_carrier_off(dev); | |
4081 | ||
89e5785f HS |
4082 | err = register_netdev(dev); |
4083 | if (err) { | |
4084 | dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); | |
cf669660 | 4085 | goto err_out_unregister_mdio; |
89e5785f HS |
4086 | } |
4087 | ||
032dc41b HK |
4088 | tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task, |
4089 | (unsigned long)bp); | |
4090 | ||
cf669660 | 4091 | phy_attached_info(phydev); |
03fc4721 | 4092 | |
5879823f BS |
4093 | netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", |
4094 | macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), | |
4095 | dev->base_addr, dev->irq, dev->dev_addr); | |
89e5785f HS |
4096 | |
4097 | return 0; | |
4098 | ||
cf669660 | 4099 | err_out_unregister_mdio: |
0a91281e | 4100 | phy_disconnect(dev->phydev); |
cf669660 | 4101 | mdiobus_unregister(bp->mii_bus); |
66ee6a06 | 4102 | of_node_put(bp->phy_node); |
9ce98140 MG |
4103 | if (np && of_phy_is_fixed_link(np)) |
4104 | of_phy_deregister_fixed_link(np); | |
cf669660 FF |
4105 | mdiobus_free(bp->mii_bus); |
4106 | ||
cf250de0 | 4107 | err_out_free_netdev: |
02c958dd | 4108 | free_netdev(dev); |
421d9df0 | 4109 | |
c69618b3 NF |
4110 | err_disable_clocks: |
4111 | clk_disable_unprepare(tx_clk); | |
4112 | clk_disable_unprepare(hclk); | |
4113 | clk_disable_unprepare(pclk); | |
aead88bd | 4114 | clk_disable_unprepare(rx_clk); |
c69618b3 | 4115 | |
89e5785f HS |
4116 | return err; |
4117 | } | |
4118 | ||
9e86d766 | 4119 | static int macb_remove(struct platform_device *pdev) |
89e5785f HS |
4120 | { |
4121 | struct net_device *dev; | |
4122 | struct macb *bp; | |
9ce98140 | 4123 | struct device_node *np = pdev->dev.of_node; |
89e5785f HS |
4124 | |
4125 | dev = platform_get_drvdata(pdev); | |
4126 | ||
4127 | if (dev) { | |
4128 | bp = netdev_priv(dev); | |
0a91281e PR |
4129 | if (dev->phydev) |
4130 | phy_disconnect(dev->phydev); | |
298cf9be | 4131 | mdiobus_unregister(bp->mii_bus); |
9ce98140 MG |
4132 | if (np && of_phy_is_fixed_link(np)) |
4133 | of_phy_deregister_fixed_link(np); | |
fa6114d4 | 4134 | dev->phydev = NULL; |
298cf9be | 4135 | mdiobus_free(bp->mii_bus); |
5833e052 | 4136 | |
89e5785f | 4137 | unregister_netdev(dev); |
93b31f48 | 4138 | clk_disable_unprepare(bp->tx_clk); |
ace58010 | 4139 | clk_disable_unprepare(bp->hclk); |
ace58010 | 4140 | clk_disable_unprepare(bp->pclk); |
aead88bd | 4141 | clk_disable_unprepare(bp->rx_clk); |
dacdbb4d | 4142 | of_node_put(bp->phy_node); |
e965be7d | 4143 | free_netdev(dev); |
89e5785f HS |
4144 | } |
4145 | ||
4146 | return 0; | |
4147 | } | |
4148 | ||
d23823dd | 4149 | static int __maybe_unused macb_suspend(struct device *dev) |
c1f598fd | 4150 | { |
0dfc3e18 | 4151 | struct platform_device *pdev = to_platform_device(dev); |
c1f598fd HS |
4152 | struct net_device *netdev = platform_get_drvdata(pdev); |
4153 | struct macb *bp = netdev_priv(netdev); | |
4154 | ||
03fc4721 | 4155 | netif_carrier_off(netdev); |
c1f598fd HS |
4156 | netif_device_detach(netdev); |
4157 | ||
3e2a5e15 SP |
4158 | if (bp->wol & MACB_WOL_ENABLED) { |
4159 | macb_writel(bp, IER, MACB_BIT(WOL)); | |
4160 | macb_writel(bp, WOL, MACB_BIT(MAG)); | |
4161 | enable_irq_wake(bp->queues[0].irq); | |
4162 | } else { | |
4163 | clk_disable_unprepare(bp->tx_clk); | |
4164 | clk_disable_unprepare(bp->hclk); | |
4165 | clk_disable_unprepare(bp->pclk); | |
aead88bd | 4166 | clk_disable_unprepare(bp->rx_clk); |
3e2a5e15 | 4167 | } |
c1f598fd HS |
4168 | |
4169 | return 0; | |
4170 | } | |
4171 | ||
d23823dd | 4172 | static int __maybe_unused macb_resume(struct device *dev) |
c1f598fd | 4173 | { |
0dfc3e18 | 4174 | struct platform_device *pdev = to_platform_device(dev); |
c1f598fd HS |
4175 | struct net_device *netdev = platform_get_drvdata(pdev); |
4176 | struct macb *bp = netdev_priv(netdev); | |
4177 | ||
3e2a5e15 SP |
4178 | if (bp->wol & MACB_WOL_ENABLED) { |
4179 | macb_writel(bp, IDR, MACB_BIT(WOL)); | |
4180 | macb_writel(bp, WOL, 0); | |
4181 | disable_irq_wake(bp->queues[0].irq); | |
4182 | } else { | |
4183 | clk_prepare_enable(bp->pclk); | |
4184 | clk_prepare_enable(bp->hclk); | |
4185 | clk_prepare_enable(bp->tx_clk); | |
aead88bd | 4186 | clk_prepare_enable(bp->rx_clk); |
3e2a5e15 | 4187 | } |
c1f598fd HS |
4188 | |
4189 | netif_device_attach(netdev); | |
4190 | ||
4191 | return 0; | |
4192 | } | |
c1f598fd | 4193 | |
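/* Editor's note: the MACB_WOL_ENABLED branches above are only taken once
 * userspace opts in to magic-packet wake, e.g. "ethtool -s eth0 wol g"
 * (handled by macb_set_wol() in the ethtool ops), and only on devices whose
 * DT node carries the "magic-packet" property checked in macb_probe().
 * Command and interface name are illustrative.
 */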
0dfc3e18 SB |
4194 | static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume); |
4195 | ||
89e5785f | 4196 | static struct platform_driver macb_driver = { |
9e86d766 NR |
4197 | .probe = macb_probe, |
4198 | .remove = macb_remove, | |
89e5785f HS |
4199 | .driver = { |
4200 | .name = "macb", | |
fb97a846 | 4201 | .of_match_table = of_match_ptr(macb_dt_ids), |
0dfc3e18 | 4202 | .pm = &macb_pm_ops, |
89e5785f HS |
4203 | }, |
4204 | }; | |
4205 | ||
9e86d766 | 4206 | module_platform_driver(macb_driver); |
89e5785f HS |
4207 | |
4208 | MODULE_LICENSE("GPL"); | |
f75ba50b | 4209 | MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); |
e05503ef | 4210 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
72abb461 | 4211 | MODULE_ALIAS("platform:macb"); |