/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>

#include <asm/cacheflush.h>

#include "fec.h"

static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_init(struct net_device *ndev);

#define DRIVER_NAME	"fec"

#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
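/*
 * Note on the mapping above (derived from the queue handling further down):
 * work_rx/work_tx bits 0..2 map back to hardware queues 1, 2 and 0, so the
 * AVB class A and class B queues are serviced before the best-effort queue
 * when fec_enet_tx()/fec_enet_rx_queue() walk the work bitmaps.
 */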

/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE	(1 << 5)
#define FEC_ENET_RSEM_V	0x84
#define FEC_ENET_RSFL_V	16
#define FEC_ENET_RAEM_V	0x8
#define FEC_ENET_RAFL_V	0x8
#define FEC_ENET_OPD_V	0xFFF0

static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		.name = "imx25-fec",
		.driver_data = FEC_QUIRK_USE_GASKET,
	}, {
		.name = "imx27-fec",
		.driver_data = 0,
	}, {
		.name = "imx28-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
	}, {
		.name = "imx6q-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
	}, {
		.name = "mvf600-fec",
		.driver_data = FEC_QUIRK_ENET_MAC,
	}, {
		.name = "imx6sx-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
				FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);

enum imx_fec_type {
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
	IMX6Q_FEC,
	MVF600_FEC,
	IMX6SX_FEC,
};

static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero, assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined (CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1522
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1536

/* FEC receive acceleration */
#define FEC_RACC_IPDIS		(1 << 1)
#define FEC_RACC_PRODIS		(1 << 2)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
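/*
 * Illustrative only (the MDIO accessors live outside this excerpt): a read
 * of PHY register 'reg' on PHY address 'phy' would typically be started by
 * writing
 *
 *	FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(phy) |
 *	FEC_MMFR_RA(reg) | FEC_MMFR_TA
 *
 * to the MII management frame register and waiting for the MII event
 * before picking the result out with FEC_MMFR_DATA().
 */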

#define FEC_MII_TIMEOUT		30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2

#define COPYBREAK_DEFAULT	256

#define TSO_HEADER_SIZE		128
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	(addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
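/*
 * Buffers inside the tso_hdrs area belong to a mapping that is set up
 * separately for the queue, so the TX reclaim path uses IS_TSO_HEADER()
 * to skip the per-buffer dma_unmap_single() for them (see
 * fec_enet_tx_queue() below).
 */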

static int mii_cnt;

static inline
struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
				      struct fec_enet_private *fep,
				      int queue_id)
{
	struct bufdesc *new_bd = bdp + 1;
	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
	struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
	struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
	struct bufdesc_ex *ex_base;
	struct bufdesc *base;
	int ring_size;

	if (bdp >= txq->tx_bd_base) {
		base = txq->tx_bd_base;
		ring_size = txq->tx_ring_size;
		ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
	} else {
		base = rxq->rx_bd_base;
		ring_size = rxq->rx_ring_size;
		ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
	}

	if (fep->bufdesc_ex)
		return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
			ex_base : ex_new_bd);
	else
		return (new_bd >= (base + ring_size)) ?
			base : new_bd;
}

static inline
struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
				      struct fec_enet_private *fep,
				      int queue_id)
{
	struct bufdesc *new_bd = bdp - 1;
	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
	struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
	struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
	struct bufdesc_ex *ex_base;
	struct bufdesc *base;
	int ring_size;

	if (bdp >= txq->tx_bd_base) {
		base = txq->tx_bd_base;
		ring_size = txq->tx_ring_size;
		ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
	} else {
		base = rxq->rx_bd_base;
		ring_size = rxq->rx_ring_size;
		ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
	}

	if (fep->bufdesc_ex)
		return (struct bufdesc *)((ex_new_bd < ex_base) ?
			(ex_new_bd + ring_size) : ex_new_bd);
	else
		return (new_bd < base) ? (new_bd + ring_size) : new_bd;
}

static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
				 struct fec_enet_private *fep)
{
	return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
}

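/*
 * Free TX descriptors are counted as the ring distance from the most
 * recently queued descriptor (cur_tx) back around to the last reclaimed
 * one (dirty_tx), minus one so the two pointers never coincide when the
 * ring is completely full.
 */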
static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
					struct fec_enet_priv_tx_q *txq)
{
	int entries;

	entries = ((const char *)txq->dirty_tx -
			(const char *)txq->cur_tx) / fep->bufdesc_size - 1;

	return entries > 0 ? entries : entries + txq->tx_ring_size;
}

static void swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < len; i += 4, buf++)
		swab32s(buf);
}

static void swap_buffer2(void *dst_buf, void *src_buf, int len)
{
	int i;
	unsigned int *src = src_buf;
	unsigned int *dst = dst_buf;

	for (i = 0; i < len; i += 4, src++, dst++)
		*dst = swab32p(src);
}
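/*
 * swap_buffer()/swap_buffer2() service FEC_QUIRK_SWAP_FRAME parts whose DMA
 * engine presents frame data byte-swapped: the in-place variant is used on
 * the TX bounce-buffer path, the copying variant on the RX copybreak path
 * below.
 */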

static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	int index = 0;

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr     SC     addr       len  SKB\n");

	txq = fep->tx_queue[0];
	bdp = txq->tx_bd_base;

	do {
		pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
			index,
			bdp == txq->cur_tx ? 'S' : ' ',
			bdp == txq->dirty_tx ? 'H' : ' ',
			bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
			txq->tx_skbuff[index]);
		bdp = fec_enet_get_nextdesc(bdp, fep, 0);
		index++;
	} while (bdp != txq->tx_bd_base);
}

static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}

static int
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
			     struct sk_buff *skb,
			     struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp = txq->cur_tx;
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned short queue = skb_get_queue_mapping(skb);
	int frag, frag_len;
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;
	unsigned int index;
	void *bufaddr;
	dma_addr_t addr;
	int i;

	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
		ebdp = (struct bufdesc_ex *)bdp;

		status = bdp->cbd_sc;
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_shinfo(skb)->frags[frag].size;

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;
			}
		}

		if (fep->bufdesc_ex) {
			if (fep->quirks & FEC_QUIRK_HAS_AVB)
				estatus |= FEC_TX_BD_FTYPE(queue);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
			ebdp->cbd_bdu = 0;
			ebdp->cbd_esc = estatus;
		}

		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;

		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
		if (((unsigned long) bufaddr) & fep->tx_align ||
			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
			bufaddr = txq->tx_bounce[index];

			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
				swap_buffer(bufaddr, frag_len);
		}

		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;
		}

		bdp->cbd_bufaddr = addr;
		bdp->cbd_datlen = frag_len;
		bdp->cbd_sc = status;
	}

	txq->cur_tx = bdp;

	return 0;

dma_mapping_error:
	bdp = txq->cur_tx;
	for (i = 0; i < frag; i++) {
		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				 bdp->cbd_datlen, DMA_TO_DEVICE);
	}
	return NETDEV_TX_OK;
}

static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	void *bufaddr;
	dma_addr_t addr;
	unsigned short status;
	unsigned short buflen;
	unsigned short queue;
	unsigned int estatus = 0;
	unsigned int index;
	int entries_free;
	int ret;

	entries_free = fec_enet_get_free_txdesc_num(fep, txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for SG!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->cur_tx;
	status = bdp->cbd_sc;
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	buflen = skb_headlen(skb);

	queue = skb_get_queue_mapping(skb);
	index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
	if (((unsigned long) bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, buflen);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, buflen);
	}

	/* Push the data cache so the CPM does not get stale memory data. */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	if (nr_frags) {
		ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
		if (ret)
			return ret;
	} else {
		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;
		}
	}

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en))
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(queue);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = estatus;
	}

	last_bdp = txq->cur_tx;
	index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	bdp->cbd_datlen = buflen;
	bdp->cbd_bufaddr = addr;

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);

	skb_tx_timestamp(skb);

	txq->cur_tx = bdp;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));

	return 0;
}

static int
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
			  struct net_device *ndev,
			  struct bufdesc *bdp, int index, char *data,
			  int size, bool last_tcp, bool is_last)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short queue = skb_get_queue_mapping(skb);
	unsigned short status;
	unsigned int estatus = 0;
	dma_addr_t addr;

	status = bdp->cbd_sc;
	status &= ~BD_ENET_TX_STATS;

	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	if (((unsigned long) data) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], data, size);
		data = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, size);
	}

	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_BUSY;
	}

	bdp->cbd_datlen = size;
	bdp->cbd_bufaddr = addr;

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(queue);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = estatus;
	}

	/* Handle the last BD specially */
	if (last_tcp)
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
	if (is_last) {
		status |= BD_ENET_TX_INTR;
		if (fep->bufdesc_ex)
			ebdp->cbd_esc |= BD_ENET_TX_INT;
	}

	bdp->cbd_sc = status;

	return 0;
}

static int
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
			 struct sk_buff *skb, struct net_device *ndev,
			 struct bufdesc *bdp, int index)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short queue = skb_get_queue_mapping(skb);
	void *bufaddr;
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

	status = bdp->cbd_sc;
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
	if (((unsigned long)bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_BUSY;
		}
	}

	bdp->cbd_bufaddr = dmabuf;
	bdp->cbd_datlen = hdr_len;

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(queue);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = estatus;
	}

	bdp->cbd_sc = status;

	return 0;
}

static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len, data_left;
	struct bufdesc *bdp = txq->cur_tx;
	unsigned short queue = skb_get_queue_mapping(skb);
	struct tso_t tso;
	unsigned int index = 0;
	int ret;

	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
			bdp = fec_enet_get_nextdesc(bdp, fep, queue);
			index = fec_enet_get_bd_index(txq->tx_bd_base,
						      bdp, fep);
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
							bdp, index,
							tso.data, size,
							size == data_left,
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
	}

	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	skb_tx_timestamp(skb);
	txq->cur_tx = bdp;

	/* Trigger transmission start */
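	/* If FEC_QUIRK_ERR007885 is not set, or any of the four reads of the
	 * transmit descriptor active register returns zero, (re)write the
	 * register to kick the transmitter; on affected parts this avoids
	 * re-arming while the register still reads back as busy.
	 */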
	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
		writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));

	return 0;

err_release:
	/* TODO: Release all used data descriptors for TSO */
	return ret;
}

static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

	if (skb_is_gso(skb))
		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
	else
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
	if (ret)
		return ret;

	entries_free = fec_enet_get_free_txdesc_num(fep, txq);
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);

	return NETDEV_TX_OK;
}

/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned int i;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->rx_bd_base;

		for (i = 0; i < rxq->rx_ring_size; i++) {

			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = BD_ENET_RX_EMPTY;
			else
				bdp->cbd_sc = 0;
			bdp = fec_enet_get_nextdesc(bdp, fep, q);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, fep, q);
		bdp->cbd_sc |= BD_SC_WRAP;

		rxq->cur_rx = rxq->rx_bd_base;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->tx_bd_base;
		txq->cur_tx = bdp;

		for (i = 0; i < txq->tx_ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = 0;
			if (txq->tx_skbuff[i]) {
				dev_kfree_skb_any(txq->tx_skbuff[i]);
				txq->tx_skbuff[i] = NULL;
			}
			bdp->cbd_bufaddr = 0;
			bdp = fec_enet_get_nextdesc(bdp, fep, q);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, fep, q);
		bdp->cbd_sc |= BD_SC_WRAP;
		txq->dirty_tx = bdp;
	}
}

static void fec_enet_active_rxring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
}

static void fec_enet_enable_ring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	int i;

	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
		writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

		/* enable DMA1/2 */
		if (i)
			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
		writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));

		/* enable DMA1/2 */
		if (i)
			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));
	}
}

static void fec_enet_reset_skb(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	int i, j;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];

		for (j = 0; j < txq->tx_ring_size; j++) {
			if (txq->tx_skbuff[j]) {
				dev_kfree_skb_any(txq->tx_skbuff[j]);
				txq->tx_skbuff[j] = NULL;
			}
		}
	}
}

/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
fec_restart(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = 0x2; /* ETHEREN */

	/* Whack a reset. We should wait for this.
	 * On the i.MX6SX SoC the ENET block sits on the AXI bus, so we
	 * disable the MAC instead of resetting it.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}

	/*
	 * enet-mac reset will reset mac address registers too,
	 * so need to reconfigure it.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
	}

	/* Clear any outstanding interrupt. */
	writel(0xffffffff, fep->hwp + FEC_IEVENT);

	fec_enet_bd_init(ndev);

	fec_enet_enable_ring(ndev);

	/* Reset tx SKB buffers. */
	fec_enet_reset_skb(ndev);

	/* Enable MII mode */
	if (fep->full_duplex == DUPLEX_FULL) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
	/* set RX checksum */
	val = readl(fep->hwp + FEC_RACC);
	if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
		val |= FEC_RACC_OPTIONS;
	else
		val &= ~FEC_RACC_OPTIONS;
	writel(val, fep->hwp + FEC_RACC);
#endif

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= (1 << 8);
		else
			rcntl &= ~(1 << 8);

		/* 1G, 100M or 10M */
		if (fep->phy_dev) {
			if (fep->phy_dev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (fep->phy_dev->speed == SPEED_100)
				rcntl &= ~(1 << 9);
			else
				rcntl |= (1 << 9);
		}
	} else {
#ifdef FEC_MIIGSK_ENR
		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
			u32 cfgr;
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII, 25 MHz, no loopback, no echo
			 */
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
			if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}

#if !defined(CONFIG_M5272)
	/* enable pause frame*/
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
	     fep->phy_dev && fep->phy_dev->pause)) {
		rcntl |= FEC_ENET_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	} else {
		rcntl &= ~FEC_ENET_FCE;
	}
#endif /* !defined(CONFIG_M5272) */

	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		ecntl |= (1 << 8);
		/* enable ENET store and forward mode */
		writel(1 << 8, fep->hwp + FEC_X_WMRK);
	}

	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);

#ifndef CONFIG_M5272
	/* Enable the MIB statistic event counters */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	fec_enet_active_rxring(ndev);

	if (fep->bufdesc_ex)
		fec_ptp_start_cyclecounter(ndev);

	/* Enable interrupts we wish to service */
	if (fep->link)
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	else
		writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);

	/* Init the interrupt coalescing */
	fec_enet_itr_coal_init(ndev);

}

static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset. We should wait for this.
	 * On the i.MX6SX SoC the ENET block sits on the AXI bus, so we
	 * disable the MAC instead of resetting it.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		writel(2, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
}


static void
fec_timeout(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	fec_dump(ndev);

	ndev->stats.tx_errors++;

	schedule_work(&fep->tx_timeout_work);
}

static void fec_enet_timeout_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, tx_timeout_work);
	struct net_device *ndev = fep->netdev;

	rtnl_lock();
	if (netif_device_present(ndev) || netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}
	rtnl_unlock();
}

static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
	struct skb_shared_hwtstamps *hwtstamps)
{
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_cyc2time(&fep->tc, ts);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}

static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int index = 0;
	int entries_free;

	fep = netdev_priv(ndev);

	queue_id = FEC_ENET_GET_QUQUE(queue_id);

	txq = fep->tx_queue[queue_id];
	/* get next bdp of dirty_tx */
	nq = netdev_get_tx_queue(ndev, queue_id);
	bdp = txq->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {

		/* current queue is empty */
		if (bdp == txq->cur_tx)
			break;

		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);

		skb = txq->tx_skbuff[index];
		txq->tx_skbuff[index] = NULL;
		if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
					 bdp->cbd_datlen, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;
		if (!skb) {
			bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
			continue;
		}

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
		}

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
			fep->bufdesc_ex) {
			struct skb_shared_hwtstamps shhwtstamps;
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
			skb_tstamp_tx(skb, &shhwtstamps);
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);

		txq->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (netif_queue_stopped(ndev)) {
			entries_free = fec_enet_get_free_txdesc_num(fep, txq);
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(nq);
		}
	}

	/* ERR006538: Keep the transmitter going */
	if (bdp != txq->cur_tx &&
	    readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
		writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
}

static void
fec_enet_tx(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u16 queue_id;
	/* First process class A queue, then Class B and Best Effort queue */
	for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
		clear_bit(queue_id, &fep->work_tx);
		fec_enet_tx_queue(ndev, queue_id);
	}
	return;
}

static int
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int off;

	off = ((unsigned long)skb->data) & fep->rx_align;
	if (off)
		skb_reserve(skb, fep->rx_align + 1 - off);

	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
					  FEC_ENET_RX_FRSIZE - fep->rx_align,
					  DMA_FROM_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
		if (net_ratelimit())
			netdev_err(ndev, "Rx DMA memory map failed\n");
		return -ENOMEM;
	}

	return 0;
}

static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
			       struct bufdesc *bdp, u32 length, bool swap)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sk_buff *new_skb;

	if (length > fep->rx_copybreak)
		return false;

	new_skb = netdev_alloc_skb(ndev, length);
	if (!new_skb)
		return false;

	dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_RX_FRSIZE - fep->rx_align,
				DMA_FROM_DEVICE);
	if (!swap)
		memcpy(new_skb->data, (*skb)->data, length);
	else
		swap_buffer2(new_skb->data, (*skb)->data, length);
	*skb = new_skb;

	return true;
}

/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb_new = NULL;
	struct sk_buff *skb;
	ushort pkt_len;
	__u8 *data;
	int pkt_received = 0;
	struct bufdesc_ex *ebdp = NULL;
	bool vlan_packet_rcvd = false;
	u16 vlan_tag;
	int index = 0;
	bool is_copybreak;
	bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif
	queue_id = FEC_ENET_GET_QUQUE(queue_id);
	rxq = fep->rx_queue[queue_id];

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = rxq->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			netdev_err(ndev, "rcv is not +last\n");


		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			ndev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				ndev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer. So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			ndev->stats.rx_errors++;
			ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		ndev->stats.rx_bytes += pkt_len;

		index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
		skb = rxq->rx_skbuff[index];

		/* The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
						  need_swap);
		if (!is_copybreak) {
			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
			if (unlikely(!skb_new)) {
				ndev->stats.rx_dropped++;
				goto rx_processing_done;
			}
			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
					 FEC_ENET_RX_FRSIZE - fep->rx_align,
					 DMA_FROM_DEVICE);
		}

		prefetch(skb->data - NET_IP_ALIGN);
		skb_put(skb, pkt_len - 4);
		data = skb->data;
		if (!is_copybreak && need_swap)
			swap_buffer(data, pkt_len);

		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
			fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
					(struct vlan_hdr *) (data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);

			vlan_packet_rcvd = true;

			skb_copy_to_linear_data_offset(skb, VLAN_HLEN,
						       data, (2 * ETH_ALEN));
			skb_pull(skb, VLAN_HLEN);
		}

		skb->protocol = eth_type_trans(skb, ndev);

		/* Get receive timestamp from the skb */
		if (fep->hwts_rx_en && fep->bufdesc_ex)
			fec_enet_hwtstamp(fep, ebdp->ts,
					  skb_hwtstamps(skb));

		if (fep->bufdesc_ex &&
		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
			if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
				/* don't check it */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb_checksum_none_assert(skb);
			}
		}

		/* Handle received VLAN packets */
		if (vlan_packet_rcvd)
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);

		napi_gro_receive(&fep->napi, skb);

		if (is_copybreak) {
			dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
						   FEC_ENET_RX_FRSIZE - fep->rx_align,
						   DMA_FROM_DEVICE);
		} else {
			rxq->rx_skbuff[index] = skb_new;
			fec_enet_new_rxbdp(ndev, bdp, skb_new);
		}

rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = BD_ENET_RX_INT;
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}

		/* Update BD pointer to next entry */
		bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames. On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
	}
	rxq->cur_rx = bdp;
	return pkt_received;
}

static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	int pkt_received = 0;
	u16 queue_id;
	struct fec_enet_private *fep = netdev_priv(ndev);

	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
		clear_bit(queue_id, &fep->work_rx);
		pkt_received += fec_enet_rx_queue(ndev,
					budget - pkt_received, queue_id);
	}
	return pkt_received;
}

static bool
fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
{
	if (int_events == 0)
		return false;

	if (int_events & FEC_ENET_RXF)
		fep->work_rx |= (1 << 2);
	if (int_events & FEC_ENET_RXF_1)
		fep->work_rx |= (1 << 0);
	if (int_events & FEC_ENET_RXF_2)
		fep->work_rx |= (1 << 1);

	if (int_events & FEC_ENET_TXF)
		fep->work_tx |= (1 << 2);
	if (int_events & FEC_ENET_TXF_1)
		fep->work_tx |= (1 << 0);
	if (int_events & FEC_ENET_TXF_2)
		fep->work_tx |= (1 << 1);

	return true;
}

static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF;
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	int_events = readl(fep->hwp + FEC_IEVENT);
	writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
	fec_enet_collect_events(fep, int_events);

	if (int_events & napi_mask) {
		ret = IRQ_HANDLED;

		/* Disable the NAPI interrupts */
		writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
		napi_schedule(&fep->napi);
	}

	if (int_events & FEC_ENET_MII) {
		ret = IRQ_HANDLED;
		complete(&fep->mdio_done);
	}

	if (fep->ptp_clock)
		fec_ptp_check_pps_event(fep);

	return ret;
}

static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);
	int pkts;

	/*
	 * Clear any pending transmit or receive interrupts before
	 * processing the rings to avoid racing with the hardware.
	 */
	writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT);

	pkts = fec_enet_rx(ndev, budget);

	fec_enet_tx(ndev);

	if (pkts < budget) {
		napi_complete(napi);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}
	return pkts;
}
1610 | ||
1611 | /* ------------------------------------------------------------------------- */ | |
1612 | static void fec_get_mac(struct net_device *ndev) | |
1613 | { | |
1614 | struct fec_enet_private *fep = netdev_priv(ndev); | |
1615 | struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); | |
1616 | unsigned char *iap, tmpaddr[ETH_ALEN]; | |
1617 | ||
1618 | /* | |
1619 | * try to get the MAC address in the following order: | |
1620 | * | |
1621 | * 1) module parameter via kernel command line in form | |
1622 | * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 | |
1623 | */ | |
1624 | iap = macaddr; | |
1625 | ||
1626 | /* | |
1627 | * 2) from device tree data | |
1628 | */ | |
1629 | if (!is_valid_ether_addr(iap)) { | |
1630 | struct device_node *np = fep->pdev->dev.of_node; | |
1631 | if (np) { | |
1632 | const char *mac = of_get_mac_address(np); | |
1633 | if (mac) | |
1634 | iap = (unsigned char *) mac; | |
1635 | } | |
1636 | } | |
1637 | ||
1638 | /* | |
1639 | * 3) from flash or fuse (via platform data) | |
1640 | */ | |
1641 | if (!is_valid_ether_addr(iap)) { | |
1642 | #ifdef CONFIG_M5272 | |
1643 | if (FEC_FLASHMAC) | |
1644 | iap = (unsigned char *)FEC_FLASHMAC; | |
1645 | #else | |
1646 | if (pdata) | |
1647 | iap = (unsigned char *)&pdata->mac; | |
1648 | #endif | |
1649 | } | |
1650 | ||
1651 | /* | |
1652 | * 4) FEC mac registers set by bootloader | |
1653 | */ | |
1654 | if (!is_valid_ether_addr(iap)) { | |
1655 | *((__be32 *) &tmpaddr[0]) = | |
1656 | cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); | |
1657 | *((__be16 *) &tmpaddr[4]) = | |
1658 | cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); | |
1659 | iap = &tmpaddr[0]; | |
1660 | } | |
1661 | ||
1662 | /* | |
1663 | * 5) random mac address | |
1664 | */ | |
1665 | if (!is_valid_ether_addr(iap)) { | |
1666 | /* Report it and use a random ethernet address instead */ | |
1667 | netdev_err(ndev, "Invalid MAC address: %pM\n", iap); | |
1668 | eth_hw_addr_random(ndev); | |
1669 | netdev_info(ndev, "Using random MAC address: %pM\n", | |
1670 | ndev->dev_addr); | |
1671 | return; | |
1672 | } | |
1673 | ||
1674 | memcpy(ndev->dev_addr, iap, ETH_ALEN); | |
1675 | ||
1676 | /* Adjust MAC if using macaddr */ | |
1677 | if (iap == macaddr) | |
1678 | ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id; | |
1679 | } | |
1680 | ||
1681 | /* ------------------------------------------------------------------------- */ | |
1682 | ||
1683 | /* | |
1684 | * Phy section | |
1685 | */ | |
1686 | static void fec_enet_adjust_link(struct net_device *ndev) | |
1687 | { | |
1688 | struct fec_enet_private *fep = netdev_priv(ndev); | |
1689 | struct phy_device *phy_dev = fep->phy_dev; | |
1690 | int status_change = 0; | |
1691 | ||
1692 | /* Prevent a state halted on mii error */ | |
1693 | if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { | |
1694 | phy_dev->state = PHY_RESUMING; | |
1695 | return; | |
1696 | } | |
1697 | ||
1698 | /* | |
1699 | * If the netdev is down, or is going down, we're not interested | |
1700 | * in link state events, so just mark our idea of the link as down | |
1701 | * and ignore the event. | |
1702 | */ | |
1703 | if (!netif_running(ndev) || !netif_device_present(ndev)) { | |
1704 | fep->link = 0; | |
1705 | } else if (phy_dev->link) { | |
1706 | if (!fep->link) { | |
1707 | fep->link = phy_dev->link; | |
1708 | status_change = 1; | |
1709 | } | |
1710 | ||
1711 | if (fep->full_duplex != phy_dev->duplex) { | |
1712 | fep->full_duplex = phy_dev->duplex; | |
1713 | status_change = 1; | |
1714 | } | |
1715 | ||
1716 | if (phy_dev->speed != fep->speed) { | |
1717 | fep->speed = phy_dev->speed; | |
1718 | status_change = 1; | |
1719 | } | |
1720 | ||
1721 | /* if any of the above changed restart the FEC */ | |
1722 | if (status_change) { | |
1723 | napi_disable(&fep->napi); | |
1724 | netif_tx_lock_bh(ndev); | |
1725 | fec_restart(ndev); | |
1726 | netif_wake_queue(ndev); | |
1727 | netif_tx_unlock_bh(ndev); | |
1728 | napi_enable(&fep->napi); | |
1729 | } | |
1730 | } else { | |
1731 | if (fep->link) { | |
1732 | napi_disable(&fep->napi); | |
1733 | netif_tx_lock_bh(ndev); | |
1734 | fec_stop(ndev); | |
1735 | netif_tx_unlock_bh(ndev); | |
1736 | napi_enable(&fep->napi); | |
1737 | fep->link = phy_dev->link; | |
1738 | status_change = 1; | |
1739 | } | |
1740 | } | |
1741 | ||
1742 | if (status_change) | |
1743 | phy_print_status(phy_dev); | |
1744 | } | |
1745 | ||
1746 | static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | |
1747 | { | |
1748 | struct fec_enet_private *fep = bus->priv; | |
1749 | unsigned long time_left; | |
1750 | ||
1751 | fep->mii_timeout = 0; | |
1752 | init_completion(&fep->mdio_done); | |
1753 | ||
1754 | /* start a read op */ | |
1755 | writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | | |
1756 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | | |
1757 | FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); | |
1758 | ||
1759 | /* wait for end of transfer */ | |
1760 | time_left = wait_for_completion_timeout(&fep->mdio_done, | |
1761 | usecs_to_jiffies(FEC_MII_TIMEOUT)); | |
1762 | if (time_left == 0) { | |
1763 | fep->mii_timeout = 1; | |
1764 | netdev_err(fep->netdev, "MDIO read timeout\n"); | |
1765 | return -ETIMEDOUT; | |
1766 | } | |
1767 | ||
1768 | /* return value */ | |
1769 | return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); | |
1770 | } | |
1771 | ||
1772 | static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, | |
1773 | u16 value) | |
1774 | { | |
1775 | struct fec_enet_private *fep = bus->priv; | |
1776 | unsigned long time_left; | |
1777 | ||
1778 | fep->mii_timeout = 0; | |
1779 | init_completion(&fep->mdio_done); | |
1780 | ||
1781 | /* start a write op */ | |
1782 | writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | | |
1783 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | | |
1784 | FEC_MMFR_TA | FEC_MMFR_DATA(value), | |
1785 | fep->hwp + FEC_MII_DATA); | |
1786 | ||
1787 | /* wait for end of transfer */ | |
1788 | time_left = wait_for_completion_timeout(&fep->mdio_done, | |
1789 | usecs_to_jiffies(FEC_MII_TIMEOUT)); | |
1790 | if (time_left == 0) { | |
1791 | fep->mii_timeout = 1; | |
1792 | netdev_err(fep->netdev, "MDIO write timeout\n"); | |
1793 | return -ETIMEDOUT; | |
1794 | } | |
1795 | ||
1796 | return 0; | |
1797 | } | |
1798 | ||
1799 | static int fec_enet_clk_enable(struct net_device *ndev, bool enable) | |
1800 | { | |
1801 | struct fec_enet_private *fep = netdev_priv(ndev); | |
1802 | int ret; | |
1803 | ||
1804 | if (enable) { | |
1805 | ret = clk_prepare_enable(fep->clk_ahb); | |
1806 | if (ret) | |
1807 | return ret; | |
1808 | ret = clk_prepare_enable(fep->clk_ipg); | |
1809 | if (ret) | |
1810 | goto failed_clk_ipg; | |
1811 | if (fep->clk_enet_out) { | |
1812 | ret = clk_prepare_enable(fep->clk_enet_out); | |
1813 | if (ret) | |
1814 | goto failed_clk_enet_out; | |
1815 | } | |
1816 | if (fep->clk_ptp) { | |
1817 | mutex_lock(&fep->ptp_clk_mutex); | |
1818 | ret = clk_prepare_enable(fep->clk_ptp); | |
1819 | if (ret) { | |
1820 | mutex_unlock(&fep->ptp_clk_mutex); | |
1821 | goto failed_clk_ptp; | |
1822 | } else { | |
1823 | fep->ptp_clk_on = true; | |
1824 | } | |
1825 | mutex_unlock(&fep->ptp_clk_mutex); | |
1826 | } | |
1827 | if (fep->clk_ref) { | |
1828 | ret = clk_prepare_enable(fep->clk_ref); | |
1829 | if (ret) | |
1830 | goto failed_clk_ref; | |
1831 | } | |
1832 | } else { | |
1833 | clk_disable_unprepare(fep->clk_ahb); | |
1834 | clk_disable_unprepare(fep->clk_ipg); | |
1835 | if (fep->clk_enet_out) | |
1836 | clk_disable_unprepare(fep->clk_enet_out); | |
1837 | if (fep->clk_ptp) { | |
1838 | mutex_lock(&fep->ptp_clk_mutex); | |
1839 | clk_disable_unprepare(fep->clk_ptp); | |
1840 | fep->ptp_clk_on = false; | |
1841 | mutex_unlock(&fep->ptp_clk_mutex); | |
1842 | } | |
1843 | if (fep->clk_ref) | |
1844 | clk_disable_unprepare(fep->clk_ref); | |
1845 | } | |
1846 | ||
1847 | return 0; | |
1848 | ||
1849 | failed_clk_ref: | |
1850 | if (fep->clk_ref) | |
1851 | clk_disable_unprepare(fep->clk_ref); | |
1852 | failed_clk_ptp: | |
1853 | if (fep->clk_enet_out) | |
1854 | clk_disable_unprepare(fep->clk_enet_out); | |
1855 | failed_clk_enet_out: | |
1856 | clk_disable_unprepare(fep->clk_ipg); | |
1857 | failed_clk_ipg: | |
1858 | clk_disable_unprepare(fep->clk_ahb); | |
1859 | ||
1860 | return ret; | |
1861 | } | |
1862 | ||
1863 | static int fec_enet_mii_probe(struct net_device *ndev) | |
1864 | { | |
1865 | struct fec_enet_private *fep = netdev_priv(ndev); | |
1866 | struct phy_device *phy_dev = NULL; | |
1867 | char mdio_bus_id[MII_BUS_ID_SIZE]; | |
1868 | char phy_name[MII_BUS_ID_SIZE + 3]; | |
1869 | int phy_id; | |
1870 | int dev_id = fep->dev_id; | |
1871 | ||
1872 | fep->phy_dev = NULL; | |
1873 | ||
1874 | if (fep->phy_node) { | |
1875 | phy_dev = of_phy_connect(ndev, fep->phy_node, | |
1876 | &fec_enet_adjust_link, 0, | |
1877 | fep->phy_interface); | |
1878 | if (!phy_dev) | |
1879 | return -ENODEV; | |
1880 | } else { | |
1881 | /* check for attached phy */ | |
1882 | for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { | |
1883 | if ((fep->mii_bus->phy_mask & (1 << phy_id))) | |
1884 | continue; | |
1885 | if (fep->mii_bus->phy_map[phy_id] == NULL) | |
1886 | continue; | |
1887 | if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) | |
1888 | continue; | |
1889 | if (dev_id--) | |
1890 | continue; | |
1891 | strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); | |
1892 | break; | |
1893 | } | |
1894 | ||
1895 | if (phy_id >= PHY_MAX_ADDR) { | |
1896 | netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); | |
1897 | strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); | |
1898 | phy_id = 0; | |
1899 | } | |
1900 | ||
1901 | snprintf(phy_name, sizeof(phy_name), | |
1902 | PHY_ID_FMT, mdio_bus_id, phy_id); | |
1903 | phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, | |
1904 | fep->phy_interface); | |
1905 | } | |
1906 | ||
1907 | if (IS_ERR(phy_dev)) { | |
1908 | netdev_err(ndev, "could not attach to PHY\n"); | |
1909 | return PTR_ERR(phy_dev); | |
1910 | } | |
1911 | ||
1912 | /* mask with MAC supported features */ | |
1913 | if (fep->quirks & FEC_QUIRK_HAS_GBIT) { | |
1914 | phy_dev->supported &= PHY_GBIT_FEATURES; | |
1915 | phy_dev->supported &= ~SUPPORTED_1000baseT_Half; | |
1916 | #if !defined(CONFIG_M5272) | |
1917 | phy_dev->supported |= SUPPORTED_Pause; | |
1918 | #endif | |
1919 | } else { | |
1920 | phy_dev->supported &= PHY_BASIC_FEATURES; | |
1921 | } | |
1922 | ||
1923 | phy_dev->advertising = phy_dev->supported; | |
1924 | ||
1925 | fep->phy_dev = phy_dev; | |
1926 | fep->link = 0; | |
1927 | fep->full_duplex = 0; | |
1928 | ||
1929 | netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", | |
1930 | fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), | |
1931 | fep->phy_dev->irq); | |
1932 | ||
1933 | return 0; | |
1934 | } | |
1935 | ||
1936 | static int fec_enet_mii_init(struct platform_device *pdev) | |
1937 | { | |
1938 | static struct mii_bus *fec0_mii_bus; | |
1939 | struct net_device *ndev = platform_get_drvdata(pdev); | |
1940 | struct fec_enet_private *fep = netdev_priv(ndev); | |
1941 | struct device_node *node; | |
1942 | int err = -ENXIO, i; | |
1943 | ||
1944 | /* | |
1945 | * The dual FEC interfaces are not equivalent when using enet-mac. | |
1946 | * Here are the differences: | |
1947 | * | |
1948 | * - fec0 supports MII & RMII modes while fec1 only supports RMII | |
1949 | * - fec0 acts as the 1588 time master while fec1 is slave | |
1950 | * - external phys can only be configured by fec0 | |
1951 | * | |
1952 | * That is to say fec1 can not work independently. It only works | |
1953 | * when fec0 is working. The reason behind this design is that the | |
1954 | * second interface is added primarily for Switch mode. | |
1955 | * | |
1956 | * Because of the last point above, both phys are attached on fec0 | |
1957 | * mdio interface in board design, and need to be configured by | |
1958 | * fec0 mii_bus. | |
1959 | */ | |
1960 | if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { | |
1961 | /* fec1 uses fec0 mii_bus */ | |
1962 | if (mii_cnt && fec0_mii_bus) { | |
1963 | fep->mii_bus = fec0_mii_bus; | |
1964 | mii_cnt++; | |
1965 | return 0; | |
1966 | } | |
1967 | return -ENOENT; | |
1968 | } | |
1969 | ||
1970 | fep->mii_timeout = 0; | |
1971 | ||
1972 | /* | |
1973 | * Set the MII speed (MDC) to at most 2.5 MHz. | |
1974 | * | |
1975 | * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while | |
1976 | * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28 | |
1977 | * Reference Manual has an error here, which is corrected in the | |
1978 | * i.MX6Q documentation. | |
1979 | */ | |
1980 | fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); | |
1981 | if (fep->quirks & FEC_QUIRK_ENET_MAC) | |
1982 | fep->phy_speed--; | |
1983 | fep->phy_speed <<= 1; | |
1984 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); | |
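| /* | |
|  * Worked example (illustrative only, assuming a 66 MHz ipg clock on an | |
|  * ENET-MAC based SoC): DIV_ROUND_UP(66000000, 5000000) = 14, minus one | |
|  * for ENET-MAC gives MII_SPEED = 13, and shifting left by one for the | |
|  * register field gives 26.  The resulting MDC rate is then | |
|  * 66 MHz / ((13 + 1) * 2) = ~2.36 MHz, just under the 2.5 MHz limit. | |
|  */ | |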
1985 | ||
1986 | fep->mii_bus = mdiobus_alloc(); | |
1987 | if (fep->mii_bus == NULL) { | |
1988 | err = -ENOMEM; | |
1989 | goto err_out; | |
1990 | } | |
1991 | ||
1992 | fep->mii_bus->name = "fec_enet_mii_bus"; | |
1993 | fep->mii_bus->read = fec_enet_mdio_read; | |
1994 | fep->mii_bus->write = fec_enet_mdio_write; | |
1995 | snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", | |
1996 | pdev->name, fep->dev_id + 1); | |
1997 | fep->mii_bus->priv = fep; | |
1998 | fep->mii_bus->parent = &pdev->dev; | |
1999 | ||
2000 | fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); | |
2001 | if (!fep->mii_bus->irq) { | |
2002 | err = -ENOMEM; | |
2003 | goto err_out_free_mdiobus; | |
2004 | } | |
2005 | ||
2006 | for (i = 0; i < PHY_MAX_ADDR; i++) | |
2007 | fep->mii_bus->irq[i] = PHY_POLL; | |
2008 | ||
2009 | node = of_get_child_by_name(pdev->dev.of_node, "mdio"); | |
2010 | if (node) { | |
2011 | err = of_mdiobus_register(fep->mii_bus, node); | |
2012 | of_node_put(node); | |
2013 | } else { | |
2014 | err = mdiobus_register(fep->mii_bus); | |
2015 | } | |
2016 | ||
2017 | if (err) | |
2018 | goto err_out_free_mdio_irq; | |
2019 | ||
2020 | mii_cnt++; | |
2021 | ||
2022 | /* save fec0 mii_bus */ | |
2023 | if (fep->quirks & FEC_QUIRK_ENET_MAC) | |
2024 | fec0_mii_bus = fep->mii_bus; | |
2025 | ||
2026 | return 0; | |
2027 | ||
2028 | err_out_free_mdio_irq: | |
2029 | kfree(fep->mii_bus->irq); | |
2030 | err_out_free_mdiobus: | |
2031 | mdiobus_free(fep->mii_bus); | |
2032 | err_out: | |
2033 | return err; | |
2034 | } | |
2035 | ||
2036 | static void fec_enet_mii_remove(struct fec_enet_private *fep) | |
2037 | { | |
2038 | if (--mii_cnt == 0) { | |
2039 | mdiobus_unregister(fep->mii_bus); | |
2040 | kfree(fep->mii_bus->irq); | |
2041 | mdiobus_free(fep->mii_bus); | |
2042 | } | |
2043 | } | |
2044 | ||
2045 | static int fec_enet_get_settings(struct net_device *ndev, | |
2046 | struct ethtool_cmd *cmd) | |
2047 | { | |
2048 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2049 | struct phy_device *phydev = fep->phy_dev; | |
2050 | ||
2051 | if (!phydev) | |
2052 | return -ENODEV; | |
2053 | ||
2054 | return phy_ethtool_gset(phydev, cmd); | |
2055 | } | |
2056 | ||
2057 | static int fec_enet_set_settings(struct net_device *ndev, | |
2058 | struct ethtool_cmd *cmd) | |
2059 | { | |
2060 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2061 | struct phy_device *phydev = fep->phy_dev; | |
2062 | ||
2063 | if (!phydev) | |
2064 | return -ENODEV; | |
2065 | ||
2066 | return phy_ethtool_sset(phydev, cmd); | |
2067 | } | |
2068 | ||
2069 | static void fec_enet_get_drvinfo(struct net_device *ndev, | |
2070 | struct ethtool_drvinfo *info) | |
2071 | { | |
2072 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2073 | ||
2074 | strlcpy(info->driver, fep->pdev->dev.driver->name, | |
2075 | sizeof(info->driver)); | |
2076 | strlcpy(info->version, "Revision: 1.0", sizeof(info->version)); | |
2077 | strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); | |
2078 | } | |
2079 | ||
2080 | static int fec_enet_get_ts_info(struct net_device *ndev, | |
2081 | struct ethtool_ts_info *info) | |
2082 | { | |
2083 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2084 | ||
2085 | if (fep->bufdesc_ex) { | |
2086 | ||
2087 | info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | | |
2088 | SOF_TIMESTAMPING_RX_SOFTWARE | | |
2089 | SOF_TIMESTAMPING_SOFTWARE | | |
2090 | SOF_TIMESTAMPING_TX_HARDWARE | | |
2091 | SOF_TIMESTAMPING_RX_HARDWARE | | |
2092 | SOF_TIMESTAMPING_RAW_HARDWARE; | |
2093 | if (fep->ptp_clock) | |
2094 | info->phc_index = ptp_clock_index(fep->ptp_clock); | |
2095 | else | |
2096 | info->phc_index = -1; | |
2097 | ||
2098 | info->tx_types = (1 << HWTSTAMP_TX_OFF) | | |
2099 | (1 << HWTSTAMP_TX_ON); | |
2100 | ||
2101 | info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | | |
2102 | (1 << HWTSTAMP_FILTER_ALL); | |
2103 | return 0; | |
2104 | } else { | |
2105 | return ethtool_op_get_ts_info(ndev, info); | |
2106 | } | |
2107 | } | |
2108 | ||
2109 | #if !defined(CONFIG_M5272) | |
2110 | ||
2111 | static void fec_enet_get_pauseparam(struct net_device *ndev, | |
2112 | struct ethtool_pauseparam *pause) | |
2113 | { | |
2114 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2115 | ||
2116 | pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; | |
2117 | pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; | |
2118 | pause->rx_pause = pause->tx_pause; | |
2119 | } | |
2120 | ||
2121 | static int fec_enet_set_pauseparam(struct net_device *ndev, | |
2122 | struct ethtool_pauseparam *pause) | |
2123 | { | |
2124 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2125 | ||
2126 | if (!fep->phy_dev) | |
2127 | return -ENODEV; | |
2128 | ||
2129 | if (pause->tx_pause != pause->rx_pause) { | |
2130 | netdev_info(ndev, | |
2131 | "hardware only support enable/disable both tx and rx"); | |
2132 | return -EINVAL; | |
2133 | } | |
2134 | ||
2135 | fep->pause_flag = 0; | |
2136 | ||
2137 | /* tx pause must be the same as rx pause */ | |
2138 | fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; | |
2139 | fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; | |
2140 | ||
2141 | if (pause->rx_pause || pause->autoneg) { | |
2142 | fep->phy_dev->supported |= ADVERTISED_Pause; | |
2143 | fep->phy_dev->advertising |= ADVERTISED_Pause; | |
2144 | } else { | |
2145 | fep->phy_dev->supported &= ~ADVERTISED_Pause; | |
2146 | fep->phy_dev->advertising &= ~ADVERTISED_Pause; | |
2147 | } | |
2148 | ||
2149 | if (pause->autoneg) { | |
2150 | if (netif_running(ndev)) | |
2151 | fec_stop(ndev); | |
2152 | phy_start_aneg(fep->phy_dev); | |
2153 | } | |
2154 | if (netif_running(ndev)) { | |
2155 | napi_disable(&fep->napi); | |
2156 | netif_tx_lock_bh(ndev); | |
2157 | fec_restart(ndev); | |
2158 | netif_wake_queue(ndev); | |
2159 | netif_tx_unlock_bh(ndev); | |
2160 | napi_enable(&fep->napi); | |
2161 | } | |
2162 | ||
2163 | return 0; | |
2164 | } | |
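| /* | |
|  * Example usage (not part of the driver): the pause settings handled above | |
|  * map to the standard ethtool pause interface, e.g. | |
|  * "ethtool -A eth0 autoneg on rx on tx on" (the interface name is only an | |
|  * example). | |
|  */ | |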
2165 | ||
2166 | static const struct fec_stat { | |
2167 | char name[ETH_GSTRING_LEN]; | |
2168 | u16 offset; | |
2169 | } fec_stats[] = { | |
2170 | /* RMON TX */ | |
2171 | { "tx_dropped", RMON_T_DROP }, | |
2172 | { "tx_packets", RMON_T_PACKETS }, | |
2173 | { "tx_broadcast", RMON_T_BC_PKT }, | |
2174 | { "tx_multicast", RMON_T_MC_PKT }, | |
2175 | { "tx_crc_errors", RMON_T_CRC_ALIGN }, | |
2176 | { "tx_undersize", RMON_T_UNDERSIZE }, | |
2177 | { "tx_oversize", RMON_T_OVERSIZE }, | |
2178 | { "tx_fragment", RMON_T_FRAG }, | |
2179 | { "tx_jabber", RMON_T_JAB }, | |
2180 | { "tx_collision", RMON_T_COL }, | |
2181 | { "tx_64byte", RMON_T_P64 }, | |
2182 | { "tx_65to127byte", RMON_T_P65TO127 }, | |
2183 | { "tx_128to255byte", RMON_T_P128TO255 }, | |
2184 | { "tx_256to511byte", RMON_T_P256TO511 }, | |
2185 | { "tx_512to1023byte", RMON_T_P512TO1023 }, | |
2186 | { "tx_1024to2047byte", RMON_T_P1024TO2047 }, | |
2187 | { "tx_GTE2048byte", RMON_T_P_GTE2048 }, | |
2188 | { "tx_octets", RMON_T_OCTETS }, | |
2189 | ||
2190 | /* IEEE TX */ | |
2191 | { "IEEE_tx_drop", IEEE_T_DROP }, | |
2192 | { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, | |
2193 | { "IEEE_tx_1col", IEEE_T_1COL }, | |
2194 | { "IEEE_tx_mcol", IEEE_T_MCOL }, | |
2195 | { "IEEE_tx_def", IEEE_T_DEF }, | |
2196 | { "IEEE_tx_lcol", IEEE_T_LCOL }, | |
2197 | { "IEEE_tx_excol", IEEE_T_EXCOL }, | |
2198 | { "IEEE_tx_macerr", IEEE_T_MACERR }, | |
2199 | { "IEEE_tx_cserr", IEEE_T_CSERR }, | |
2200 | { "IEEE_tx_sqe", IEEE_T_SQE }, | |
2201 | { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, | |
2202 | { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, | |
2203 | ||
2204 | /* RMON RX */ | |
2205 | { "rx_packets", RMON_R_PACKETS }, | |
2206 | { "rx_broadcast", RMON_R_BC_PKT }, | |
2207 | { "rx_multicast", RMON_R_MC_PKT }, | |
2208 | { "rx_crc_errors", RMON_R_CRC_ALIGN }, | |
2209 | { "rx_undersize", RMON_R_UNDERSIZE }, | |
2210 | { "rx_oversize", RMON_R_OVERSIZE }, | |
2211 | { "rx_fragment", RMON_R_FRAG }, | |
2212 | { "rx_jabber", RMON_R_JAB }, | |
2213 | { "rx_64byte", RMON_R_P64 }, | |
2214 | { "rx_65to127byte", RMON_R_P65TO127 }, | |
2215 | { "rx_128to255byte", RMON_R_P128TO255 }, | |
2216 | { "rx_256to511byte", RMON_R_P256TO511 }, | |
2217 | { "rx_512to1023byte", RMON_R_P512TO1023 }, | |
2218 | { "rx_1024to2047byte", RMON_R_P1024TO2047 }, | |
2219 | { "rx_GTE2048byte", RMON_R_P_GTE2048 }, | |
2220 | { "rx_octets", RMON_R_OCTETS }, | |
2221 | ||
2222 | /* IEEE RX */ | |
2223 | { "IEEE_rx_drop", IEEE_R_DROP }, | |
2224 | { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, | |
2225 | { "IEEE_rx_crc", IEEE_R_CRC }, | |
2226 | { "IEEE_rx_align", IEEE_R_ALIGN }, | |
2227 | { "IEEE_rx_macerr", IEEE_R_MACERR }, | |
2228 | { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, | |
2229 | { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, | |
2230 | }; | |
2231 | ||
2232 | static void fec_enet_get_ethtool_stats(struct net_device *dev, | |
2233 | struct ethtool_stats *stats, u64 *data) | |
2234 | { | |
2235 | struct fec_enet_private *fep = netdev_priv(dev); | |
2236 | int i; | |
2237 | ||
2238 | for (i = 0; i < ARRAY_SIZE(fec_stats); i++) | |
2239 | data[i] = readl(fep->hwp + fec_stats[i].offset); | |
2240 | } | |
2241 | ||
2242 | static void fec_enet_get_strings(struct net_device *netdev, | |
2243 | u32 stringset, u8 *data) | |
2244 | { | |
2245 | int i; | |
2246 | switch (stringset) { | |
2247 | case ETH_SS_STATS: | |
2248 | for (i = 0; i < ARRAY_SIZE(fec_stats); i++) | |
2249 | memcpy(data + i * ETH_GSTRING_LEN, | |
2250 | fec_stats[i].name, ETH_GSTRING_LEN); | |
2251 | break; | |
2252 | } | |
2253 | } | |
2254 | ||
2255 | static int fec_enet_get_sset_count(struct net_device *dev, int sset) | |
2256 | { | |
2257 | switch (sset) { | |
2258 | case ETH_SS_STATS: | |
2259 | return ARRAY_SIZE(fec_stats); | |
2260 | default: | |
2261 | return -EOPNOTSUPP; | |
2262 | } | |
2263 | } | |
2264 | #endif /* !defined(CONFIG_M5272) */ | |
2265 | ||
2266 | static int fec_enet_nway_reset(struct net_device *dev) | |
2267 | { | |
2268 | struct fec_enet_private *fep = netdev_priv(dev); | |
2269 | struct phy_device *phydev = fep->phy_dev; | |
2270 | ||
2271 | if (!phydev) | |
2272 | return -ENODEV; | |
2273 | ||
2274 | return genphy_restart_aneg(phydev); | |
2275 | } | |
2276 | ||
2277 | /* ITR clock source is enet system clock (clk_ahb). | |
2278 | * TCTT unit is cycle_ns * 64 cycle | |
2279 | * So, the ICTT value = X us / (cycle_ns * 64) | |
2280 | */ | |
2281 | static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) | |
2282 | { | |
2283 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2284 | ||
2285 | return us * (fep->itr_clk_rate / 64000) / 1000; | |
2286 | } | |
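| /* | |
|  * Worked example (illustrative only, assuming itr_clk_rate = 66 MHz): | |
|  * 66000000 / 64000 = 1031, so a 100 us threshold becomes | |
|  * 100 * 1031 / 1000 = 103 ICTT units, i.e. roughly | |
|  * 100 us / (15.15 ns * 64) as described in the comment above. | |
|  */ | |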
2287 | ||
2288 | /* Set threshold for interrupt coalescing */ | |
2289 | static void fec_enet_itr_coal_set(struct net_device *ndev) | |
2290 | { | |
2291 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2292 | int rx_itr, tx_itr; | |
2293 | ||
2294 | if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) | |
2295 | return; | |
2296 | ||
2297 | /* Must be greater than zero to avoid unpredictable behavior */ | |
2298 | if (!fep->rx_time_itr || !fep->rx_pkts_itr || | |
2299 | !fep->tx_time_itr || !fep->tx_pkts_itr) | |
2300 | return; | |
2301 | ||
2302 | /* Select enet system clock as Interrupt Coalescing | |
2303 | * timer Clock Source | |
2304 | */ | |
2305 | rx_itr = FEC_ITR_CLK_SEL; | |
2306 | tx_itr = FEC_ITR_CLK_SEL; | |
2307 | ||
2308 | /* set ICFT and ICTT */ | |
2309 | rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); | |
2310 | rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); | |
2311 | tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); | |
2312 | tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); | |
2313 | ||
2314 | rx_itr |= FEC_ITR_EN; | |
2315 | tx_itr |= FEC_ITR_EN; | |
2316 | ||
2317 | writel(tx_itr, fep->hwp + FEC_TXIC0); | |
2318 | writel(rx_itr, fep->hwp + FEC_RXIC0); | |
2319 | writel(tx_itr, fep->hwp + FEC_TXIC1); | |
2320 | writel(rx_itr, fep->hwp + FEC_RXIC1); | |
2321 | writel(tx_itr, fep->hwp + FEC_TXIC2); | |
2322 | writel(rx_itr, fep->hwp + FEC_RXIC2); | |
2323 | } | |
2324 | ||
2325 | static int | |
2326 | fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) | |
2327 | { | |
2328 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2329 | ||
2330 | if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) | |
2331 | return -EOPNOTSUPP; | |
2332 | ||
2333 | ec->rx_coalesce_usecs = fep->rx_time_itr; | |
2334 | ec->rx_max_coalesced_frames = fep->rx_pkts_itr; | |
2335 | ||
2336 | ec->tx_coalesce_usecs = fep->tx_time_itr; | |
2337 | ec->tx_max_coalesced_frames = fep->tx_pkts_itr; | |
2338 | ||
2339 | return 0; | |
2340 | } | |
2341 | ||
2342 | static int | |
2343 | fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) | |
2344 | { | |
2345 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2346 | unsigned int cycle; | |
2347 | ||
2348 | if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) | |
2349 | return -EOPNOTSUPP; | |
2350 | ||
2351 | if (ec->rx_max_coalesced_frames > 255) { | |
2352 | pr_err("Rx coalesced frames exceed hardware limiation"); | |
2353 | return -EINVAL; | |
2354 | } | |
2355 | ||
2356 | if (ec->tx_max_coalesced_frames > 255) { | |
2357 | pr_err("Tx coalesced frame exceed hardware limiation"); | |
2358 | return -EINVAL; | |
2359 | } | |
2360 | ||
2361 | cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs); | |
2362 | if (cycle > 0xFFFF) { | |
2363 | pr_err("Rx coalesced usec exceed hardware limitation"); | |
2364 | return -EINVAL; | |
2365 | } | |
2366 | ||
2367 | cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs); | |
2368 | if (cycle > 0xFFFF) { | |
2369 | pr_err("Tx coalesced usec exceed hardware limitation"); | |
2370 | return -EINVAL; | |
2371 | } | |
2372 | ||
2373 | fep->rx_time_itr = ec->rx_coalesce_usecs; | |
2374 | fep->rx_pkts_itr = ec->rx_max_coalesced_frames; | |
2375 | ||
2376 | fep->tx_time_itr = ec->tx_coalesce_usecs; | |
2377 | fep->tx_pkts_itr = ec->tx_max_coalesced_frames; | |
2378 | ||
2379 | fec_enet_itr_coal_set(ndev); | |
2380 | ||
2381 | return 0; | |
2382 | } | |
2383 | ||
2384 | static void fec_enet_itr_coal_init(struct net_device *ndev) | |
2385 | { | |
2386 | struct ethtool_coalesce ec; | |
2387 | ||
2388 | ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; | |
2389 | ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; | |
2390 | ||
2391 | ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; | |
2392 | ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; | |
2393 | ||
2394 | fec_enet_set_coalesce(ndev, &ec); | |
2395 | } | |
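| /* | |
|  * The defaults programmed above can later be changed from userspace via the | |
|  * standard coalescing interface, e.g. | |
|  * "ethtool -C eth0 rx-usecs 100 rx-frames 32" (values and interface name | |
|  * are illustrative only). | |
|  */ | |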
2396 | ||
2397 | static int fec_enet_get_tunable(struct net_device *netdev, | |
2398 | const struct ethtool_tunable *tuna, | |
2399 | void *data) | |
2400 | { | |
2401 | struct fec_enet_private *fep = netdev_priv(netdev); | |
2402 | int ret = 0; | |
2403 | ||
2404 | switch (tuna->id) { | |
2405 | case ETHTOOL_RX_COPYBREAK: | |
2406 | *(u32 *)data = fep->rx_copybreak; | |
2407 | break; | |
2408 | default: | |
2409 | ret = -EINVAL; | |
2410 | break; | |
2411 | } | |
2412 | ||
2413 | return ret; | |
2414 | } | |
2415 | ||
2416 | static int fec_enet_set_tunable(struct net_device *netdev, | |
2417 | const struct ethtool_tunable *tuna, | |
2418 | const void *data) | |
2419 | { | |
2420 | struct fec_enet_private *fep = netdev_priv(netdev); | |
2421 | int ret = 0; | |
2422 | ||
2423 | switch (tuna->id) { | |
2424 | case ETHTOOL_RX_COPYBREAK: | |
2425 | fep->rx_copybreak = *(u32 *)data; | |
2426 | break; | |
2427 | default: | |
2428 | ret = -EINVAL; | |
2429 | break; | |
2430 | } | |
2431 | ||
2432 | return ret; | |
2433 | } | |
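| /* | |
|  * Example usage (illustrative only): the rx_copybreak tunable handled above | |
|  * corresponds to "ethtool --set-tunable eth0 rx-copybreak 256" and | |
|  * "ethtool --get-tunable eth0 rx-copybreak" in recent ethtool releases. | |
|  */ | |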
2434 | ||
2435 | static const struct ethtool_ops fec_enet_ethtool_ops = { | |
2436 | .get_settings = fec_enet_get_settings, | |
2437 | .set_settings = fec_enet_set_settings, | |
2438 | .get_drvinfo = fec_enet_get_drvinfo, | |
2439 | .nway_reset = fec_enet_nway_reset, | |
2440 | .get_link = ethtool_op_get_link, | |
2441 | .get_coalesce = fec_enet_get_coalesce, | |
2442 | .set_coalesce = fec_enet_set_coalesce, | |
2443 | #ifndef CONFIG_M5272 | |
2444 | .get_pauseparam = fec_enet_get_pauseparam, | |
2445 | .set_pauseparam = fec_enet_set_pauseparam, | |
2446 | .get_strings = fec_enet_get_strings, | |
2447 | .get_ethtool_stats = fec_enet_get_ethtool_stats, | |
2448 | .get_sset_count = fec_enet_get_sset_count, | |
2449 | #endif | |
2450 | .get_ts_info = fec_enet_get_ts_info, | |
2451 | .get_tunable = fec_enet_get_tunable, | |
2452 | .set_tunable = fec_enet_set_tunable, | |
2453 | }; | |
2454 | ||
2455 | static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) | |
2456 | { | |
2457 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2458 | struct phy_device *phydev = fep->phy_dev; | |
2459 | ||
2460 | if (!netif_running(ndev)) | |
2461 | return -EINVAL; | |
2462 | ||
2463 | if (!phydev) | |
2464 | return -ENODEV; | |
2465 | ||
2466 | if (fep->bufdesc_ex) { | |
2467 | if (cmd == SIOCSHWTSTAMP) | |
2468 | return fec_ptp_set(ndev, rq); | |
2469 | if (cmd == SIOCGHWTSTAMP) | |
2470 | return fec_ptp_get(ndev, rq); | |
2471 | } | |
2472 | ||
2473 | return phy_mii_ioctl(phydev, rq, cmd); | |
2474 | } | |
2475 | ||
2476 | static void fec_enet_free_buffers(struct net_device *ndev) | |
2477 | { | |
2478 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2479 | unsigned int i; | |
2480 | struct sk_buff *skb; | |
2481 | struct bufdesc *bdp; | |
2482 | struct fec_enet_priv_tx_q *txq; | |
2483 | struct fec_enet_priv_rx_q *rxq; | |
2484 | unsigned int q; | |
2485 | ||
2486 | for (q = 0; q < fep->num_rx_queues; q++) { | |
2487 | rxq = fep->rx_queue[q]; | |
2488 | bdp = rxq->rx_bd_base; | |
2489 | for (i = 0; i < rxq->rx_ring_size; i++) { | |
2490 | skb = rxq->rx_skbuff[i]; | |
2491 | rxq->rx_skbuff[i] = NULL; | |
2492 | if (skb) { | |
2493 | dma_unmap_single(&fep->pdev->dev, | |
2494 | bdp->cbd_bufaddr, | |
2495 | FEC_ENET_RX_FRSIZE - fep->rx_align, | |
2496 | DMA_FROM_DEVICE); | |
2497 | dev_kfree_skb(skb); | |
2498 | } | |
2499 | bdp = fec_enet_get_nextdesc(bdp, fep, q); | |
2500 | } | |
2501 | } | |
2502 | ||
2503 | for (q = 0; q < fep->num_tx_queues; q++) { | |
2504 | txq = fep->tx_queue[q]; | |
2505 | bdp = txq->tx_bd_base; | |
2506 | for (i = 0; i < txq->tx_ring_size; i++) { | |
2507 | kfree(txq->tx_bounce[i]); | |
2508 | txq->tx_bounce[i] = NULL; | |
2509 | skb = txq->tx_skbuff[i]; | |
2510 | txq->tx_skbuff[i] = NULL; | |
2511 | dev_kfree_skb(skb); | |
2512 | } | |
2513 | } | |
2514 | } | |
2515 | ||
2516 | static void fec_enet_free_queue(struct net_device *ndev) | |
2517 | { | |
2518 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2519 | int i; | |
2520 | struct fec_enet_priv_tx_q *txq; | |
2521 | ||
2522 | for (i = 0; i < fep->num_tx_queues; i++) | |
2523 | if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { | |
2524 | txq = fep->tx_queue[i]; | |
2525 | dma_free_coherent(NULL, | |
2526 | txq->tx_ring_size * TSO_HEADER_SIZE, | |
2527 | txq->tso_hdrs, | |
2528 | txq->tso_hdrs_dma); | |
2529 | } | |
2530 | ||
2531 | for (i = 0; i < fep->num_rx_queues; i++) | |
2532 | if (fep->rx_queue[i]) | |
2533 | kfree(fep->rx_queue[i]); | |
2534 | ||
2535 | for (i = 0; i < fep->num_tx_queues; i++) | |
2536 | if (fep->tx_queue[i]) | |
2537 | kfree(fep->tx_queue[i]); | |
2538 | } | |
2539 | ||
2540 | static int fec_enet_alloc_queue(struct net_device *ndev) | |
2541 | { | |
2542 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2543 | int i; | |
2544 | int ret = 0; | |
2545 | struct fec_enet_priv_tx_q *txq; | |
2546 | ||
2547 | for (i = 0; i < fep->num_tx_queues; i++) { | |
2548 | txq = kzalloc(sizeof(*txq), GFP_KERNEL); | |
2549 | if (!txq) { | |
2550 | ret = -ENOMEM; | |
2551 | goto alloc_failed; | |
2552 | } | |
2553 | ||
2554 | fep->tx_queue[i] = txq; | |
2555 | txq->tx_ring_size = TX_RING_SIZE; | |
2556 | fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size; | |
2557 | ||
2558 | txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; | |
2559 | txq->tx_wake_threshold = | |
2560 | (txq->tx_ring_size - txq->tx_stop_threshold) / 2; | |
2561 | ||
2562 | txq->tso_hdrs = dma_alloc_coherent(NULL, | |
2563 | txq->tx_ring_size * TSO_HEADER_SIZE, | |
2564 | &txq->tso_hdrs_dma, | |
2565 | GFP_KERNEL); | |
2566 | if (!txq->tso_hdrs) { | |
2567 | ret = -ENOMEM; | |
2568 | goto alloc_failed; | |
2569 | } | |
2570 | } | |
2571 | ||
2572 | for (i = 0; i < fep->num_rx_queues; i++) { | |
2573 | fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), | |
2574 | GFP_KERNEL); | |
2575 | if (!fep->rx_queue[i]) { | |
2576 | ret = -ENOMEM; | |
2577 | goto alloc_failed; | |
2578 | } | |
2579 | ||
2580 | fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE; | |
2581 | fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size; | |
2582 | } | |
2583 | return ret; | |
2584 | ||
2585 | alloc_failed: | |
2586 | fec_enet_free_queue(ndev); | |
2587 | return ret; | |
2588 | } | |
2589 | ||
2590 | static int | |
2591 | fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) | |
2592 | { | |
2593 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2594 | unsigned int i; | |
2595 | struct sk_buff *skb; | |
2596 | struct bufdesc *bdp; | |
2597 | struct fec_enet_priv_rx_q *rxq; | |
2598 | ||
2599 | rxq = fep->rx_queue[queue]; | |
2600 | bdp = rxq->rx_bd_base; | |
2601 | for (i = 0; i < rxq->rx_ring_size; i++) { | |
2602 | skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); | |
2603 | if (!skb) | |
2604 | goto err_alloc; | |
2605 | ||
2606 | if (fec_enet_new_rxbdp(ndev, bdp, skb)) { | |
2607 | dev_kfree_skb(skb); | |
2608 | goto err_alloc; | |
2609 | } | |
2610 | ||
2611 | rxq->rx_skbuff[i] = skb; | |
2612 | bdp->cbd_sc = BD_ENET_RX_EMPTY; | |
2613 | ||
2614 | if (fep->bufdesc_ex) { | |
2615 | struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; | |
2616 | ebdp->cbd_esc = BD_ENET_RX_INT; | |
2617 | } | |
2618 | ||
2619 | bdp = fec_enet_get_nextdesc(bdp, fep, queue); | |
2620 | } | |
2621 | ||
2622 | /* Set the last buffer to wrap. */ | |
2623 | bdp = fec_enet_get_prevdesc(bdp, fep, queue); | |
2624 | bdp->cbd_sc |= BD_SC_WRAP; | |
2625 | return 0; | |
2626 | ||
2627 | err_alloc: | |
2628 | fec_enet_free_buffers(ndev); | |
2629 | return -ENOMEM; | |
2630 | } | |
2631 | ||
2632 | static int | |
2633 | fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) | |
2634 | { | |
2635 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2636 | unsigned int i; | |
2637 | struct bufdesc *bdp; | |
2638 | struct fec_enet_priv_tx_q *txq; | |
2639 | ||
2640 | txq = fep->tx_queue[queue]; | |
2641 | bdp = txq->tx_bd_base; | |
2642 | for (i = 0; i < txq->tx_ring_size; i++) { | |
2643 | txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); | |
2644 | if (!txq->tx_bounce[i]) | |
2645 | goto err_alloc; | |
2646 | ||
2647 | bdp->cbd_sc = 0; | |
2648 | bdp->cbd_bufaddr = 0; | |
2649 | ||
2650 | if (fep->bufdesc_ex) { | |
2651 | struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; | |
2652 | ebdp->cbd_esc = BD_ENET_TX_INT; | |
2653 | } | |
2654 | ||
2655 | bdp = fec_enet_get_nextdesc(bdp, fep, queue); | |
2656 | } | |
2657 | ||
2658 | /* Set the last buffer to wrap. */ | |
2659 | bdp = fec_enet_get_prevdesc(bdp, fep, queue); | |
2660 | bdp->cbd_sc |= BD_SC_WRAP; | |
2661 | ||
2662 | return 0; | |
2663 | ||
2664 | err_alloc: | |
2665 | fec_enet_free_buffers(ndev); | |
2666 | return -ENOMEM; | |
2667 | } | |
2668 | ||
2669 | static int fec_enet_alloc_buffers(struct net_device *ndev) | |
2670 | { | |
2671 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2672 | unsigned int i; | |
2673 | ||
2674 | for (i = 0; i < fep->num_rx_queues; i++) | |
2675 | if (fec_enet_alloc_rxq_buffers(ndev, i)) | |
2676 | return -ENOMEM; | |
2677 | ||
2678 | for (i = 0; i < fep->num_tx_queues; i++) | |
2679 | if (fec_enet_alloc_txq_buffers(ndev, i)) | |
2680 | return -ENOMEM; | |
2681 | return 0; | |
2682 | } | |
2683 | ||
2684 | static int | |
2685 | fec_enet_open(struct net_device *ndev) | |
2686 | { | |
2687 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2688 | int ret; | |
2689 | ||
2690 | pinctrl_pm_select_default_state(&fep->pdev->dev); | |
2691 | ret = fec_enet_clk_enable(ndev, true); | |
2692 | if (ret) | |
2693 | return ret; | |
2694 | ||
2695 | /* I should reset the ring buffers here, but I don't yet know | |
2696 | * a simple way to do that. | |
2697 | */ | |
2698 | ||
2699 | ret = fec_enet_alloc_buffers(ndev); | |
2700 | if (ret) | |
2701 | goto err_enet_alloc; | |
2702 | ||
2703 | /* Probe and connect to the PHY when opening the interface */ | |
2704 | ret = fec_enet_mii_probe(ndev); | |
2705 | if (ret) | |
2706 | goto err_enet_mii_probe; | |
2707 | ||
2708 | fec_restart(ndev); | |
2709 | napi_enable(&fep->napi); | |
2710 | phy_start(fep->phy_dev); | |
2711 | netif_tx_start_all_queues(ndev); | |
2712 | ||
2713 | return 0; | |
2714 | ||
2715 | err_enet_mii_probe: | |
2716 | fec_enet_free_buffers(ndev); | |
2717 | err_enet_alloc: | |
2718 | fec_enet_clk_enable(ndev, false); | |
2719 | pinctrl_pm_select_sleep_state(&fep->pdev->dev); | |
2720 | return ret; | |
2721 | } | |
2722 | ||
2723 | static int | |
2724 | fec_enet_close(struct net_device *ndev) | |
2725 | { | |
2726 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2727 | ||
2728 | phy_stop(fep->phy_dev); | |
2729 | ||
2730 | if (netif_device_present(ndev)) { | |
2731 | napi_disable(&fep->napi); | |
2732 | netif_tx_disable(ndev); | |
2733 | fec_stop(ndev); | |
2734 | } | |
2735 | ||
2736 | phy_disconnect(fep->phy_dev); | |
2737 | fep->phy_dev = NULL; | |
2738 | ||
2739 | fec_enet_clk_enable(ndev, false); | |
2740 | pinctrl_pm_select_sleep_state(&fep->pdev->dev); | |
2741 | fec_enet_free_buffers(ndev); | |
2742 | ||
2743 | return 0; | |
2744 | } | |
2745 | ||
2746 | /* Set or clear the multicast filter for this adaptor. | |
2747 | * Skeleton taken from sunlance driver. | |
2748 | * The CPM Ethernet implementation allows Multicast as well as individual | |
2749 | * MAC address filtering. Some of the drivers check to make sure it is | |
2750 | * a group multicast address, and discard those that are not. I guess I | |
2751 | * will do the same for now, but just remove the test if you want | |
2752 | * individual filtering as well (do the upper net layers want or support | |
2753 | * this kind of feature?). | |
2754 | */ | |
2755 | ||
2756 | #define HASH_BITS 6 /* #bits in hash */ | |
2757 | #define CRC32_POLY 0xEDB88320 | |
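| /* | |
|  * The 64-bit multicast hash filter is split across two 32-bit registers and | |
|  * the 6-bit hash selects a single bit; for example (illustrative values | |
|  * only) hash 0x25 (37) sets bit 5 of FEC_GRP_HASH_TABLE_HIGH while hash | |
|  * 0x0a (10) sets bit 10 of FEC_GRP_HASH_TABLE_LOW, as implemented below. | |
|  */ | |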
2758 | ||
2759 | static void set_multicast_list(struct net_device *ndev) | |
2760 | { | |
2761 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2762 | struct netdev_hw_addr *ha; | |
2763 | unsigned int i, bit, data, crc, tmp; | |
2764 | unsigned char hash; | |
2765 | ||
2766 | if (ndev->flags & IFF_PROMISC) { | |
2767 | tmp = readl(fep->hwp + FEC_R_CNTRL); | |
2768 | tmp |= 0x8; | |
2769 | writel(tmp, fep->hwp + FEC_R_CNTRL); | |
2770 | return; | |
2771 | } | |
2772 | ||
2773 | tmp = readl(fep->hwp + FEC_R_CNTRL); | |
2774 | tmp &= ~0x8; | |
2775 | writel(tmp, fep->hwp + FEC_R_CNTRL); | |
2776 | ||
2777 | if (ndev->flags & IFF_ALLMULTI) { | |
2778 | /* Catch all multicast addresses, so set the | |
2779 | * filter to all 1's | |
2780 | */ | |
2781 | writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | |
2782 | writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); | |
2783 | ||
2784 | return; | |
2785 | } | |
2786 | ||
2787 | /* Clear the filter and add the addresses to the hash registers | |
2788 | */ | |
2789 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | |
2790 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); | |
2791 | ||
2792 | netdev_for_each_mc_addr(ha, ndev) { | |
2793 | /* calculate crc32 value of mac address */ | |
2794 | crc = 0xffffffff; | |
2795 | ||
2796 | for (i = 0; i < ndev->addr_len; i++) { | |
2797 | data = ha->addr[i]; | |
2798 | for (bit = 0; bit < 8; bit++, data >>= 1) { | |
2799 | crc = (crc >> 1) ^ | |
2800 | (((crc ^ data) & 1) ? CRC32_POLY : 0); | |
2801 | } | |
2802 | } | |
2803 | ||
2804 | /* only upper 6 bits (HASH_BITS) are used | |
2805 | * which point to a specific bit in the hash registers | |
2806 | */ | |
2807 | hash = (crc >> (32 - HASH_BITS)) & 0x3f; | |
2808 | ||
2809 | if (hash > 31) { | |
2810 | tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | |
2811 | tmp |= 1 << (hash - 32); | |
2812 | writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | |
2813 | } else { | |
2814 | tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); | |
2815 | tmp |= 1 << hash; | |
2816 | writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); | |
2817 | } | |
2818 | } | |
2819 | } | |
2820 | ||
2821 | /* Set a MAC change in hardware. */ | |
2822 | static int | |
2823 | fec_set_mac_address(struct net_device *ndev, void *p) | |
2824 | { | |
2825 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2826 | struct sockaddr *addr = p; | |
2827 | ||
2828 | if (addr) { | |
2829 | if (!is_valid_ether_addr(addr->sa_data)) | |
2830 | return -EADDRNOTAVAIL; | |
2831 | memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); | |
2832 | } | |
2833 | ||
2834 | writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | | |
2835 | (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), | |
2836 | fep->hwp + FEC_ADDR_LOW); | |
2837 | writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), | |
2838 | fep->hwp + FEC_ADDR_HIGH); | |
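| /* | |
|  * Worked example (illustrative address only): for 00:04:9f:01:30:e0 the | |
|  * writes above program FEC_ADDR_LOW = 0x00049f01 and the upper 16 bits of | |
|  * FEC_ADDR_HIGH = 0x30e0, i.e. the address bytes in network byte order. | |
|  */ | |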
2839 | return 0; | |
2840 | } | |
2841 | ||
2842 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
2843 | /** | |
2844 | * fec_poll_controller - FEC Poll controller function | |
2845 | * @dev: The FEC network adapter | |
2846 | * | |
2847 | * Polled functionality used by netconsole and others in non-interrupt mode | |
2848 | * | |
2849 | */ | |
2850 | static void fec_poll_controller(struct net_device *dev) | |
2851 | { | |
2852 | int i; | |
2853 | struct fec_enet_private *fep = netdev_priv(dev); | |
2854 | ||
2855 | for (i = 0; i < FEC_IRQ_NUM; i++) { | |
2856 | if (fep->irq[i] > 0) { | |
2857 | disable_irq(fep->irq[i]); | |
2858 | fec_enet_interrupt(fep->irq[i], dev); | |
2859 | enable_irq(fep->irq[i]); | |
2860 | } | |
2861 | } | |
2862 | } | |
2863 | #endif | |
2864 | ||
2865 | #define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM | |
2866 | static inline void fec_enet_set_netdev_features(struct net_device *netdev, | |
2867 | netdev_features_t features) | |
2868 | { | |
2869 | struct fec_enet_private *fep = netdev_priv(netdev); | |
2870 | netdev_features_t changed = features ^ netdev->features; | |
2871 | ||
2872 | netdev->features = features; | |
2873 | ||
2874 | /* Receive checksum has been changed */ | |
2875 | if (changed & NETIF_F_RXCSUM) { | |
2876 | if (features & NETIF_F_RXCSUM) | |
2877 | fep->csum_flags |= FLAG_RX_CSUM_ENABLED; | |
2878 | else | |
2879 | fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; | |
2880 | } | |
2881 | } | |
2882 | ||
2883 | static int fec_set_features(struct net_device *netdev, | |
2884 | netdev_features_t features) | |
2885 | { | |
2886 | struct fec_enet_private *fep = netdev_priv(netdev); | |
2887 | netdev_features_t changed = features ^ netdev->features; | |
2888 | ||
2889 | if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) { | |
2890 | napi_disable(&fep->napi); | |
2891 | netif_tx_lock_bh(netdev); | |
2892 | fec_stop(netdev); | |
2893 | fec_enet_set_netdev_features(netdev, features); | |
2894 | fec_restart(netdev); | |
2895 | netif_tx_wake_all_queues(netdev); | |
2896 | netif_tx_unlock_bh(netdev); | |
2897 | napi_enable(&fep->napi); | |
2898 | } else { | |
2899 | fec_enet_set_netdev_features(netdev, features); | |
2900 | } | |
2901 | ||
2902 | return 0; | |
2903 | } | |
2904 | ||
2905 | static const struct net_device_ops fec_netdev_ops = { | |
2906 | .ndo_open = fec_enet_open, | |
2907 | .ndo_stop = fec_enet_close, | |
2908 | .ndo_start_xmit = fec_enet_start_xmit, | |
2909 | .ndo_set_rx_mode = set_multicast_list, | |
2910 | .ndo_change_mtu = eth_change_mtu, | |
2911 | .ndo_validate_addr = eth_validate_addr, | |
2912 | .ndo_tx_timeout = fec_timeout, | |
2913 | .ndo_set_mac_address = fec_set_mac_address, | |
2914 | .ndo_do_ioctl = fec_enet_ioctl, | |
2915 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
2916 | .ndo_poll_controller = fec_poll_controller, | |
2917 | #endif | |
2918 | .ndo_set_features = fec_set_features, | |
2919 | }; | |
2920 | ||
2921 | /* | |
2922 | * XXX: We need to clean up on failure exits here. | |
2923 | * | |
2924 | */ | |
2925 | static int fec_enet_init(struct net_device *ndev) | |
2926 | { | |
2927 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2928 | struct fec_enet_priv_tx_q *txq; | |
2929 | struct fec_enet_priv_rx_q *rxq; | |
2930 | struct bufdesc *cbd_base; | |
2931 | dma_addr_t bd_dma; | |
2932 | int bd_size; | |
2933 | unsigned int i; | |
2934 | ||
2935 | #if defined(CONFIG_ARM) | |
2936 | fep->rx_align = 0xf; | |
2937 | fep->tx_align = 0xf; | |
2938 | #else | |
2939 | fep->rx_align = 0x3; | |
2940 | fep->tx_align = 0x3; | |
2941 | #endif | |
2942 | ||
2943 | fec_enet_alloc_queue(ndev); | |
2944 | ||
2945 | if (fep->bufdesc_ex) | |
2946 | fep->bufdesc_size = sizeof(struct bufdesc_ex); | |
2947 | else | |
2948 | fep->bufdesc_size = sizeof(struct bufdesc); | |
2949 | bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * | |
2950 | fep->bufdesc_size; | |
2951 | ||
2952 | /* Allocate memory for buffer descriptors. */ | |
2953 | cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, | |
2954 | GFP_KERNEL); | |
2955 | if (!cbd_base) { | |
2956 | return -ENOMEM; | |
2957 | } | |
2958 | ||
2959 | memset(cbd_base, 0, bd_size); | |
2960 | ||
2961 | /* Get the Ethernet address */ | |
2962 | fec_get_mac(ndev); | |
2963 | /* make sure MAC we just acquired is programmed into the hw */ | |
2964 | fec_set_mac_address(ndev, NULL); | |
2965 | ||
2966 | /* Set receive and transmit descriptor base. */ | |
2967 | for (i = 0; i < fep->num_rx_queues; i++) { | |
2968 | rxq = fep->rx_queue[i]; | |
2969 | rxq->index = i; | |
2970 | rxq->rx_bd_base = (struct bufdesc *)cbd_base; | |
2971 | rxq->bd_dma = bd_dma; | |
2972 | if (fep->bufdesc_ex) { | |
2973 | bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size; | |
2974 | cbd_base = (struct bufdesc *) | |
2975 | (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); | |
2976 | } else { | |
2977 | bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size; | |
2978 | cbd_base += rxq->rx_ring_size; | |
2979 | } | |
2980 | } | |
2981 | ||
2982 | for (i = 0; i < fep->num_tx_queues; i++) { | |
2983 | txq = fep->tx_queue[i]; | |
2984 | txq->index = i; | |
2985 | txq->tx_bd_base = (struct bufdesc *)cbd_base; | |
2986 | txq->bd_dma = bd_dma; | |
2987 | if (fep->bufdesc_ex) { | |
2988 | bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; | |
2989 | cbd_base = (struct bufdesc *) | |
2990 | (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); | |
2991 | } else { | |
2992 | bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; | |
2993 | cbd_base += txq->tx_ring_size; | |
2994 | } | |
2995 | } | |
2996 | ||
2997 | ||
2998 | /* The FEC Ethernet specific entries in the device structure */ | |
2999 | ndev->watchdog_timeo = TX_TIMEOUT; | |
3000 | ndev->netdev_ops = &fec_netdev_ops; | |
3001 | ndev->ethtool_ops = &fec_enet_ethtool_ops; | |
3002 | ||
3003 | writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); | |
3004 | netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); | |
3005 | ||
3006 | if (fep->quirks & FEC_QUIRK_HAS_VLAN) | |
3007 | /* enable hw VLAN support */ | |
3008 | ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; | |
3009 | ||
3010 | if (fep->quirks & FEC_QUIRK_HAS_CSUM) { | |
3011 | ndev->gso_max_segs = FEC_MAX_TSO_SEGS; | |
3012 | ||
3013 | /* enable hw accelerator */ | |
3014 | ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
3015 | | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); | |
3016 | fep->csum_flags |= FLAG_RX_CSUM_ENABLED; | |
3017 | } | |
3018 | ||
3019 | if (fep->quirks & FEC_QUIRK_HAS_AVB) { | |
3020 | fep->tx_align = 0; | |
3021 | fep->rx_align = 0x3f; | |
3022 | } | |
3023 | ||
3024 | ndev->hw_features = ndev->features; | |
3025 | ||
3026 | fec_restart(ndev); | |
3027 | ||
3028 | return 0; | |
3029 | } | |
3030 | ||
3031 | #ifdef CONFIG_OF | |
3032 | static void fec_reset_phy(struct platform_device *pdev) | |
3033 | { | |
3034 | int err, phy_reset; | |
3035 | int msec = 1; | |
3036 | struct device_node *np = pdev->dev.of_node; | |
3037 | ||
3038 | if (!np) | |
3039 | return; | |
3040 | ||
3041 | of_property_read_u32(np, "phy-reset-duration", &msec); | |
3042 | /* A sane reset duration should not be longer than 1s */ | |
3043 | if (msec > 1000) | |
3044 | msec = 1; | |
3045 | ||
3046 | phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); | |
3047 | if (!gpio_is_valid(phy_reset)) | |
3048 | return; | |
3049 | ||
3050 | err = devm_gpio_request_one(&pdev->dev, phy_reset, | |
3051 | GPIOF_OUT_INIT_LOW, "phy-reset"); | |
3052 | if (err) { | |
3053 | dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); | |
3054 | return; | |
3055 | } | |
3056 | msleep(msec); | |
3057 | gpio_set_value(phy_reset, 1); | |
3058 | } | |
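| /* | |
|  * Hypothetical device tree fragment matching the properties read above | |
|  * (the GPIO specifier and the 10 ms duration are examples only): | |
|  * | |
|  *	phy-reset-gpios = <&gpio1 2 1>; | |
|  *	phy-reset-duration = <10>; | |
|  */ | |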
3059 | #else /* CONFIG_OF */ | |
3060 | static void fec_reset_phy(struct platform_device *pdev) | |
3061 | { | |
3062 | /* | |
3063 | * In case of platform probe, the reset has been done | |
3064 | * by machine code. | |
3065 | */ | |
3066 | } | |
3067 | #endif /* CONFIG_OF */ | |
3068 | ||
3069 | static void | |
3070 | fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) | |
3071 | { | |
3072 | struct device_node *np = pdev->dev.of_node; | |
3073 | int err; | |
3074 | ||
3075 | *num_tx = *num_rx = 1; | |
3076 | ||
3077 | if (!np || !of_device_is_available(np)) | |
3078 | return; | |
3079 | ||
3080 | /* parse the num of tx and rx queues */ | |
3081 | err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx); | |
3082 | if (err) | |
3083 | *num_tx = 1; | |
3084 | ||
3085 | err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx); | |
3086 | if (err) | |
3087 | *num_rx = 1; | |
3088 | ||
3089 | if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { | |
3090 | dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", | |
3091 | *num_tx); | |
3092 | *num_tx = 1; | |
3093 | return; | |
3094 | } | |
3095 | ||
3096 | if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { | |
3097 | dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", | |
3098 | *num_rx); | |
3099 | *num_rx = 1; | |
3100 | return; | |
3101 | } | |
3102 | ||
3103 | } | |
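| /* | |
|  * Hypothetical device tree fragment for the queue properties parsed above | |
|  * (the values are examples only; anything outside 1..FEC_ENET_MAX_*_QS | |
|  * falls back to a single queue): | |
|  * | |
|  *	fsl,num-tx-queues = <3>; | |
|  *	fsl,num-rx-queues = <3>; | |
|  */ | |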
3104 | ||
3105 | static int | |
3106 | fec_probe(struct platform_device *pdev) | |
3107 | { | |
3108 | struct fec_enet_private *fep; | |
3109 | struct fec_platform_data *pdata; | |
3110 | struct net_device *ndev; | |
3111 | int i, irq, ret = 0; | |
3112 | struct resource *r; | |
3113 | const struct of_device_id *of_id; | |
3114 | static int dev_id; | |
3115 | struct device_node *np = pdev->dev.of_node, *phy_node; | |
3116 | int num_tx_qs; | |
3117 | int num_rx_qs; | |
3118 | ||
3119 | fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); | |
3120 | ||
3121 | /* Init network device */ | |
3122 | ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private), | |
3123 | num_tx_qs, num_rx_qs); | |
3124 | if (!ndev) | |
3125 | return -ENOMEM; | |
3126 | ||
3127 | SET_NETDEV_DEV(ndev, &pdev->dev); | |
3128 | ||
3129 | /* setup board info structure */ | |
3130 | fep = netdev_priv(ndev); | |
3131 | ||
3132 | of_id = of_match_device(fec_dt_ids, &pdev->dev); | |
3133 | if (of_id) | |
3134 | pdev->id_entry = of_id->data; | |
3135 | fep->quirks = pdev->id_entry->driver_data; | |
3136 | ||
3137 | fep->num_rx_queues = num_rx_qs; | |
3138 | fep->num_tx_queues = num_tx_qs; | |
3139 | ||
3140 | #if !defined(CONFIG_M5272) | |
3141 | /* default enable pause frame auto negotiation */ | |
3142 | if (fep->quirks & FEC_QUIRK_HAS_GBIT) | |
3143 | fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; | |
3144 | #endif | |
3145 | ||
3146 | /* Select default pin state */ | |
3147 | pinctrl_pm_select_default_state(&pdev->dev); | |
3148 | ||
3149 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
3150 | fep->hwp = devm_ioremap_resource(&pdev->dev, r); | |
3151 | if (IS_ERR(fep->hwp)) { | |
3152 | ret = PTR_ERR(fep->hwp); | |
3153 | goto failed_ioremap; | |
3154 | } | |
3155 | ||
3156 | fep->pdev = pdev; | |
3157 | fep->dev_id = dev_id++; | |
3158 | ||
3159 | platform_set_drvdata(pdev, ndev); | |
3160 | ||
3161 | phy_node = of_parse_phandle(np, "phy-handle", 0); | |
3162 | if (!phy_node && of_phy_is_fixed_link(np)) { | |
3163 | ret = of_phy_register_fixed_link(np); | |
3164 | if (ret < 0) { | |
3165 | dev_err(&pdev->dev, | |
3166 | "broken fixed-link specification\n"); | |
3167 | goto failed_phy; | |
3168 | } | |
3169 | phy_node = of_node_get(np); | |
3170 | } | |
3171 | fep->phy_node = phy_node; | |
3172 | ||
3173 | ret = of_get_phy_mode(pdev->dev.of_node); | |
3174 | if (ret < 0) { | |
3175 | pdata = dev_get_platdata(&pdev->dev); | |
3176 | if (pdata) | |
3177 | fep->phy_interface = pdata->phy; | |
3178 | else | |
3179 | fep->phy_interface = PHY_INTERFACE_MODE_MII; | |
3180 | } else { | |
3181 | fep->phy_interface = ret; | |
3182 | } | |
3183 | ||
3184 | fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); | |
3185 | if (IS_ERR(fep->clk_ipg)) { | |
3186 | ret = PTR_ERR(fep->clk_ipg); | |
3187 | goto failed_clk; | |
3188 | } | |
3189 | ||
3190 | fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); | |
3191 | if (IS_ERR(fep->clk_ahb)) { | |
3192 | ret = PTR_ERR(fep->clk_ahb); | |
3193 | goto failed_clk; | |
3194 | } | |
3195 | ||
3196 | fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); | |
3197 | ||
3198 | /* enet_out is optional, depends on board */ | |
3199 | fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); | |
3200 | if (IS_ERR(fep->clk_enet_out)) | |
3201 | fep->clk_enet_out = NULL; | |
3202 | ||
3203 | fep->ptp_clk_on = false; | |
3204 | mutex_init(&fep->ptp_clk_mutex); | |
3205 | ||
3206 | /* clk_ref is optional, depends on board */ | |
3207 | fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); | |
3208 | if (IS_ERR(fep->clk_ref)) | |
3209 | fep->clk_ref = NULL; | |
3210 | ||
3211 | fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; | |
3212 | fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); | |
3213 | if (IS_ERR(fep->clk_ptp)) { | |
3214 | fep->clk_ptp = NULL; | |
3215 | fep->bufdesc_ex = false; | |
3216 | } | |
3217 | ||
3218 | ret = fec_enet_clk_enable(ndev, true); | |
3219 | if (ret) | |
3220 | goto failed_clk; | |
3221 | ||
3222 | fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); | |
3223 | if (!IS_ERR(fep->reg_phy)) { | |
3224 | ret = regulator_enable(fep->reg_phy); | |
3225 | if (ret) { | |
3226 | dev_err(&pdev->dev, | |
3227 | "Failed to enable phy regulator: %d\n", ret); | |
3228 | goto failed_regulator; | |
3229 | } | |
3230 | } else { | |
3231 | fep->reg_phy = NULL; | |
3232 | } | |
3233 | ||
3234 | fec_reset_phy(pdev); | |
3235 | ||
3236 | if (fep->bufdesc_ex) | |
3237 | fec_ptp_init(pdev); | |
3238 | ||
3239 | ret = fec_enet_init(ndev); | |
3240 | if (ret) | |
3241 | goto failed_init; | |
3242 | ||
3243 | for (i = 0; i < FEC_IRQ_NUM; i++) { | |
3244 | irq = platform_get_irq(pdev, i); | |
3245 | if (irq < 0) { | |
3246 | if (i) | |
3247 | break; | |
3248 | ret = irq; | |
3249 | goto failed_irq; | |
3250 | } | |
3251 | ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, | |
3252 | 0, pdev->name, ndev); | |
3253 | if (ret) | |
3254 | goto failed_irq; | |
3255 | } | |
3256 | ||
3257 | init_completion(&fep->mdio_done); | |
3258 | ret = fec_enet_mii_init(pdev); | |
3259 | if (ret) | |
3260 | goto failed_mii_init; | |
3261 | ||
3262 | /* Carrier starts down, phylib will bring it up */ | |
3263 | netif_carrier_off(ndev); | |
3264 | fec_enet_clk_enable(ndev, false); | |
3265 | pinctrl_pm_select_sleep_state(&pdev->dev); | |
3266 | ||
3267 | ret = register_netdev(ndev); | |
3268 | if (ret) | |
3269 | goto failed_register; | |
3270 | ||
3271 | if (fep->bufdesc_ex && fep->ptp_clock) | |
3272 | netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); | |
3273 | ||
3274 | fep->rx_copybreak = COPYBREAK_DEFAULT; | |
3275 | INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); | |
3276 | return 0; | |
3277 | ||
failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_regulator:
	fec_enet_clk_enable(ndev, false);
failed_clk:
failed_phy:
	of_node_put(phy_node);
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

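/* Device removal: undo fec_probe() in reverse order -- stop the deferred
 * work, unregister the netdev, then drop the MDIO bus, PHY regulator,
 * PTP clock, bus clocks and the DT node reference.
 */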
static int
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	cancel_delayed_work_sync(&fep->time_keep);
	cancel_work_sync(&fep->tx_timeout_work);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	if (fep->ptp_clock)
		ptp_clock_unregister(fep->ptp_clock);
	fec_enet_clk_enable(ndev, false);
	of_node_put(fep->phy_node);
	free_netdev(ndev);

	return 0;
}

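/* System suspend: quiesce the PHY, NAPI and the transmit path under the
 * RTNL lock, stop the MAC, then gate the clocks, pins and (if present)
 * the PHY regulator.
 */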
static int __maybe_unused fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	rtnl_lock();
	if (netif_running(ndev)) {
		phy_stop(fep->phy_dev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
		fec_enet_clk_enable(ndev, false);
		pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	}
	rtnl_unlock();

	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);

	/* The SoC supplies the PHY clock and controls the PHY regulator;
	 * when either is switched off the PHY link goes down.
	 */
	if (fep->clk_enet_out || fep->reg_phy)
		fep->link = 0;

	return 0;
}

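/* System resume: mirror of fec_suspend() -- re-enable the PHY regulator
 * first, then restore the pin state, clocks, MAC and PHY under the RTNL
 * lock if the interface was running.
 */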
static int __maybe_unused fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	if (fep->reg_phy) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	rtnl_lock();
	if (netif_running(ndev)) {
		pinctrl_pm_select_default_state(&fep->pdev->dev);
		ret = fec_enet_clk_enable(ndev, true);
		if (ret) {
			rtnl_unlock();
			goto failed_clk;
		}
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_start(fep->phy_dev);
	}
	rtnl_unlock();

	return 0;

failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}

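/* SIMPLE_DEV_PM_OPS() only wires fec_suspend()/fec_resume() into the
 * dev_pm_ops table when CONFIG_PM_SLEEP is enabled, which is why both
 * callbacks are annotated __maybe_unused rather than wrapped in an #ifdef.
 */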
static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove	= fec_drv_remove,
};

module_platform_driver(fec_driver);

MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");