 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
#include <soc/imx/cpuidle.h>
#include <asm/cacheflush.h>
static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_init(struct net_device *ndev);
#define DRIVER_NAME	"fec"

#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
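/* FEC_ENET_GET_QUQUE() maps a work_rx/work_tx bit position back to the
 * hardware queue it represents: bit 0 -> queue 1, bit 1 -> queue 2,
 * bit 2 -> queue 0.  Iterating the bits in ascending order therefore
 * services the two AVB class queues before the best-effort queue 0 (see
 * fec_enet_collect_events() and the comment in fec_enet_tx()).
 */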
/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE	(1 << 5)
#define FEC_ENET_RSEM_V	0x84
#define FEC_ENET_RSFL_V	16
#define FEC_ENET_RAEM_V	0x8
#define FEC_ENET_RAFL_V	0x8
#define FEC_ENET_OPD_V	0xFFF0
#define FEC_MDIO_PM_TIMEOUT  100 /* ms */
static struct platform_device_id fec_devtype[] = {
		/* keep it for coldfire */
		.driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
		.driver_data = FEC_QUIRK_MIB_CLEAR,
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
			       FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
			       FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
			       FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
		.name = "mvf600-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
		.name = "imx6sx-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
			       FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
			       FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
			       FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
			       FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
		.name = "imx6ul-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
			       FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
			       FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
			       FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
			       FEC_QUIRK_HAS_COALESCE,
MODULE_DEVICE_TABLE(platform, fec_devtype);
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
	{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
MODULE_DEVICE_TABLE(of, fec_dt_ids);
static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined (CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 *
 * 2048 byte skbufs are allocated. However, alignment requirements
 * vary between FEC variants. Worst case is 64, so round down by 64.
 */
#define PKT_MAXBUF_SIZE		(round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE		64
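/* Worked example: round_down(2048 - 64, 64) = round_down(1984, 64) = 1984,
 * so the largest receive frame still fits in a 2048 byte skbuf with up to
 * 64 bytes of alignment slack left over.
 */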
/* FEC receive acceleration */
#define FEC_RACC_IPDIS		(1 << 1)
#define FEC_RACC_PRODIS		(1 << 2)
#define FEC_RACC_SHIFT16	BIT(7)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)
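/* FEC_RACC_SHIFT16 makes the MAC prepend two padding bytes to each received
 * frame so the IP header ends up 32-bit aligned; fec_enet_rx_queue() strips
 * them again with skb_pull_inline(skb, 2) before the frame goes up the stack.
 */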
/* MIB Control Register */
#define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
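/* Example: a clause-22 MDIO read of register 'regnum' on PHY address 'mii_id'
 * is composed exactly as in fec_enet_mdio_read() below:
 *
 *	FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(mii_id) |
 *	FEC_MMFR_RA(regnum) | FEC_MMFR_TA
 *
 * i.e. start field, read opcode, PHY address, register address and turnaround
 * bits packed into one FEC_MII_DATA word; the result comes back in the low
 * 16 bits and is extracted with FEC_MMFR_DATA().
 */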
/* FEC ECR bits definition */
#define FEC_ECR_MAGICEN		(1 << 2)
#define FEC_ECR_SLEEP		(1 << 3)

#define FEC_MII_TIMEOUT		30000 /* us */
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2
#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)

#define COPYBREAK_DEFAULT	256
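/* Received frames no longer than rx_copybreak are copied into a freshly
 * allocated skb (see fec_enet_copybreak()), so the original DMA buffer can
 * be returned to the ring after a cache sync instead of a full unmap/remap.
 */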
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
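/* IS_TSO_HEADER() lets the cleanup paths recognise descriptors whose buffer
 * lives in the pre-allocated tso_hdrs DMA region rather than in a per-packet
 * mapping, so they are skipped by dma_unmap_single() in fec_enet_tx_queue()
 * and fec_enet_bd_init().
 */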
static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp >= bd->last) ? bd->base
			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
}
static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp <= bd->base) ? bd->last
			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
}
static int fec_enet_get_bd_index(struct bufdesc *bdp,
				 struct bufdesc_prop *bd)
{
	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
}
static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
{
	int entries;

	entries = (((const char *)txq->dirty_tx -
			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;

	return entries >= 0 ? entries : entries + txq->bd.ring_size;
}
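/* Worked example: if dirty_tx and bd.cur point at the same descriptor
 * (nothing in flight), the pointer difference is 0, entries becomes -1 and
 * the wrap-around correction returns ring_size - 1, i.e. one descriptor is
 * always held back so a completely full ring can be told apart from an
 * empty one.
 */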
269 static void swap_buffer(void *bufaddr
, int len
)
272 unsigned int *buf
= bufaddr
;
274 for (i
= 0; i
< len
; i
+= 4, buf
++)
278 static void swap_buffer2(void *dst_buf
, void *src_buf
, int len
)
281 unsigned int *src
= src_buf
;
282 unsigned int *dst
= dst_buf
;
284 for (i
= 0; i
< len
; i
+= 4, src
++, dst
++)
288 static void fec_dump(struct net_device
*ndev
)
290 struct fec_enet_private
*fep
= netdev_priv(ndev
);
292 struct fec_enet_priv_tx_q
*txq
;
295 netdev_info(ndev
, "TX ring dump\n");
296 pr_info("Nr SC addr len SKB\n");
298 txq
= fep
->tx_queue
[0];
302 pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
304 bdp
== txq
->bd
.cur
? 'S' : ' ',
305 bdp
== txq
->dirty_tx
? 'H' : ' ',
306 fec16_to_cpu(bdp
->cbd_sc
),
307 fec32_to_cpu(bdp
->cbd_bufaddr
),
308 fec16_to_cpu(bdp
->cbd_datlen
),
309 txq
->tx_skbuff
[index
]);
310 bdp
= fec_enet_get_nextdesc(bdp
, &txq
->bd
);
312 } while (bdp
!= txq
->bd
.base
);
315 static inline bool is_ipv4_pkt(struct sk_buff
*skb
)
317 return skb
->protocol
== htons(ETH_P_IP
) && ip_hdr(skb
)->version
== 4;
321 fec_enet_clear_csum(struct sk_buff
*skb
, struct net_device
*ndev
)
323 /* Only run for packets requiring a checksum. */
324 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
327 if (unlikely(skb_cow_head(skb
, 0)))
330 if (is_ipv4_pkt(skb
))
331 ip_hdr(skb
)->check
= 0;
332 *(__sum16
*)(skb
->head
+ skb
->csum_start
+ skb
->csum_offset
) = 0;
337 static struct bufdesc
*
338 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q
*txq
,
340 struct net_device
*ndev
)
342 struct fec_enet_private
*fep
= netdev_priv(ndev
);
343 struct bufdesc
*bdp
= txq
->bd
.cur
;
344 struct bufdesc_ex
*ebdp
;
345 int nr_frags
= skb_shinfo(skb
)->nr_frags
;
347 unsigned short status
;
348 unsigned int estatus
= 0;
349 skb_frag_t
*this_frag
;
355 for (frag
= 0; frag
< nr_frags
; frag
++) {
356 this_frag
= &skb_shinfo(skb
)->frags
[frag
];
357 bdp
= fec_enet_get_nextdesc(bdp
, &txq
->bd
);
358 ebdp
= (struct bufdesc_ex
*)bdp
;
360 status
= fec16_to_cpu(bdp
->cbd_sc
);
361 status
&= ~BD_ENET_TX_STATS
;
362 status
|= (BD_ENET_TX_TC
| BD_ENET_TX_READY
);
363 frag_len
= skb_shinfo(skb
)->frags
[frag
].size
;
365 /* Handle the last BD specially */
366 if (frag
== nr_frags
- 1) {
367 status
|= (BD_ENET_TX_INTR
| BD_ENET_TX_LAST
);
368 if (fep
->bufdesc_ex
) {
369 estatus
|= BD_ENET_TX_INT
;
370 if (unlikely(skb_shinfo(skb
)->tx_flags
&
371 SKBTX_HW_TSTAMP
&& fep
->hwts_tx_en
))
372 estatus
|= BD_ENET_TX_TS
;
376 if (fep
->bufdesc_ex
) {
377 if (fep
->quirks
& FEC_QUIRK_HAS_AVB
)
378 estatus
|= FEC_TX_BD_FTYPE(txq
->bd
.qid
);
379 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
380 estatus
|= BD_ENET_TX_PINS
| BD_ENET_TX_IINS
;
382 ebdp
->cbd_esc
= cpu_to_fec32(estatus
);
385 bufaddr
= page_address(this_frag
->page
.p
) + this_frag
->page_offset
;
387 index
= fec_enet_get_bd_index(bdp
, &txq
->bd
);
388 if (((unsigned long) bufaddr
) & fep
->tx_align
||
389 fep
->quirks
& FEC_QUIRK_SWAP_FRAME
) {
390 memcpy(txq
->tx_bounce
[index
], bufaddr
, frag_len
);
391 bufaddr
= txq
->tx_bounce
[index
];
393 if (fep
->quirks
& FEC_QUIRK_SWAP_FRAME
)
394 swap_buffer(bufaddr
, frag_len
);
397 addr
= dma_map_single(&fep
->pdev
->dev
, bufaddr
, frag_len
,
399 if (dma_mapping_error(&fep
->pdev
->dev
, addr
)) {
401 netdev_err(ndev
, "Tx DMA memory map failed\n");
402 goto dma_mapping_error
;
405 bdp
->cbd_bufaddr
= cpu_to_fec32(addr
);
406 bdp
->cbd_datlen
= cpu_to_fec16(frag_len
);
407 /* Make sure the updates to rest of the descriptor are
408 * performed before transferring ownership.
411 bdp
->cbd_sc
= cpu_to_fec16(status
);
417 for (i
= 0; i
< frag
; i
++) {
418 bdp
= fec_enet_get_nextdesc(bdp
, &txq
->bd
);
419 dma_unmap_single(&fep
->pdev
->dev
, fec32_to_cpu(bdp
->cbd_bufaddr
),
420 fec16_to_cpu(bdp
->cbd_datlen
), DMA_TO_DEVICE
);
422 return ERR_PTR(-ENOMEM
);
425 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q
*txq
,
426 struct sk_buff
*skb
, struct net_device
*ndev
)
428 struct fec_enet_private
*fep
= netdev_priv(ndev
);
429 int nr_frags
= skb_shinfo(skb
)->nr_frags
;
430 struct bufdesc
*bdp
, *last_bdp
;
433 unsigned short status
;
434 unsigned short buflen
;
435 unsigned int estatus
= 0;
439 entries_free
= fec_enet_get_free_txdesc_num(txq
);
440 if (entries_free
< MAX_SKB_FRAGS
+ 1) {
441 dev_kfree_skb_any(skb
);
443 netdev_err(ndev
, "NOT enough BD for SG!\n");
447 /* Protocol checksum off-load for TCP and UDP. */
448 if (fec_enet_clear_csum(skb
, ndev
)) {
449 dev_kfree_skb_any(skb
);
453 /* Fill in a Tx ring entry */
456 status
= fec16_to_cpu(bdp
->cbd_sc
);
457 status
&= ~BD_ENET_TX_STATS
;
459 /* Set buffer length and buffer pointer */
461 buflen
= skb_headlen(skb
);
463 index
= fec_enet_get_bd_index(bdp
, &txq
->bd
);
464 if (((unsigned long) bufaddr
) & fep
->tx_align
||
465 fep
->quirks
& FEC_QUIRK_SWAP_FRAME
) {
466 memcpy(txq
->tx_bounce
[index
], skb
->data
, buflen
);
467 bufaddr
= txq
->tx_bounce
[index
];
469 if (fep
->quirks
& FEC_QUIRK_SWAP_FRAME
)
470 swap_buffer(bufaddr
, buflen
);
473 /* Push the data cache so the CPM does not get stale memory data. */
474 addr
= dma_map_single(&fep
->pdev
->dev
, bufaddr
, buflen
, DMA_TO_DEVICE
);
475 if (dma_mapping_error(&fep
->pdev
->dev
, addr
)) {
476 dev_kfree_skb_any(skb
);
478 netdev_err(ndev
, "Tx DMA memory map failed\n");
483 last_bdp
= fec_enet_txq_submit_frag_skb(txq
, skb
, ndev
);
484 if (IS_ERR(last_bdp
)) {
485 dma_unmap_single(&fep
->pdev
->dev
, addr
,
486 buflen
, DMA_TO_DEVICE
);
487 dev_kfree_skb_any(skb
);
491 status
|= (BD_ENET_TX_INTR
| BD_ENET_TX_LAST
);
492 if (fep
->bufdesc_ex
) {
493 estatus
= BD_ENET_TX_INT
;
494 if (unlikely(skb_shinfo(skb
)->tx_flags
&
495 SKBTX_HW_TSTAMP
&& fep
->hwts_tx_en
))
496 estatus
|= BD_ENET_TX_TS
;
499 bdp
->cbd_bufaddr
= cpu_to_fec32(addr
);
500 bdp
->cbd_datlen
= cpu_to_fec16(buflen
);
502 if (fep
->bufdesc_ex
) {
504 struct bufdesc_ex
*ebdp
= (struct bufdesc_ex
*)bdp
;
506 if (unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
&&
508 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
510 if (fep
->quirks
& FEC_QUIRK_HAS_AVB
)
511 estatus
|= FEC_TX_BD_FTYPE(txq
->bd
.qid
);
513 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
514 estatus
|= BD_ENET_TX_PINS
| BD_ENET_TX_IINS
;
517 ebdp
->cbd_esc
= cpu_to_fec32(estatus
);
520 index
= fec_enet_get_bd_index(last_bdp
, &txq
->bd
);
521 /* Save skb pointer */
522 txq
->tx_skbuff
[index
] = skb
;
524 /* Make sure the updates to rest of the descriptor are performed before
525 * transferring ownership.
529 /* Send it on its way. Tell FEC it's ready, interrupt when done,
530 * it's the last BD of the frame, and to put the CRC on the end.
532 status
|= (BD_ENET_TX_READY
| BD_ENET_TX_TC
);
533 bdp
->cbd_sc
= cpu_to_fec16(status
);
535 /* If this was the last BD in the ring, start at the beginning again. */
536 bdp
= fec_enet_get_nextdesc(last_bdp
, &txq
->bd
);
538 skb_tx_timestamp(skb
);
540 /* Make sure the update to bdp and tx_skbuff are performed before
546 /* Trigger transmission start */
547 writel(0, txq
->bd
.reg_desc_active
);
553 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q
*txq
, struct sk_buff
*skb
,
554 struct net_device
*ndev
,
555 struct bufdesc
*bdp
, int index
, char *data
,
556 int size
, bool last_tcp
, bool is_last
)
558 struct fec_enet_private
*fep
= netdev_priv(ndev
);
559 struct bufdesc_ex
*ebdp
= container_of(bdp
, struct bufdesc_ex
, desc
);
560 unsigned short status
;
561 unsigned int estatus
= 0;
564 status
= fec16_to_cpu(bdp
->cbd_sc
);
565 status
&= ~BD_ENET_TX_STATS
;
567 status
|= (BD_ENET_TX_TC
| BD_ENET_TX_READY
);
569 if (((unsigned long) data
) & fep
->tx_align
||
570 fep
->quirks
& FEC_QUIRK_SWAP_FRAME
) {
571 memcpy(txq
->tx_bounce
[index
], data
, size
);
572 data
= txq
->tx_bounce
[index
];
574 if (fep
->quirks
& FEC_QUIRK_SWAP_FRAME
)
575 swap_buffer(data
, size
);
578 addr
= dma_map_single(&fep
->pdev
->dev
, data
, size
, DMA_TO_DEVICE
);
579 if (dma_mapping_error(&fep
->pdev
->dev
, addr
)) {
580 dev_kfree_skb_any(skb
);
582 netdev_err(ndev
, "Tx DMA memory map failed\n");
583 return NETDEV_TX_BUSY
;
586 bdp
->cbd_datlen
= cpu_to_fec16(size
);
587 bdp
->cbd_bufaddr
= cpu_to_fec32(addr
);
589 if (fep
->bufdesc_ex
) {
590 if (fep
->quirks
& FEC_QUIRK_HAS_AVB
)
591 estatus
|= FEC_TX_BD_FTYPE(txq
->bd
.qid
);
592 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
593 estatus
|= BD_ENET_TX_PINS
| BD_ENET_TX_IINS
;
595 ebdp
->cbd_esc
= cpu_to_fec32(estatus
);
598 /* Handle the last BD specially */
600 status
|= (BD_ENET_TX_LAST
| BD_ENET_TX_TC
);
602 status
|= BD_ENET_TX_INTR
;
604 ebdp
->cbd_esc
|= cpu_to_fec32(BD_ENET_TX_INT
);
607 bdp
->cbd_sc
= cpu_to_fec16(status
);
613 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q
*txq
,
614 struct sk_buff
*skb
, struct net_device
*ndev
,
615 struct bufdesc
*bdp
, int index
)
617 struct fec_enet_private
*fep
= netdev_priv(ndev
);
618 int hdr_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
);
619 struct bufdesc_ex
*ebdp
= container_of(bdp
, struct bufdesc_ex
, desc
);
621 unsigned long dmabuf
;
622 unsigned short status
;
623 unsigned int estatus
= 0;
625 status
= fec16_to_cpu(bdp
->cbd_sc
);
626 status
&= ~BD_ENET_TX_STATS
;
627 status
|= (BD_ENET_TX_TC
| BD_ENET_TX_READY
);
629 bufaddr
= txq
->tso_hdrs
+ index
* TSO_HEADER_SIZE
;
630 dmabuf
= txq
->tso_hdrs_dma
+ index
* TSO_HEADER_SIZE
;
631 if (((unsigned long)bufaddr
) & fep
->tx_align
||
632 fep
->quirks
& FEC_QUIRK_SWAP_FRAME
) {
633 memcpy(txq
->tx_bounce
[index
], skb
->data
, hdr_len
);
634 bufaddr
= txq
->tx_bounce
[index
];
636 if (fep
->quirks
& FEC_QUIRK_SWAP_FRAME
)
637 swap_buffer(bufaddr
, hdr_len
);
639 dmabuf
= dma_map_single(&fep
->pdev
->dev
, bufaddr
,
640 hdr_len
, DMA_TO_DEVICE
);
641 if (dma_mapping_error(&fep
->pdev
->dev
, dmabuf
)) {
642 dev_kfree_skb_any(skb
);
644 netdev_err(ndev
, "Tx DMA memory map failed\n");
645 return NETDEV_TX_BUSY
;
649 bdp
->cbd_bufaddr
= cpu_to_fec32(dmabuf
);
650 bdp
->cbd_datlen
= cpu_to_fec16(hdr_len
);
652 if (fep
->bufdesc_ex
) {
653 if (fep
->quirks
& FEC_QUIRK_HAS_AVB
)
654 estatus
|= FEC_TX_BD_FTYPE(txq
->bd
.qid
);
655 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
656 estatus
|= BD_ENET_TX_PINS
| BD_ENET_TX_IINS
;
658 ebdp
->cbd_esc
= cpu_to_fec32(estatus
);
661 bdp
->cbd_sc
= cpu_to_fec16(status
);
666 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q
*txq
,
668 struct net_device
*ndev
)
670 struct fec_enet_private
*fep
= netdev_priv(ndev
);
671 int hdr_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
);
672 int total_len
, data_left
;
673 struct bufdesc
*bdp
= txq
->bd
.cur
;
675 unsigned int index
= 0;
678 if (tso_count_descs(skb
) >= fec_enet_get_free_txdesc_num(txq
)) {
679 dev_kfree_skb_any(skb
);
681 netdev_err(ndev
, "NOT enough BD for TSO!\n");
685 /* Protocol checksum off-load for TCP and UDP. */
686 if (fec_enet_clear_csum(skb
, ndev
)) {
687 dev_kfree_skb_any(skb
);
691 /* Initialize the TSO handler, and prepare the first payload */
692 tso_start(skb
, &tso
);
694 total_len
= skb
->len
- hdr_len
;
695 while (total_len
> 0) {
698 index
= fec_enet_get_bd_index(bdp
, &txq
->bd
);
699 data_left
= min_t(int, skb_shinfo(skb
)->gso_size
, total_len
);
700 total_len
-= data_left
;
702 /* prepare packet headers: MAC + IP + TCP */
703 hdr
= txq
->tso_hdrs
+ index
* TSO_HEADER_SIZE
;
704 tso_build_hdr(skb
, hdr
, &tso
, data_left
, total_len
== 0);
705 ret
= fec_enet_txq_put_hdr_tso(txq
, skb
, ndev
, bdp
, index
);
709 while (data_left
> 0) {
712 size
= min_t(int, tso
.size
, data_left
);
713 bdp
= fec_enet_get_nextdesc(bdp
, &txq
->bd
);
714 index
= fec_enet_get_bd_index(bdp
, &txq
->bd
);
715 ret
= fec_enet_txq_put_data_tso(txq
, skb
, ndev
,
724 tso_build_data(skb
, &tso
, size
);
727 bdp
= fec_enet_get_nextdesc(bdp
, &txq
->bd
);
730 /* Save skb pointer */
731 txq
->tx_skbuff
[index
] = skb
;
733 skb_tx_timestamp(skb
);
736 /* Trigger transmission start */
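	/* ERR007885 workaround: on affected SoCs the descriptor active
	 * register is only rewritten once one of a few back-to-back reads
	 * sees it already cleared (or the quirk is absent), avoiding
	 * re-arming the transmitter while a previous trigger is still
	 * pending.
	 */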
737 if (!(fep
->quirks
& FEC_QUIRK_ERR007885
) ||
738 !readl(txq
->bd
.reg_desc_active
) ||
739 !readl(txq
->bd
.reg_desc_active
) ||
740 !readl(txq
->bd
.reg_desc_active
) ||
741 !readl(txq
->bd
.reg_desc_active
))
742 writel(0, txq
->bd
.reg_desc_active
);
747 /* TODO: Release all used data descriptors for TSO */
752 fec_enet_start_xmit(struct sk_buff
*skb
, struct net_device
*ndev
)
754 struct fec_enet_private
*fep
= netdev_priv(ndev
);
756 unsigned short queue
;
757 struct fec_enet_priv_tx_q
*txq
;
758 struct netdev_queue
*nq
;
761 queue
= skb_get_queue_mapping(skb
);
762 txq
= fep
->tx_queue
[queue
];
763 nq
= netdev_get_tx_queue(ndev
, queue
);
766 ret
= fec_enet_txq_submit_tso(txq
, skb
, ndev
);
768 ret
= fec_enet_txq_submit_skb(txq
, skb
, ndev
);
772 entries_free
= fec_enet_get_free_txdesc_num(txq
);
773 if (entries_free
<= txq
->tx_stop_threshold
)
774 netif_tx_stop_queue(nq
);
779 /* Init RX & TX buffer descriptors
781 static void fec_enet_bd_init(struct net_device
*dev
)
783 struct fec_enet_private
*fep
= netdev_priv(dev
);
784 struct fec_enet_priv_tx_q
*txq
;
785 struct fec_enet_priv_rx_q
*rxq
;
790 for (q
= 0; q
< fep
->num_rx_queues
; q
++) {
791 /* Initialize the receive buffer descriptors. */
792 rxq
= fep
->rx_queue
[q
];
795 for (i
= 0; i
< rxq
->bd
.ring_size
; i
++) {
797 /* Initialize the BD for every fragment in the page. */
798 if (bdp
->cbd_bufaddr
)
799 bdp
->cbd_sc
= cpu_to_fec16(BD_ENET_RX_EMPTY
);
801 bdp
->cbd_sc
= cpu_to_fec16(0);
802 bdp
= fec_enet_get_nextdesc(bdp
, &rxq
->bd
);
805 /* Set the last buffer to wrap */
806 bdp
= fec_enet_get_prevdesc(bdp
, &rxq
->bd
);
807 bdp
->cbd_sc
|= cpu_to_fec16(BD_SC_WRAP
);
809 rxq
->bd
.cur
= rxq
->bd
.base
;
812 for (q
= 0; q
< fep
->num_tx_queues
; q
++) {
813 /* ...and the same for transmit */
814 txq
= fep
->tx_queue
[q
];
818 for (i
= 0; i
< txq
->bd
.ring_size
; i
++) {
819 /* Initialize the BD for every fragment in the page. */
820 bdp
->cbd_sc
= cpu_to_fec16(0);
821 if (bdp
->cbd_bufaddr
&&
822 !IS_TSO_HEADER(txq
, fec32_to_cpu(bdp
->cbd_bufaddr
)))
823 dma_unmap_single(&fep
->pdev
->dev
,
824 fec32_to_cpu(bdp
->cbd_bufaddr
),
825 fec16_to_cpu(bdp
->cbd_datlen
),
827 if (txq
->tx_skbuff
[i
]) {
828 dev_kfree_skb_any(txq
->tx_skbuff
[i
]);
829 txq
->tx_skbuff
[i
] = NULL
;
831 bdp
->cbd_bufaddr
= cpu_to_fec32(0);
832 bdp
= fec_enet_get_nextdesc(bdp
, &txq
->bd
);
835 /* Set the last buffer to wrap */
836 bdp
= fec_enet_get_prevdesc(bdp
, &txq
->bd
);
837 bdp
->cbd_sc
|= cpu_to_fec16(BD_SC_WRAP
);
842 static void fec_enet_active_rxring(struct net_device
*ndev
)
844 struct fec_enet_private
*fep
= netdev_priv(ndev
);
847 for (i
= 0; i
< fep
->num_rx_queues
; i
++)
848 writel(0, fep
->rx_queue
[i
]->bd
.reg_desc_active
);
851 static void fec_enet_enable_ring(struct net_device
*ndev
)
853 struct fec_enet_private
*fep
= netdev_priv(ndev
);
854 struct fec_enet_priv_tx_q
*txq
;
855 struct fec_enet_priv_rx_q
*rxq
;
858 for (i
= 0; i
< fep
->num_rx_queues
; i
++) {
859 rxq
= fep
->rx_queue
[i
];
860 writel(rxq
->bd
.dma
, fep
->hwp
+ FEC_R_DES_START(i
));
861 writel(PKT_MAXBUF_SIZE
, fep
->hwp
+ FEC_R_BUFF_SIZE(i
));
865 writel(RCMR_MATCHEN
| RCMR_CMP(i
),
866 fep
->hwp
+ FEC_RCMR(i
));
869 for (i
= 0; i
< fep
->num_tx_queues
; i
++) {
870 txq
= fep
->tx_queue
[i
];
871 writel(txq
->bd
.dma
, fep
->hwp
+ FEC_X_DES_START(i
));
875 writel(DMA_CLASS_EN
| IDLE_SLOPE(i
),
876 fep
->hwp
+ FEC_DMA_CFG(i
));
880 static void fec_enet_reset_skb(struct net_device
*ndev
)
882 struct fec_enet_private
*fep
= netdev_priv(ndev
);
883 struct fec_enet_priv_tx_q
*txq
;
886 for (i
= 0; i
< fep
->num_tx_queues
; i
++) {
887 txq
= fep
->tx_queue
[i
];
889 for (j
= 0; j
< txq
->bd
.ring_size
; j
++) {
890 if (txq
->tx_skbuff
[j
]) {
891 dev_kfree_skb_any(txq
->tx_skbuff
[j
]);
892 txq
->tx_skbuff
[j
] = NULL
;
899 * This function is called to start or restart the FEC during a link
900 * change, transmit timeout, or to reconfigure the FEC. The network
901 * packet processing for this device must be stopped before this call.
904 fec_restart(struct net_device
*ndev
)
906 struct fec_enet_private
*fep
= netdev_priv(ndev
);
909 u32 rcntl
= OPT_FRAME_SIZE
| 0x04;
910 u32 ecntl
= 0x2; /* ETHEREN */
	/* Whack a reset. We should wait for this.
	 * For the i.MX6SX SoC, the ENET block is on the AXI bus, so we
	 * disable the MAC instead of resetting it.
916 if (fep
->quirks
& FEC_QUIRK_HAS_AVB
) {
917 writel(0, fep
->hwp
+ FEC_ECNTRL
);
919 writel(1, fep
->hwp
+ FEC_ECNTRL
);
	 * enet-mac reset will reset the MAC address registers too,
	 * so we need to reconfigure them.
927 memcpy(&temp_mac
, ndev
->dev_addr
, ETH_ALEN
);
928 writel((__force u32
)cpu_to_be32(temp_mac
[0]),
929 fep
->hwp
+ FEC_ADDR_LOW
);
930 writel((__force u32
)cpu_to_be32(temp_mac
[1]),
931 fep
->hwp
+ FEC_ADDR_HIGH
);
933 /* Clear any outstanding interrupt. */
934 writel(0xffffffff, fep
->hwp
+ FEC_IEVENT
);
936 fec_enet_bd_init(ndev
);
938 fec_enet_enable_ring(ndev
);
940 /* Reset tx SKB buffers. */
941 fec_enet_reset_skb(ndev
);
943 /* Enable MII mode */
944 if (fep
->full_duplex
== DUPLEX_FULL
) {
946 writel(0x04, fep
->hwp
+ FEC_X_CNTRL
);
950 writel(0x0, fep
->hwp
+ FEC_X_CNTRL
);
954 writel(fep
->phy_speed
, fep
->hwp
+ FEC_MII_SPEED
);
956 #if !defined(CONFIG_M5272)
957 if (fep
->quirks
& FEC_QUIRK_HAS_RACC
) {
958 val
= readl(fep
->hwp
+ FEC_RACC
);
959 /* align IP header */
960 val
|= FEC_RACC_SHIFT16
;
961 if (fep
->csum_flags
& FLAG_RX_CSUM_ENABLED
)
962 /* set RX checksum */
963 val
|= FEC_RACC_OPTIONS
;
965 val
&= ~FEC_RACC_OPTIONS
;
966 writel(val
, fep
->hwp
+ FEC_RACC
);
967 writel(PKT_MAXBUF_SIZE
, fep
->hwp
+ FEC_FTRL
);
	 * The PHY interface and speed need to be configured
	 * differently on enet-mac.
975 if (fep
->quirks
& FEC_QUIRK_ENET_MAC
) {
976 /* Enable flow control and length check */
977 rcntl
|= 0x40000000 | 0x00000020;
979 /* RGMII, RMII or MII */
980 if (fep
->phy_interface
== PHY_INTERFACE_MODE_RGMII
||
981 fep
->phy_interface
== PHY_INTERFACE_MODE_RGMII_ID
||
982 fep
->phy_interface
== PHY_INTERFACE_MODE_RGMII_RXID
||
983 fep
->phy_interface
== PHY_INTERFACE_MODE_RGMII_TXID
)
985 else if (fep
->phy_interface
== PHY_INTERFACE_MODE_RMII
)
990 /* 1G, 100M or 10M */
992 if (ndev
->phydev
->speed
== SPEED_1000
)
994 else if (ndev
->phydev
->speed
== SPEED_100
)
1000 #ifdef FEC_MIIGSK_ENR
1001 if (fep
->quirks
& FEC_QUIRK_USE_GASKET
) {
1003 /* disable the gasket and wait */
1004 writel(0, fep
->hwp
+ FEC_MIIGSK_ENR
);
1005 while (readl(fep
->hwp
+ FEC_MIIGSK_ENR
) & 4)
1009 * configure the gasket:
1010 * RMII, 50 MHz, no loopback, no echo
1011 * MII, 25 MHz, no loopback, no echo
1013 cfgr
= (fep
->phy_interface
== PHY_INTERFACE_MODE_RMII
)
1014 ? BM_MIIGSK_CFGR_RMII
: BM_MIIGSK_CFGR_MII
;
1015 if (ndev
->phydev
&& ndev
->phydev
->speed
== SPEED_10
)
1016 cfgr
|= BM_MIIGSK_CFGR_FRCONT_10M
;
1017 writel(cfgr
, fep
->hwp
+ FEC_MIIGSK_CFGR
);
1019 /* re-enable the gasket */
1020 writel(2, fep
->hwp
+ FEC_MIIGSK_ENR
);
1025 #if !defined(CONFIG_M5272)
1026 /* enable pause frame*/
1027 if ((fep
->pause_flag
& FEC_PAUSE_FLAG_ENABLE
) ||
1028 ((fep
->pause_flag
& FEC_PAUSE_FLAG_AUTONEG
) &&
1029 ndev
->phydev
&& ndev
->phydev
->pause
)) {
1030 rcntl
|= FEC_ENET_FCE
;
1032 /* set FIFO threshold parameter to reduce overrun */
1033 writel(FEC_ENET_RSEM_V
, fep
->hwp
+ FEC_R_FIFO_RSEM
);
1034 writel(FEC_ENET_RSFL_V
, fep
->hwp
+ FEC_R_FIFO_RSFL
);
1035 writel(FEC_ENET_RAEM_V
, fep
->hwp
+ FEC_R_FIFO_RAEM
);
1036 writel(FEC_ENET_RAFL_V
, fep
->hwp
+ FEC_R_FIFO_RAFL
);
1039 writel(FEC_ENET_OPD_V
, fep
->hwp
+ FEC_OPD
);
1041 rcntl
&= ~FEC_ENET_FCE
;
1043 #endif /* !defined(CONFIG_M5272) */
1045 writel(rcntl
, fep
->hwp
+ FEC_R_CNTRL
);
1047 /* Setup multicast filter. */
1048 set_multicast_list(ndev
);
1049 #ifndef CONFIG_M5272
1050 writel(0, fep
->hwp
+ FEC_HASH_TABLE_HIGH
);
1051 writel(0, fep
->hwp
+ FEC_HASH_TABLE_LOW
);
1054 if (fep
->quirks
& FEC_QUIRK_ENET_MAC
) {
1055 /* enable ENET endian swap */
1057 /* enable ENET store and forward mode */
1058 writel(1 << 8, fep
->hwp
+ FEC_X_WMRK
);
1061 if (fep
->bufdesc_ex
)
1064 #ifndef CONFIG_M5272
1065 /* Enable the MIB statistic event counters */
1066 writel(0 << 31, fep
->hwp
+ FEC_MIB_CTRLSTAT
);
1069 /* And last, enable the transmit and receive processing */
1070 writel(ecntl
, fep
->hwp
+ FEC_ECNTRL
);
1071 fec_enet_active_rxring(ndev
);
1073 if (fep
->bufdesc_ex
)
1074 fec_ptp_start_cyclecounter(ndev
);
1076 /* Enable interrupts we wish to service */
1078 writel(FEC_DEFAULT_IMASK
, fep
->hwp
+ FEC_IMASK
);
1080 writel(FEC_ENET_MII
, fep
->hwp
+ FEC_IMASK
);
1082 /* Init the interrupt coalescing */
1083 fec_enet_itr_coal_init(ndev
);
1088 fec_stop(struct net_device
*ndev
)
1090 struct fec_enet_private
*fep
= netdev_priv(ndev
);
1091 struct fec_platform_data
*pdata
= fep
->pdev
->dev
.platform_data
;
1092 u32 rmii_mode
= readl(fep
->hwp
+ FEC_R_CNTRL
) & (1 << 8);
1095 /* We cannot expect a graceful transmit stop without link !!! */
1097 writel(1, fep
->hwp
+ FEC_X_CNTRL
); /* Graceful transmit stop */
1099 if (!(readl(fep
->hwp
+ FEC_IEVENT
) & FEC_ENET_GRA
))
1100 netdev_err(ndev
, "Graceful transmit stop did not complete!\n");
	/* Whack a reset. We should wait for this.
	 * For the i.MX6SX SoC, the ENET block is on the AXI bus, so we
	 * disable the MAC instead of resetting it.
1107 if (!(fep
->wol_flag
& FEC_WOL_FLAG_SLEEP_ON
)) {
1108 if (fep
->quirks
& FEC_QUIRK_HAS_AVB
) {
1109 writel(0, fep
->hwp
+ FEC_ECNTRL
);
1111 writel(1, fep
->hwp
+ FEC_ECNTRL
);
1114 writel(FEC_DEFAULT_IMASK
, fep
->hwp
+ FEC_IMASK
);
1116 writel(FEC_DEFAULT_IMASK
| FEC_ENET_WAKEUP
, fep
->hwp
+ FEC_IMASK
);
1117 val
= readl(fep
->hwp
+ FEC_ECNTRL
);
1118 val
|= (FEC_ECR_MAGICEN
| FEC_ECR_SLEEP
);
1119 writel(val
, fep
->hwp
+ FEC_ECNTRL
);
1121 if (pdata
&& pdata
->sleep_mode_enable
)
1122 pdata
->sleep_mode_enable(true);
1124 writel(fep
->phy_speed
, fep
->hwp
+ FEC_MII_SPEED
);
1126 /* We have to keep ENET enabled to have MII interrupt stay working */
1127 if (fep
->quirks
& FEC_QUIRK_ENET_MAC
&&
1128 !(fep
->wol_flag
& FEC_WOL_FLAG_SLEEP_ON
)) {
1129 writel(2, fep
->hwp
+ FEC_ECNTRL
);
1130 writel(rmii_mode
, fep
->hwp
+ FEC_R_CNTRL
);
1136 fec_timeout(struct net_device
*ndev
)
1138 struct fec_enet_private
*fep
= netdev_priv(ndev
);
1142 ndev
->stats
.tx_errors
++;
1144 schedule_work(&fep
->tx_timeout_work
);
1147 static void fec_enet_timeout_work(struct work_struct
*work
)
1149 struct fec_enet_private
*fep
=
1150 container_of(work
, struct fec_enet_private
, tx_timeout_work
);
1151 struct net_device
*ndev
= fep
->netdev
;
1154 if (netif_device_present(ndev
) || netif_running(ndev
)) {
1155 napi_disable(&fep
->napi
);
1156 netif_tx_lock_bh(ndev
);
1158 netif_wake_queue(ndev
);
1159 netif_tx_unlock_bh(ndev
);
1160 napi_enable(&fep
->napi
);
1166 fec_enet_hwtstamp(struct fec_enet_private
*fep
, unsigned ts
,
1167 struct skb_shared_hwtstamps
*hwtstamps
)
1169 unsigned long flags
;
1172 spin_lock_irqsave(&fep
->tmreg_lock
, flags
);
1173 ns
= timecounter_cyc2time(&fep
->tc
, ts
);
1174 spin_unlock_irqrestore(&fep
->tmreg_lock
, flags
);
1176 memset(hwtstamps
, 0, sizeof(*hwtstamps
));
1177 hwtstamps
->hwtstamp
= ns_to_ktime(ns
);
1181 fec_enet_tx_queue(struct net_device
*ndev
, u16 queue_id
)
1183 struct fec_enet_private
*fep
;
1184 struct bufdesc
*bdp
;
1185 unsigned short status
;
1186 struct sk_buff
*skb
;
1187 struct fec_enet_priv_tx_q
*txq
;
1188 struct netdev_queue
*nq
;
1192 fep
= netdev_priv(ndev
);
1194 queue_id
= FEC_ENET_GET_QUQUE(queue_id
);
1196 txq
= fep
->tx_queue
[queue_id
];
1197 /* get next bdp of dirty_tx */
1198 nq
= netdev_get_tx_queue(ndev
, queue_id
);
1199 bdp
= txq
->dirty_tx
;
1201 /* get next bdp of dirty_tx */
1202 bdp
= fec_enet_get_nextdesc(bdp
, &txq
->bd
);
1204 while (bdp
!= READ_ONCE(txq
->bd
.cur
)) {
1205 /* Order the load of bd.cur and cbd_sc */
1207 status
= fec16_to_cpu(READ_ONCE(bdp
->cbd_sc
));
1208 if (status
& BD_ENET_TX_READY
)
1211 index
= fec_enet_get_bd_index(bdp
, &txq
->bd
);
1213 skb
= txq
->tx_skbuff
[index
];
1214 txq
->tx_skbuff
[index
] = NULL
;
1215 if (!IS_TSO_HEADER(txq
, fec32_to_cpu(bdp
->cbd_bufaddr
)))
1216 dma_unmap_single(&fep
->pdev
->dev
,
1217 fec32_to_cpu(bdp
->cbd_bufaddr
),
1218 fec16_to_cpu(bdp
->cbd_datlen
),
1220 bdp
->cbd_bufaddr
= cpu_to_fec32(0);
1224 /* Check for errors. */
1225 if (status
& (BD_ENET_TX_HB
| BD_ENET_TX_LC
|
1226 BD_ENET_TX_RL
| BD_ENET_TX_UN
|
1228 ndev
->stats
.tx_errors
++;
1229 if (status
& BD_ENET_TX_HB
) /* No heartbeat */
1230 ndev
->stats
.tx_heartbeat_errors
++;
1231 if (status
& BD_ENET_TX_LC
) /* Late collision */
1232 ndev
->stats
.tx_window_errors
++;
1233 if (status
& BD_ENET_TX_RL
) /* Retrans limit */
1234 ndev
->stats
.tx_aborted_errors
++;
1235 if (status
& BD_ENET_TX_UN
) /* Underrun */
1236 ndev
->stats
.tx_fifo_errors
++;
1237 if (status
& BD_ENET_TX_CSL
) /* Carrier lost */
1238 ndev
->stats
.tx_carrier_errors
++;
1240 ndev
->stats
.tx_packets
++;
1241 ndev
->stats
.tx_bytes
+= skb
->len
;
1244 if (unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_IN_PROGRESS
) &&
1246 struct skb_shared_hwtstamps shhwtstamps
;
1247 struct bufdesc_ex
*ebdp
= (struct bufdesc_ex
*)bdp
;
1249 fec_enet_hwtstamp(fep
, fec32_to_cpu(ebdp
->ts
), &shhwtstamps
);
1250 skb_tstamp_tx(skb
, &shhwtstamps
);
1253 /* Deferred means some collisions occurred during transmit,
1254 * but we eventually sent the packet OK.
1256 if (status
& BD_ENET_TX_DEF
)
1257 ndev
->stats
.collisions
++;
1259 /* Free the sk buffer associated with this last transmit */
1260 dev_kfree_skb_any(skb
);
1262 /* Make sure the update to bdp and tx_skbuff are performed
1266 txq
->dirty_tx
= bdp
;
1268 /* Update pointer to next buffer descriptor to be transmitted */
1269 bdp
= fec_enet_get_nextdesc(bdp
, &txq
->bd
);
1271 /* Since we have freed up a buffer, the ring is no longer full
1273 if (netif_queue_stopped(ndev
)) {
1274 entries_free
= fec_enet_get_free_txdesc_num(txq
);
1275 if (entries_free
>= txq
->tx_wake_threshold
)
1276 netif_tx_wake_queue(nq
);
1280 /* ERR006358: Keep the transmitter going */
1281 if (bdp
!= txq
->bd
.cur
&&
1282 readl(txq
->bd
.reg_desc_active
) == 0)
1283 writel(0, txq
->bd
.reg_desc_active
);
1287 fec_enet_tx(struct net_device
*ndev
)
1289 struct fec_enet_private
*fep
= netdev_priv(ndev
);
1291 /* First process class A queue, then Class B and Best Effort queue */
1292 for_each_set_bit(queue_id
, &fep
->work_tx
, FEC_ENET_MAX_TX_QS
) {
1293 clear_bit(queue_id
, &fep
->work_tx
);
1294 fec_enet_tx_queue(ndev
, queue_id
);
1300 fec_enet_new_rxbdp(struct net_device
*ndev
, struct bufdesc
*bdp
, struct sk_buff
*skb
)
1302 struct fec_enet_private
*fep
= netdev_priv(ndev
);
1305 off
= ((unsigned long)skb
->data
) & fep
->rx_align
;
1307 skb_reserve(skb
, fep
->rx_align
+ 1 - off
);
1309 bdp
->cbd_bufaddr
= cpu_to_fec32(dma_map_single(&fep
->pdev
->dev
, skb
->data
, FEC_ENET_RX_FRSIZE
- fep
->rx_align
, DMA_FROM_DEVICE
));
1310 if (dma_mapping_error(&fep
->pdev
->dev
, fec32_to_cpu(bdp
->cbd_bufaddr
))) {
1311 if (net_ratelimit())
1312 netdev_err(ndev
, "Rx DMA memory map failed\n");
1319 static bool fec_enet_copybreak(struct net_device
*ndev
, struct sk_buff
**skb
,
1320 struct bufdesc
*bdp
, u32 length
, bool swap
)
1322 struct fec_enet_private
*fep
= netdev_priv(ndev
);
1323 struct sk_buff
*new_skb
;
1325 if (length
> fep
->rx_copybreak
)
1328 new_skb
= netdev_alloc_skb(ndev
, length
);
1332 dma_sync_single_for_cpu(&fep
->pdev
->dev
,
1333 fec32_to_cpu(bdp
->cbd_bufaddr
),
1334 FEC_ENET_RX_FRSIZE
- fep
->rx_align
,
1337 memcpy(new_skb
->data
, (*skb
)->data
, length
);
1339 swap_buffer2(new_skb
->data
, (*skb
)->data
, length
);
1345 /* During a receive, the bd_rx.cur points to the current incoming buffer.
1346 * When we update through the ring, if the next incoming buffer has
1347 * not been given to the system, we just set the empty indicator,
1348 * effectively tossing the packet.
1351 fec_enet_rx_queue(struct net_device
*ndev
, int budget
, u16 queue_id
)
1353 struct fec_enet_private
*fep
= netdev_priv(ndev
);
1354 struct fec_enet_priv_rx_q
*rxq
;
1355 struct bufdesc
*bdp
;
1356 unsigned short status
;
1357 struct sk_buff
*skb_new
= NULL
;
1358 struct sk_buff
*skb
;
1361 int pkt_received
= 0;
1362 struct bufdesc_ex
*ebdp
= NULL
;
1363 bool vlan_packet_rcvd
= false;
1367 bool need_swap
= fep
->quirks
& FEC_QUIRK_SWAP_FRAME
;
1372 queue_id
= FEC_ENET_GET_QUQUE(queue_id
);
1373 rxq
= fep
->rx_queue
[queue_id
];
1375 /* First, grab all of the stats for the incoming packet.
1376 * These get messed up if we get called due to a busy condition.
1380 while (!((status
= fec16_to_cpu(bdp
->cbd_sc
)) & BD_ENET_RX_EMPTY
)) {
1382 if (pkt_received
>= budget
)
1386 writel(FEC_ENET_RXF
, fep
->hwp
+ FEC_IEVENT
);
1388 /* Check for errors. */
1389 status
^= BD_ENET_RX_LAST
;
1390 if (status
& (BD_ENET_RX_LG
| BD_ENET_RX_SH
| BD_ENET_RX_NO
|
1391 BD_ENET_RX_CR
| BD_ENET_RX_OV
| BD_ENET_RX_LAST
|
1393 ndev
->stats
.rx_errors
++;
1394 if (status
& BD_ENET_RX_OV
) {
1396 ndev
->stats
.rx_fifo_errors
++;
1397 goto rx_processing_done
;
1399 if (status
& (BD_ENET_RX_LG
| BD_ENET_RX_SH
1400 | BD_ENET_RX_LAST
)) {
1401 /* Frame too long or too short. */
1402 ndev
->stats
.rx_length_errors
++;
1403 if (status
& BD_ENET_RX_LAST
)
1404 netdev_err(ndev
, "rcv is not +last\n");
1406 if (status
& BD_ENET_RX_CR
) /* CRC Error */
1407 ndev
->stats
.rx_crc_errors
++;
1408 /* Report late collisions as a frame error. */
1409 if (status
& (BD_ENET_RX_NO
| BD_ENET_RX_CL
))
1410 ndev
->stats
.rx_frame_errors
++;
1411 goto rx_processing_done
;
1414 /* Process the incoming frame. */
1415 ndev
->stats
.rx_packets
++;
1416 pkt_len
= fec16_to_cpu(bdp
->cbd_datlen
);
1417 ndev
->stats
.rx_bytes
+= pkt_len
;
1419 index
= fec_enet_get_bd_index(bdp
, &rxq
->bd
);
1420 skb
= rxq
->rx_skbuff
[index
];
1422 /* The packet length includes FCS, but we don't want to
1423 * include that when passing upstream as it messes up
1424 * bridging applications.
1426 is_copybreak
= fec_enet_copybreak(ndev
, &skb
, bdp
, pkt_len
- 4,
1428 if (!is_copybreak
) {
1429 skb_new
= netdev_alloc_skb(ndev
, FEC_ENET_RX_FRSIZE
);
1430 if (unlikely(!skb_new
)) {
1431 ndev
->stats
.rx_dropped
++;
1432 goto rx_processing_done
;
1434 dma_unmap_single(&fep
->pdev
->dev
,
1435 fec32_to_cpu(bdp
->cbd_bufaddr
),
1436 FEC_ENET_RX_FRSIZE
- fep
->rx_align
,
1440 prefetch(skb
->data
- NET_IP_ALIGN
);
1441 skb_put(skb
, pkt_len
- 4);
1444 if (!is_copybreak
&& need_swap
)
1445 swap_buffer(data
, pkt_len
);
1447 #if !defined(CONFIG_M5272)
1448 if (fep
->quirks
& FEC_QUIRK_HAS_RACC
)
1449 data
= skb_pull_inline(skb
, 2);
1452 /* Extract the enhanced buffer descriptor */
1454 if (fep
->bufdesc_ex
)
1455 ebdp
= (struct bufdesc_ex
*)bdp
;
1457 /* If this is a VLAN packet remove the VLAN Tag */
1458 vlan_packet_rcvd
= false;
1459 if ((ndev
->features
& NETIF_F_HW_VLAN_CTAG_RX
) &&
1461 (ebdp
->cbd_esc
& cpu_to_fec32(BD_ENET_RX_VLAN
))) {
1462 /* Push and remove the vlan tag */
1463 struct vlan_hdr
*vlan_header
=
1464 (struct vlan_hdr
*) (data
+ ETH_HLEN
);
1465 vlan_tag
= ntohs(vlan_header
->h_vlan_TCI
);
1467 vlan_packet_rcvd
= true;
1469 memmove(skb
->data
+ VLAN_HLEN
, data
, ETH_ALEN
* 2);
1470 skb_pull(skb
, VLAN_HLEN
);
1473 skb
->protocol
= eth_type_trans(skb
, ndev
);
1475 /* Get receive timestamp from the skb */
1476 if (fep
->hwts_rx_en
&& fep
->bufdesc_ex
)
1477 fec_enet_hwtstamp(fep
, fec32_to_cpu(ebdp
->ts
),
1478 skb_hwtstamps(skb
));
1480 if (fep
->bufdesc_ex
&&
1481 (fep
->csum_flags
& FLAG_RX_CSUM_ENABLED
)) {
1482 if (!(ebdp
->cbd_esc
& cpu_to_fec32(FLAG_RX_CSUM_ERROR
))) {
1483 /* don't check it */
1484 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1486 skb_checksum_none_assert(skb
);
1490 /* Handle received VLAN packets */
1491 if (vlan_packet_rcvd
)
1492 __vlan_hwaccel_put_tag(skb
,
1496 napi_gro_receive(&fep
->napi
, skb
);
1499 dma_sync_single_for_device(&fep
->pdev
->dev
,
1500 fec32_to_cpu(bdp
->cbd_bufaddr
),
1501 FEC_ENET_RX_FRSIZE
- fep
->rx_align
,
1504 rxq
->rx_skbuff
[index
] = skb_new
;
1505 fec_enet_new_rxbdp(ndev
, bdp
, skb_new
);
1509 /* Clear the status flags for this buffer */
1510 status
&= ~BD_ENET_RX_STATS
;
1512 /* Mark the buffer empty */
1513 status
|= BD_ENET_RX_EMPTY
;
1515 if (fep
->bufdesc_ex
) {
1516 struct bufdesc_ex
*ebdp
= (struct bufdesc_ex
*)bdp
;
1518 ebdp
->cbd_esc
= cpu_to_fec32(BD_ENET_RX_INT
);
1522 /* Make sure the updates to rest of the descriptor are
1523 * performed before transferring ownership.
1526 bdp
->cbd_sc
= cpu_to_fec16(status
);
1528 /* Update BD pointer to next entry */
1529 bdp
= fec_enet_get_nextdesc(bdp
, &rxq
->bd
);
1531 /* Doing this here will keep the FEC running while we process
1532 * incoming frames. On a heavily loaded network, we should be
1533 * able to keep up at the expense of system resources.
1535 writel(0, rxq
->bd
.reg_desc_active
);
1538 return pkt_received
;
1542 fec_enet_rx(struct net_device
*ndev
, int budget
)
1544 int pkt_received
= 0;
1546 struct fec_enet_private
*fep
= netdev_priv(ndev
);
1548 for_each_set_bit(queue_id
, &fep
->work_rx
, FEC_ENET_MAX_RX_QS
) {
1551 ret
= fec_enet_rx_queue(ndev
,
1552 budget
- pkt_received
, queue_id
);
1554 if (ret
< budget
- pkt_received
)
1555 clear_bit(queue_id
, &fep
->work_rx
);
1557 pkt_received
+= ret
;
1559 return pkt_received
;
1563 fec_enet_collect_events(struct fec_enet_private
*fep
, uint int_events
)
1565 if (int_events
== 0)
1568 if (int_events
& FEC_ENET_RXF_0
)
1569 fep
->work_rx
|= (1 << 2);
1570 if (int_events
& FEC_ENET_RXF_1
)
1571 fep
->work_rx
|= (1 << 0);
1572 if (int_events
& FEC_ENET_RXF_2
)
1573 fep
->work_rx
|= (1 << 1);
1575 if (int_events
& FEC_ENET_TXF_0
)
1576 fep
->work_tx
|= (1 << 2);
1577 if (int_events
& FEC_ENET_TXF_1
)
1578 fep
->work_tx
|= (1 << 0);
1579 if (int_events
& FEC_ENET_TXF_2
)
1580 fep
->work_tx
|= (1 << 1);
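	/* Note the deliberate cross mapping: the RXF_0/TXF_0 events land in
	 * bit 2 while RXF_1/TXF_1 land in bit 0, so that for_each_set_bit()
	 * walks the queues in the priority order that FEC_ENET_GET_QUQUE()
	 * translates back to hardware queue numbers (class A, class B, then
	 * best effort).
	 */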
1586 fec_enet_interrupt(int irq
, void *dev_id
)
1588 struct net_device
*ndev
= dev_id
;
1589 struct fec_enet_private
*fep
= netdev_priv(ndev
);
1591 irqreturn_t ret
= IRQ_NONE
;
1593 int_events
= readl(fep
->hwp
+ FEC_IEVENT
);
1594 writel(int_events
, fep
->hwp
+ FEC_IEVENT
);
1595 fec_enet_collect_events(fep
, int_events
);
1597 if ((fep
->work_tx
|| fep
->work_rx
) && fep
->link
) {
1600 if (napi_schedule_prep(&fep
->napi
)) {
1601 /* Disable the NAPI interrupts */
1602 writel(FEC_NAPI_IMASK
, fep
->hwp
+ FEC_IMASK
);
1603 __napi_schedule(&fep
->napi
);
1607 if (int_events
& FEC_ENET_MII
) {
1609 complete(&fep
->mdio_done
);
1614 static int fec_enet_rx_napi(struct napi_struct
*napi
, int budget
)
1616 struct net_device
*ndev
= napi
->dev
;
1617 struct fec_enet_private
*fep
= netdev_priv(ndev
);
1620 pkts
= fec_enet_rx(ndev
, budget
);
1624 if (pkts
< budget
) {
1625 napi_complete_done(napi
, pkts
);
1626 writel(FEC_DEFAULT_IMASK
, fep
->hwp
+ FEC_IMASK
);
1631 /* ------------------------------------------------------------------------- */
1632 static void fec_get_mac(struct net_device
*ndev
)
1634 struct fec_enet_private
*fep
= netdev_priv(ndev
);
1635 struct fec_platform_data
*pdata
= dev_get_platdata(&fep
->pdev
->dev
);
1636 unsigned char *iap
, tmpaddr
[ETH_ALEN
];
1639 * try to get mac address in following order:
1641 * 1) module parameter via kernel command line in form
1642 * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
1647 * 2) from device tree data
1649 if (!is_valid_ether_addr(iap
)) {
1650 struct device_node
*np
= fep
->pdev
->dev
.of_node
;
1652 const char *mac
= of_get_mac_address(np
);
1654 iap
= (unsigned char *) mac
;
1659 * 3) from flash or fuse (via platform data)
1661 if (!is_valid_ether_addr(iap
)) {
1664 iap
= (unsigned char *)FEC_FLASHMAC
;
1667 iap
= (unsigned char *)&pdata
->mac
;
1672 * 4) FEC mac registers set by bootloader
1674 if (!is_valid_ether_addr(iap
)) {
1675 *((__be32
*) &tmpaddr
[0]) =
1676 cpu_to_be32(readl(fep
->hwp
+ FEC_ADDR_LOW
));
1677 *((__be16
*) &tmpaddr
[4]) =
1678 cpu_to_be16(readl(fep
->hwp
+ FEC_ADDR_HIGH
) >> 16);
1683 * 5) random mac address
1685 if (!is_valid_ether_addr(iap
)) {
1686 /* Report it and use a random ethernet address instead */
1687 netdev_err(ndev
, "Invalid MAC address: %pM\n", iap
);
1688 eth_hw_addr_random(ndev
);
1689 netdev_info(ndev
, "Using random MAC address: %pM\n",
1694 memcpy(ndev
->dev_addr
, iap
, ETH_ALEN
);
1696 /* Adjust MAC if using macaddr */
1698 ndev
->dev_addr
[ETH_ALEN
-1] = macaddr
[ETH_ALEN
-1] + fep
->dev_id
;
1701 /* ------------------------------------------------------------------------- */
1706 static void fec_enet_adjust_link(struct net_device
*ndev
)
1708 struct fec_enet_private
*fep
= netdev_priv(ndev
);
1709 struct phy_device
*phy_dev
= ndev
->phydev
;
1710 int status_change
= 0;
1712 /* Prevent a state halted on mii error */
1713 if (fep
->mii_timeout
&& phy_dev
->state
== PHY_HALTED
) {
1714 phy_dev
->state
= PHY_RESUMING
;
1719 * If the netdev is down, or is going down, we're not interested
1720 * in link state events, so just mark our idea of the link as down
1721 * and ignore the event.
1723 if (!netif_running(ndev
) || !netif_device_present(ndev
)) {
1725 } else if (phy_dev
->link
) {
1727 fep
->link
= phy_dev
->link
;
1731 if (fep
->full_duplex
!= phy_dev
->duplex
) {
1732 fep
->full_duplex
= phy_dev
->duplex
;
1736 if (phy_dev
->speed
!= fep
->speed
) {
1737 fep
->speed
= phy_dev
->speed
;
1741 /* if any of the above changed restart the FEC */
1742 if (status_change
) {
1743 napi_disable(&fep
->napi
);
1744 netif_tx_lock_bh(ndev
);
1746 netif_wake_queue(ndev
);
1747 netif_tx_unlock_bh(ndev
);
1748 napi_enable(&fep
->napi
);
1752 napi_disable(&fep
->napi
);
1753 netif_tx_lock_bh(ndev
);
1755 netif_tx_unlock_bh(ndev
);
1756 napi_enable(&fep
->napi
);
1757 fep
->link
= phy_dev
->link
;
1763 phy_print_status(phy_dev
);
1766 static int fec_enet_mdio_read(struct mii_bus
*bus
, int mii_id
, int regnum
)
1768 struct fec_enet_private
*fep
= bus
->priv
;
1769 struct device
*dev
= &fep
->pdev
->dev
;
1770 unsigned long time_left
;
1773 ret
= pm_runtime_get_sync(dev
);
1777 fep
->mii_timeout
= 0;
1778 reinit_completion(&fep
->mdio_done
);
1780 /* start a read op */
1781 writel(FEC_MMFR_ST
| FEC_MMFR_OP_READ
|
1782 FEC_MMFR_PA(mii_id
) | FEC_MMFR_RA(regnum
) |
1783 FEC_MMFR_TA
, fep
->hwp
+ FEC_MII_DATA
);
1785 /* wait for end of transfer */
1786 time_left
= wait_for_completion_timeout(&fep
->mdio_done
,
1787 usecs_to_jiffies(FEC_MII_TIMEOUT
));
1788 if (time_left
== 0) {
1789 fep
->mii_timeout
= 1;
1790 netdev_err(fep
->netdev
, "MDIO read timeout\n");
1795 ret
= FEC_MMFR_DATA(readl(fep
->hwp
+ FEC_MII_DATA
));
1798 pm_runtime_mark_last_busy(dev
);
1799 pm_runtime_put_autosuspend(dev
);
1804 static int fec_enet_mdio_write(struct mii_bus
*bus
, int mii_id
, int regnum
,
1807 struct fec_enet_private
*fep
= bus
->priv
;
1808 struct device
*dev
= &fep
->pdev
->dev
;
1809 unsigned long time_left
;
1812 ret
= pm_runtime_get_sync(dev
);
1818 fep
->mii_timeout
= 0;
1819 reinit_completion(&fep
->mdio_done
);
1821 /* start a write op */
1822 writel(FEC_MMFR_ST
| FEC_MMFR_OP_WRITE
|
1823 FEC_MMFR_PA(mii_id
) | FEC_MMFR_RA(regnum
) |
1824 FEC_MMFR_TA
| FEC_MMFR_DATA(value
),
1825 fep
->hwp
+ FEC_MII_DATA
);
1827 /* wait for end of transfer */
1828 time_left
= wait_for_completion_timeout(&fep
->mdio_done
,
1829 usecs_to_jiffies(FEC_MII_TIMEOUT
));
1830 if (time_left
== 0) {
1831 fep
->mii_timeout
= 1;
1832 netdev_err(fep
->netdev
, "MDIO write timeout\n");
1836 pm_runtime_mark_last_busy(dev
);
1837 pm_runtime_put_autosuspend(dev
);
1842 static int fec_enet_clk_enable(struct net_device
*ndev
, bool enable
)
1844 struct fec_enet_private
*fep
= netdev_priv(ndev
);
1848 ret
= clk_prepare_enable(fep
->clk_ahb
);
1852 ret
= clk_prepare_enable(fep
->clk_enet_out
);
1854 goto failed_clk_enet_out
;
1857 mutex_lock(&fep
->ptp_clk_mutex
);
1858 ret
= clk_prepare_enable(fep
->clk_ptp
);
1860 mutex_unlock(&fep
->ptp_clk_mutex
);
1861 goto failed_clk_ptp
;
1863 fep
->ptp_clk_on
= true;
1865 mutex_unlock(&fep
->ptp_clk_mutex
);
1868 ret
= clk_prepare_enable(fep
->clk_ref
);
1870 goto failed_clk_ref
;
1872 clk_disable_unprepare(fep
->clk_ahb
);
1873 clk_disable_unprepare(fep
->clk_enet_out
);
1875 mutex_lock(&fep
->ptp_clk_mutex
);
1876 clk_disable_unprepare(fep
->clk_ptp
);
1877 fep
->ptp_clk_on
= false;
1878 mutex_unlock(&fep
->ptp_clk_mutex
);
1880 clk_disable_unprepare(fep
->clk_ref
);
1887 clk_disable_unprepare(fep
->clk_ref
);
1889 if (fep
->clk_enet_out
)
1890 clk_disable_unprepare(fep
->clk_enet_out
);
1891 failed_clk_enet_out
:
1892 clk_disable_unprepare(fep
->clk_ahb
);
1897 static int fec_enet_mii_probe(struct net_device
*ndev
)
1899 struct fec_enet_private
*fep
= netdev_priv(ndev
);
1900 struct phy_device
*phy_dev
= NULL
;
1901 char mdio_bus_id
[MII_BUS_ID_SIZE
];
1902 char phy_name
[MII_BUS_ID_SIZE
+ 3];
1904 int dev_id
= fep
->dev_id
;
1906 if (fep
->phy_node
) {
1907 phy_dev
= of_phy_connect(ndev
, fep
->phy_node
,
1908 &fec_enet_adjust_link
, 0,
1909 fep
->phy_interface
);
1911 netdev_err(ndev
, "Unable to connect to phy\n");
1915 /* check for attached phy */
1916 for (phy_id
= 0; (phy_id
< PHY_MAX_ADDR
); phy_id
++) {
1917 if (!mdiobus_is_registered_device(fep
->mii_bus
, phy_id
))
1921 strlcpy(mdio_bus_id
, fep
->mii_bus
->id
, MII_BUS_ID_SIZE
);
1925 if (phy_id
>= PHY_MAX_ADDR
) {
1926 netdev_info(ndev
, "no PHY, assuming direct connection to switch\n");
1927 strlcpy(mdio_bus_id
, "fixed-0", MII_BUS_ID_SIZE
);
1931 snprintf(phy_name
, sizeof(phy_name
),
1932 PHY_ID_FMT
, mdio_bus_id
, phy_id
);
1933 phy_dev
= phy_connect(ndev
, phy_name
, &fec_enet_adjust_link
,
1934 fep
->phy_interface
);
1937 if (IS_ERR(phy_dev
)) {
1938 netdev_err(ndev
, "could not attach to PHY\n");
1939 return PTR_ERR(phy_dev
);
1942 /* mask with MAC supported features */
1943 if (fep
->quirks
& FEC_QUIRK_HAS_GBIT
) {
1944 phy_dev
->supported
&= PHY_GBIT_FEATURES
;
1945 phy_dev
->supported
&= ~SUPPORTED_1000baseT_Half
;
1946 #if !defined(CONFIG_M5272)
1947 phy_dev
->supported
|= SUPPORTED_Pause
;
1951 phy_dev
->supported
&= PHY_BASIC_FEATURES
;
1953 phy_dev
->advertising
= phy_dev
->supported
;
1956 fep
->full_duplex
= 0;
1958 phy_attached_info(phy_dev
);
1963 static int fec_enet_mii_init(struct platform_device
*pdev
)
1965 static struct mii_bus
*fec0_mii_bus
;
1966 struct net_device
*ndev
= platform_get_drvdata(pdev
);
1967 struct fec_enet_private
*fep
= netdev_priv(ndev
);
1968 struct device_node
*node
;
1970 u32 mii_speed
, holdtime
;
	 * The i.MX28 dual FEC interfaces are not equal.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is the slave
	 *  - external PHYs can only be configured by fec0
	 *
	 * That is to say fec1 cannot work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both PHYs are attached to the
	 * fec0 MDIO interface in board design, and need to be configured by
1988 if ((fep
->quirks
& FEC_QUIRK_SINGLE_MDIO
) && fep
->dev_id
> 0) {
1989 /* fec1 uses fec0 mii_bus */
1990 if (mii_cnt
&& fec0_mii_bus
) {
1991 fep
->mii_bus
= fec0_mii_bus
;
1998 fep
->mii_timeout
= 0;
2001 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
2003 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
2004 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
2005 * Reference Manual has an error on this, and gets fixed on i.MX6Q
2008 mii_speed
= DIV_ROUND_UP(clk_get_rate(fep
->clk_ipg
), 5000000);
2009 if (fep
->quirks
& FEC_QUIRK_ENET_MAC
)
2011 if (mii_speed
> 63) {
2013 "fec clock (%lu) too fast to get right mii speed\n",
2014 clk_get_rate(fep
->clk_ipg
));
	 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
	 * versions are RAZ there, so just ignore the difference and write the
	 * register anyway.
	 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
	 * output.
	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
	 * holdtime cannot result in a value greater than 3.
2031 holdtime
= DIV_ROUND_UP(clk_get_rate(fep
->clk_ipg
), 100000000) - 1;
2033 fep
->phy_speed
= mii_speed
<< 1 | holdtime
<< 8;
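	/* Worked example: with a 66 MHz ipg clock,
	 * mii_speed = DIV_ROUND_UP(66000000, 5000000) = 14, giving an MDC of
	 * roughly 66 MHz / (14 * 2) ~= 2.36 MHz (the FEC_QUIRK_ENET_MAC check
	 * above compensates for the "+ 1" in the ENET-MAC divider formula),
	 * and holdtime = DIV_ROUND_UP(66000000, 100000000) - 1 = 0, i.e. a
	 * one cycle MDIO output hold; both fields are packed into phy_speed
	 * as MII_SPEED << 1 | HOLDTIME << 8.
	 */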
2035 writel(fep
->phy_speed
, fep
->hwp
+ FEC_MII_SPEED
);
2037 fep
->mii_bus
= mdiobus_alloc();
2038 if (fep
->mii_bus
== NULL
) {
2043 fep
->mii_bus
->name
= "fec_enet_mii_bus";
2044 fep
->mii_bus
->read
= fec_enet_mdio_read
;
2045 fep
->mii_bus
->write
= fec_enet_mdio_write
;
2046 snprintf(fep
->mii_bus
->id
, MII_BUS_ID_SIZE
, "%s-%x",
2047 pdev
->name
, fep
->dev_id
+ 1);
2048 fep
->mii_bus
->priv
= fep
;
2049 fep
->mii_bus
->parent
= &pdev
->dev
;
2051 node
= of_get_child_by_name(pdev
->dev
.of_node
, "mdio");
2053 err
= of_mdiobus_register(fep
->mii_bus
, node
);
2056 err
= mdiobus_register(fep
->mii_bus
);
2060 goto err_out_free_mdiobus
;
2064 /* save fec0 mii_bus */
2065 if (fep
->quirks
& FEC_QUIRK_SINGLE_MDIO
)
2066 fec0_mii_bus
= fep
->mii_bus
;
2070 err_out_free_mdiobus
:
2071 mdiobus_free(fep
->mii_bus
);
2076 static void fec_enet_mii_remove(struct fec_enet_private
*fep
)
2078 if (--mii_cnt
== 0) {
2079 mdiobus_unregister(fep
->mii_bus
);
2080 mdiobus_free(fep
->mii_bus
);
2084 static void fec_enet_get_drvinfo(struct net_device
*ndev
,
2085 struct ethtool_drvinfo
*info
)
2087 struct fec_enet_private
*fep
= netdev_priv(ndev
);
2089 strlcpy(info
->driver
, fep
->pdev
->dev
.driver
->name
,
2090 sizeof(info
->driver
));
2091 strlcpy(info
->version
, "Revision: 1.0", sizeof(info
->version
));
2092 strlcpy(info
->bus_info
, dev_name(&ndev
->dev
), sizeof(info
->bus_info
));
2095 static int fec_enet_get_regs_len(struct net_device
*ndev
)
2097 struct fec_enet_private
*fep
= netdev_priv(ndev
);
2101 r
= platform_get_resource(fep
->pdev
, IORESOURCE_MEM
, 0);
2103 s
= resource_size(r
);
/* List of registers that can safely be read to dump them with ethtool */
2109 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2110 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
2111 static u32 fec_enet_register_offset
[] = {
2112 FEC_IEVENT
, FEC_IMASK
, FEC_R_DES_ACTIVE_0
, FEC_X_DES_ACTIVE_0
,
2113 FEC_ECNTRL
, FEC_MII_DATA
, FEC_MII_SPEED
, FEC_MIB_CTRLSTAT
, FEC_R_CNTRL
,
2114 FEC_X_CNTRL
, FEC_ADDR_LOW
, FEC_ADDR_HIGH
, FEC_OPD
, FEC_TXIC0
, FEC_TXIC1
,
2115 FEC_TXIC2
, FEC_RXIC0
, FEC_RXIC1
, FEC_RXIC2
, FEC_HASH_TABLE_HIGH
,
2116 FEC_HASH_TABLE_LOW
, FEC_GRP_HASH_TABLE_HIGH
, FEC_GRP_HASH_TABLE_LOW
,
2117 FEC_X_WMRK
, FEC_R_BOUND
, FEC_R_FSTART
, FEC_R_DES_START_1
,
2118 FEC_X_DES_START_1
, FEC_R_BUFF_SIZE_1
, FEC_R_DES_START_2
,
2119 FEC_X_DES_START_2
, FEC_R_BUFF_SIZE_2
, FEC_R_DES_START_0
,
2120 FEC_X_DES_START_0
, FEC_R_BUFF_SIZE_0
, FEC_R_FIFO_RSFL
, FEC_R_FIFO_RSEM
,
2121 FEC_R_FIFO_RAEM
, FEC_R_FIFO_RAFL
, FEC_RACC
, FEC_RCMR_1
, FEC_RCMR_2
,
2122 FEC_DMA_CFG_1
, FEC_DMA_CFG_2
, FEC_R_DES_ACTIVE_1
, FEC_X_DES_ACTIVE_1
,
2123 FEC_R_DES_ACTIVE_2
, FEC_X_DES_ACTIVE_2
, FEC_QOS_SCHEME
,
2124 RMON_T_DROP
, RMON_T_PACKETS
, RMON_T_BC_PKT
, RMON_T_MC_PKT
,
2125 RMON_T_CRC_ALIGN
, RMON_T_UNDERSIZE
, RMON_T_OVERSIZE
, RMON_T_FRAG
,
2126 RMON_T_JAB
, RMON_T_COL
, RMON_T_P64
, RMON_T_P65TO127
, RMON_T_P128TO255
,
2127 RMON_T_P256TO511
, RMON_T_P512TO1023
, RMON_T_P1024TO2047
,
2128 RMON_T_P_GTE2048
, RMON_T_OCTETS
,
2129 IEEE_T_DROP
, IEEE_T_FRAME_OK
, IEEE_T_1COL
, IEEE_T_MCOL
, IEEE_T_DEF
,
2130 IEEE_T_LCOL
, IEEE_T_EXCOL
, IEEE_T_MACERR
, IEEE_T_CSERR
, IEEE_T_SQE
,
2131 IEEE_T_FDXFC
, IEEE_T_OCTETS_OK
,
2132 RMON_R_PACKETS
, RMON_R_BC_PKT
, RMON_R_MC_PKT
, RMON_R_CRC_ALIGN
,
2133 RMON_R_UNDERSIZE
, RMON_R_OVERSIZE
, RMON_R_FRAG
, RMON_R_JAB
,
2134 RMON_R_RESVD_O
, RMON_R_P64
, RMON_R_P65TO127
, RMON_R_P128TO255
,
2135 RMON_R_P256TO511
, RMON_R_P512TO1023
, RMON_R_P1024TO2047
,
2136 RMON_R_P_GTE2048
, RMON_R_OCTETS
,
2137 IEEE_R_DROP
, IEEE_R_FRAME_OK
, IEEE_R_CRC
, IEEE_R_ALIGN
, IEEE_R_MACERR
,
2138 IEEE_R_FDXFC
, IEEE_R_OCTETS_OK
2141 static u32 fec_enet_register_offset
[] = {
2142 FEC_ECNTRL
, FEC_IEVENT
, FEC_IMASK
, FEC_IVEC
, FEC_R_DES_ACTIVE_0
,
2143 FEC_R_DES_ACTIVE_1
, FEC_R_DES_ACTIVE_2
, FEC_X_DES_ACTIVE_0
,
2144 FEC_X_DES_ACTIVE_1
, FEC_X_DES_ACTIVE_2
, FEC_MII_DATA
, FEC_MII_SPEED
,
2145 FEC_R_BOUND
, FEC_R_FSTART
, FEC_X_WMRK
, FEC_X_FSTART
, FEC_R_CNTRL
,
2146 FEC_MAX_FRM_LEN
, FEC_X_CNTRL
, FEC_ADDR_LOW
, FEC_ADDR_HIGH
,
2147 FEC_GRP_HASH_TABLE_HIGH
, FEC_GRP_HASH_TABLE_LOW
, FEC_R_DES_START_0
,
2148 FEC_R_DES_START_1
, FEC_R_DES_START_2
, FEC_X_DES_START_0
,
2149 FEC_X_DES_START_1
, FEC_X_DES_START_2
, FEC_R_BUFF_SIZE_0
,
2150 FEC_R_BUFF_SIZE_1
, FEC_R_BUFF_SIZE_2
static void fec_enet_get_regs(struct net_device *ndev,
			      struct ethtool_regs *regs, void *regbuf)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
	u32 *buf = (u32 *)regbuf;
	u32 i, off;

	memset(buf, 0, regs->len);

	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
		off = fec_enet_register_offset[i] / 4;
		buf[off] = readl(&theregs[off]);
	}
}
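
/* For illustration only (the 0x004 offset below is an assumption, not taken
 * from this file): because each register value is stored at buf[offset / 4],
 * a user-space consumer of the ethtool register dump can index the snapshot
 * by register byte offset, e.g.
 *
 *	u32 *dump = (u32 *)regbuf;
 *	u32 val  = dump[0x004 / 4];	// lands in the second 32-bit word
 *
 * Offsets that are not listed in fec_enet_register_offset[] stay zero,
 * because the buffer is cleared with memset() before it is filled.
 */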
static int fec_enet_get_ts_info(struct net_device *ndev,
				struct ethtool_ts_info *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->bufdesc_ex) {

		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE |
					SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
		if (fep->ptp_clock)
			info->phc_index = ptp_clock_index(fep->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
				 (1 << HWTSTAMP_TX_ON);

		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
				   (1 << HWTSTAMP_FILTER_ALL);
		return 0;
	} else {
		return ethtool_op_get_ts_info(ndev, info);
	}
}
#if !defined(CONFIG_M5272)

static void fec_enet_get_pauseparam(struct net_device *ndev,
				    struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
	pause->rx_pause = pause->tx_pause;
}
static int fec_enet_set_pauseparam(struct net_device *ndev,
				   struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!ndev->phydev)
		return -ENODEV;

	if (pause->tx_pause != pause->rx_pause) {
		netdev_info(ndev,
			"hardware only supports enabling/disabling both tx and rx");
		return -EINVAL;
	}

	fep->pause_flag = 0;

	/* tx pause must be same as rx pause */
	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;

	if (pause->rx_pause || pause->autoneg) {
		ndev->phydev->supported |= ADVERTISED_Pause;
		ndev->phydev->advertising |= ADVERTISED_Pause;
	} else {
		ndev->phydev->supported &= ~ADVERTISED_Pause;
		ndev->phydev->advertising &= ~ADVERTISED_Pause;
	}

	if (pause->autoneg) {
		if (netif_running(ndev))
			fec_stop(ndev);
		phy_start_aneg(ndev->phydev);
	}
	if (netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}

	return 0;
}
static const struct fec_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} fec_stats[] = {
	/* RMON TX */
	{ "tx_dropped", RMON_T_DROP },
	{ "tx_packets", RMON_T_PACKETS },
	{ "tx_broadcast", RMON_T_BC_PKT },
	{ "tx_multicast", RMON_T_MC_PKT },
	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
	{ "tx_undersize", RMON_T_UNDERSIZE },
	{ "tx_oversize", RMON_T_OVERSIZE },
	{ "tx_fragment", RMON_T_FRAG },
	{ "tx_jabber", RMON_T_JAB },
	{ "tx_collision", RMON_T_COL },
	{ "tx_64byte", RMON_T_P64 },
	{ "tx_65to127byte", RMON_T_P65TO127 },
	{ "tx_128to255byte", RMON_T_P128TO255 },
	{ "tx_256to511byte", RMON_T_P256TO511 },
	{ "tx_512to1023byte", RMON_T_P512TO1023 },
	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
	{ "tx_octets", RMON_T_OCTETS },

	/* IEEE TX */
	{ "IEEE_tx_drop", IEEE_T_DROP },
	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
	{ "IEEE_tx_1col", IEEE_T_1COL },
	{ "IEEE_tx_mcol", IEEE_T_MCOL },
	{ "IEEE_tx_def", IEEE_T_DEF },
	{ "IEEE_tx_lcol", IEEE_T_LCOL },
	{ "IEEE_tx_excol", IEEE_T_EXCOL },
	{ "IEEE_tx_macerr", IEEE_T_MACERR },
	{ "IEEE_tx_cserr", IEEE_T_CSERR },
	{ "IEEE_tx_sqe", IEEE_T_SQE },
	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },

	/* RMON RX */
	{ "rx_packets", RMON_R_PACKETS },
	{ "rx_broadcast", RMON_R_BC_PKT },
	{ "rx_multicast", RMON_R_MC_PKT },
	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
	{ "rx_undersize", RMON_R_UNDERSIZE },
	{ "rx_oversize", RMON_R_OVERSIZE },
	{ "rx_fragment", RMON_R_FRAG },
	{ "rx_jabber", RMON_R_JAB },
	{ "rx_64byte", RMON_R_P64 },
	{ "rx_65to127byte", RMON_R_P65TO127 },
	{ "rx_128to255byte", RMON_R_P128TO255 },
	{ "rx_256to511byte", RMON_R_P256TO511 },
	{ "rx_512to1023byte", RMON_R_P512TO1023 },
	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
	{ "rx_octets", RMON_R_OCTETS },

	/* IEEE RX */
	{ "IEEE_rx_drop", IEEE_R_DROP },
	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
	{ "IEEE_rx_crc", IEEE_R_CRC },
	{ "IEEE_rx_align", IEEE_R_ALIGN },
	{ "IEEE_rx_macerr", IEEE_R_MACERR },
	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
};

#define FEC_STATS_SIZE		(ARRAY_SIZE(fec_stats) * sizeof(u64))
static void fec_enet_update_ethtool_stats(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
}
static void fec_enet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats, u64 *data)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	if (netif_running(dev))
		fec_enet_update_ethtool_stats(dev);

	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
}
static void fec_enet_get_strings(struct net_device *netdev,
	u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
				fec_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}
static int fec_enet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(fec_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	/* Disable MIB statistics counters */
	writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		writel(0, fep->hwp + fec_stats[i].offset);

	/* Don't disable MIB statistics counters */
	writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
}

#else	/* !defined(CONFIG_M5272) */
#define FEC_STATS_SIZE	0
static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
{
}

static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
}
#endif /* !defined(CONFIG_M5272) */
/* ITR clock source is enet system clock (clk_ahb).
 * TCTT unit is cycle_ns * 64 cycle
 * So, the ICTT value = X us / (cycle_ns * 64)
 */
static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	return us * (fep->itr_clk_rate / 64000) / 1000;
}
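
/* Worked example of the conversion above, for illustration only (the 66 MHz
 * figure is an assumption, not taken from this driver). With
 * itr_clk_rate = 66,000,000 Hz:
 *
 *	fec_enet_us_to_itr_clock(ndev, 20)
 *		= 20 * (66000000 / 64000) / 1000
 *		= 20 * 1031 / 1000
 *		= 20 ticks of the 64-cycle ITR timer unit
 *
 * i.e. at roughly 66 MHz one tick is about 1 us, and because the divisions
 * are integer divisions, very small microsecond values can round down to 0.
 */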
/* Set threshold for interrupt coalescing */
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int rx_itr, tx_itr;

	/* Must be greater than zero to avoid unpredictable behavior */
	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
	    !fep->tx_time_itr || !fep->tx_pkts_itr)
		return;

	/* Select enet system clock as Interrupt Coalescing
	 * timer Clock Source
	 */
	rx_itr = FEC_ITR_CLK_SEL;
	tx_itr = FEC_ITR_CLK_SEL;

	/* set ICFT and ICTT */
	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));

	rx_itr |= FEC_ITR_EN;
	tx_itr |= FEC_ITR_EN;

	writel(tx_itr, fep->hwp + FEC_TXIC0);
	writel(rx_itr, fep->hwp + FEC_RXIC0);
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(tx_itr, fep->hwp + FEC_TXIC1);
		writel(rx_itr, fep->hwp + FEC_RXIC1);
		writel(tx_itr, fep->hwp + FEC_TXIC2);
		writel(rx_itr, fep->hwp + FEC_RXIC2);
	}
}
static int
fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
		return -EOPNOTSUPP;

	ec->rx_coalesce_usecs = fep->rx_time_itr;
	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;

	ec->tx_coalesce_usecs = fep->tx_time_itr;
	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;

	return 0;
}
static int
fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int cycle;

	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (ec->rx_max_coalesced_frames > 255) {
		pr_err("Rx coalesced frames exceed hardware limitation\n");
		return -EINVAL;
	}

	if (ec->tx_max_coalesced_frames > 255) {
		pr_err("Tx coalesced frames exceed hardware limitation\n");
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
	if (cycle > 0xFFFF) {
		pr_err("Rx coalesced usec exceed hardware limitation\n");
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
	if (cycle > 0xFFFF) {
		pr_err("Tx coalesced usec exceed hardware limitation\n");
		return -EINVAL;
	}

	fep->rx_time_itr = ec->rx_coalesce_usecs;
	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;

	fep->tx_time_itr = ec->tx_coalesce_usecs;
	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;

	fec_enet_itr_coal_set(ndev);

	return 0;
}
static void fec_enet_itr_coal_init(struct net_device *ndev)
{
	struct ethtool_coalesce ec;

	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	fec_enet_set_coalesce(ndev, &ec);
}
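
/* For illustration only (interface name and values are hypothetical): the
 * standard ethtool coalescing knobs map onto the handlers above roughly as
 *
 *	ethtool -c eth0				-> fec_enet_get_coalesce()
 *	ethtool -C eth0 rx-usecs 100 rx-frames 32
 *		       tx-usecs 100 tx-frames 32 -> fec_enet_set_coalesce()
 *
 * Requested values are bounded by the checks above: at most 255 frames and
 * a time threshold whose ICTT value fits in 16 bits.
 */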
static int fec_enet_get_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = fep->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int fec_enet_set_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				const void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		fep->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static void
fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
	} else {
		wol->supported = wol->wolopts = 0;
	}
}
static int
fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
		return -EINVAL;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
	if (device_may_wakeup(&ndev->dev)) {
		fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
		if (fep->irq[0] > 0)
			enable_irq_wake(fep->irq[0]);
	} else {
		fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
		if (fep->irq[0] > 0)
			disable_irq_wake(fep->irq[0]);
	}

	return 0;
}
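
/* For illustration only (interface name hypothetical): magic-packet wake-up
 * is driven from user space as
 *
 *	ethtool -s eth0 wol g	-> fec_enet_set_wol() with wolopts = WAKE_MAGIC
 *	ethtool eth0		-> capability/state reported by fec_enet_get_wol()
 *
 * Anything other than WAKE_MAGIC, or a board without the "fsl,magic-packet"
 * property, is rejected with -EINVAL above.
 */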
static const struct ethtool_ops fec_enet_ethtool_ops = {
	.get_drvinfo		= fec_enet_get_drvinfo,
	.get_regs_len		= fec_enet_get_regs_len,
	.get_regs		= fec_enet_get_regs,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= fec_enet_get_coalesce,
	.set_coalesce		= fec_enet_set_coalesce,
#ifndef CONFIG_M5272
	.get_pauseparam		= fec_enet_get_pauseparam,
	.set_pauseparam		= fec_enet_set_pauseparam,
	.get_strings		= fec_enet_get_strings,
	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
	.get_sset_count		= fec_enet_get_sset_count,
#endif
	.get_ts_info		= fec_enet_get_ts_info,
	.get_tunable		= fec_enet_get_tunable,
	.set_tunable		= fec_enet_set_tunable,
	.get_wol		= fec_enet_get_wol,
	.set_wol		= fec_enet_set_wol,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	if (fep->bufdesc_ex) {
		if (cmd == SIOCSHWTSTAMP)
			return fec_ptp_set(ndev, rq);
		if (cmd == SIOCGHWTSTAMP)
			return fec_ptp_get(ndev, rq);
	}

	return phy_mii_ioctl(phydev, rq, cmd);
}
static void fec_enet_free_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;
		for (i = 0; i < rxq->bd.ring_size; i++) {
			skb = rxq->rx_skbuff[i];
			rxq->rx_skbuff[i] = NULL;
			if (skb) {
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 FEC_ENET_RX_FRSIZE - fep->rx_align,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		txq = fep->tx_queue[q];
		for (i = 0; i < txq->bd.ring_size; i++) {
			kfree(txq->tx_bounce[i]);
			txq->tx_bounce[i] = NULL;
			skb = txq->tx_skbuff[i];
			txq->tx_skbuff[i] = NULL;
			dev_kfree_skb(skb);
		}
	}
}
static void fec_enet_free_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
			txq = fep->tx_queue[i];
			dma_free_coherent(&fep->pdev->dev,
					  txq->bd.ring_size * TSO_HEADER_SIZE,
					  txq->tso_hdrs,
					  txq->tso_hdrs_dma);
		}

	for (i = 0; i < fep->num_rx_queues; i++)
		kfree(fep->rx_queue[i]);
	for (i = 0; i < fep->num_tx_queues; i++)
		kfree(fep->tx_queue[i]);
}
static int fec_enet_alloc_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	int ret = 0;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->tx_queue[i] = txq;
		txq->bd.ring_size = TX_RING_SIZE;
		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;

		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
		txq->tx_wake_threshold =
			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;

		txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
					txq->bd.ring_size * TSO_HEADER_SIZE,
					&txq->tso_hdrs_dma,
					GFP_KERNEL);
		if (!txq->tso_hdrs) {
			ret = -ENOMEM;
			goto alloc_failed;
		}
	}

	for (i = 0; i < fep->num_rx_queues; i++) {
		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
					   GFP_KERNEL);
		if (!fep->rx_queue[i]) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
	}
	return ret;

alloc_failed:
	fec_enet_free_queue(ndev);
	return ret;
}
static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;
	struct fec_enet_priv_rx_q *rxq;

	rxq = fep->rx_queue[queue];
	bdp = rxq->bd.base;
	for (i = 0; i < rxq->bd.ring_size; i++) {
		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
		if (!skb)
			goto err_alloc;

		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
			dev_kfree_skb(skb);
			goto err_alloc;
		}

		rxq->rx_skbuff[i] = skb;
		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
	return 0;

 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}
static int
fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct bufdesc	*bdp;
	struct fec_enet_priv_tx_q *txq;

	txq = fep->tx_queue[queue];
	bdp = txq->bd.base;
	for (i = 0; i < txq->bd.ring_size; i++) {
		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
		if (!txq->tx_bounce[i])
			goto err_alloc;

		bdp->cbd_sc = cpu_to_fec16(0);
		bdp->cbd_bufaddr = cpu_to_fec32(0);

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

	return 0;

 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}
static int fec_enet_alloc_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		if (fec_enet_alloc_rxq_buffers(ndev, i))
			return -ENOMEM;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fec_enet_alloc_txq_buffers(ndev, i))
			return -ENOMEM;
	return 0;
}
static int
fec_enet_open(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(&fep->pdev->dev);
	if (ret < 0)
		return ret;

	pinctrl_pm_select_default_state(&fep->pdev->dev);
	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto clk_enable;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(ndev);
	if (ret)
		goto err_enet_alloc;

	/* Init MAC prior to mii bus probe */
	fec_restart(ndev);

	/* Probe and connect to PHY when opening the interface */
	ret = fec_enet_mii_probe(ndev);
	if (ret)
		goto err_enet_mii_probe;

	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_used();

	napi_enable(&fep->napi);
	phy_start(ndev->phydev);
	netif_tx_start_all_queues(ndev);

	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
				 FEC_WOL_FLAG_ENABLE);

	return 0;

err_enet_mii_probe:
	fec_enet_free_buffers(ndev);
err_enet_alloc:
	fec_enet_clk_enable(ndev, false);
clk_enable:
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	return ret;
}
static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	phy_stop(ndev->phydev);

	if (netif_device_present(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_disable(ndev);
		fec_stop(ndev);
	}

	phy_disconnect(ndev->phydev);

	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_unused();

	fec_enet_update_ethtool_stats(ndev);

	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);

	fec_enet_free_buffers(ndev);

	return 0;
}
/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define FEC_HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320

static void set_multicast_list(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	unsigned int i, bit, data, crc, tmp;
	unsigned char hash;
	unsigned int hash_high = 0, hash_low = 0;

	if (ndev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (ndev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Add the addresses in hash register */
	netdev_for_each_mc_addr(ha, ndev) {
		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

		for (i = 0; i < ndev->addr_len; i++) {
			data = ha->addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only upper 6 bits (FEC_HASH_BITS) are used
		 * which point to specific bit in the hash registers
		 */
		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;

		if (hash > 31)
			hash_high |= 1 << (hash - 32);
		else
			hash_low |= 1 << hash;
	}

	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
}
/* Set a MAC change in hardware. */
static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sockaddr *addr = p;

	if (addr) {
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	}

	/* Add netif status check here to avoid system hang in below case:
	 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
	 * After ethx down, fec all clocks are gated off and then register
	 * access causes system hang.
	 */
	if (!netif_running(ndev))
		return 0;

	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
		fep->hwp + FEC_ADDR_LOW);
	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
		fep->hwp + FEC_ADDR_HIGH);
	return 0;
}
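
/* Worked example of the register packing above (the address is illustrative
 * only). For dev_addr = 00:04:9f:01:02:03:
 *
 *	FEC_ADDR_LOW  = 0x00049f01	(bytes 0..3, byte 0 in the top octet)
 *	FEC_ADDR_HIGH = 0x02030000	(bytes 4..5 in the top two octets)
 */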
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fec_poll_controller - FEC Poll controller function
 * @dev: The FEC network adapter
 *
 * Polled functionality used by netconsole and others in non interrupt mode
 *
 */
static void fec_poll_controller(struct net_device *dev)
{
	int i;
	struct fec_enet_private *fep = netdev_priv(dev);

	for (i = 0; i < FEC_IRQ_NUM; i++) {
		if (fep->irq[i] > 0) {
			disable_irq(fep->irq[i]);
			fec_enet_interrupt(fep->irq[i], dev);
			enable_irq(fep->irq[i]);
		}
	}
}
#endif
static inline void fec_enet_set_netdev_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	netdev->features = features;

	/* Receive checksum has been changed */
	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
		else
			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
	}
}
static int fec_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(netdev);
		fec_stop(netdev);
		fec_enet_set_netdev_features(netdev, features);
		fec_restart(netdev);
		netif_tx_wake_all_queues(netdev);
		netif_tx_unlock_bh(netdev);
		napi_enable(&fep->napi);
	} else {
		fec_enet_set_netdev_features(netdev, features);
	}

	return 0;
}
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fec_poll_controller,
#endif
	.ndo_set_features	= fec_set_features,
};
static const unsigned short offset_des_active_rxq[] = {
	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
};

static const unsigned short offset_des_active_txq[] = {
	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
};
 /*
  * XXX:  We need to clean up on failure exits here.
  *
  */
static int fec_enet_init(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *cbd_base;
	dma_addr_t bd_dma;
	int bd_size;
	unsigned int i;
	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned dsize_log2 = __fls(dsize);

	WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM)
	fep->rx_align = 0xf;
	fep->tx_align = 0xf;
#else
	fep->rx_align = 0x3;
	fep->tx_align = 0x3;
#endif

	fec_enet_alloc_queue(ndev);

	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
				       GFP_KERNEL);
	if (!cbd_base)
		return -ENOMEM;

	memset(cbd_base, 0, bd_size);

	/* Get the Ethernet address */
	fec_get_mac(ndev);
	/* make sure MAC we just acquired is programmed into the hw */
	fec_set_mac_address(ndev, NULL);

	/* Set receive and transmit descriptor base. */
	for (i = 0; i < fep->num_rx_queues; i++) {
		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
		unsigned size = dsize * rxq->bd.ring_size;

		rxq->bd.qid = i;
		rxq->bd.base = cbd_base;
		rxq->bd.cur = cbd_base;
		rxq->bd.dma = bd_dma;
		rxq->bd.dsize = dsize;
		rxq->bd.dsize_log2 = dsize_log2;
		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
		unsigned size = dsize * txq->bd.ring_size;

		txq->bd.qid = i;
		txq->bd.base = cbd_base;
		txq->bd.cur = cbd_base;
		txq->bd.dma = bd_dma;
		txq->bd.dsize = dsize;
		txq->bd.dsize_log2 = dsize_log2;
		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}
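
	/* Worked example of the carving above, for illustration only (queue
	 * counts are an assumption). With one RX and one TX queue, the single
	 * dmam_alloc_coherent() block is split front to back:
	 *
	 *	cbd_base: [ RX ring: ring_size * dsize ][ TX ring: ring_size * dsize ]
	 *	           ^ rxq->bd.base                ^ txq->bd.base
	 *
	 * and each bd.last points at the final descriptor of its own ring,
	 * i.e. one dsize before the start of the next ring.
	 */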

	/* The FEC Ethernet specific entries in the device structure */
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->netdev_ops = &fec_netdev_ops;
	ndev->ethtool_ops = &fec_enet_ethtool_ops;

	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);

	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
		/* enable hw VLAN support */
		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
		ndev->gso_max_segs = FEC_MAX_TSO_SEGS;

		/* enable hw accelerator */
		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
	}

	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		fep->tx_align = 0;
		fep->rx_align = 0x3f;
	}

	ndev->hw_features = ndev->features;

	fec_restart(ndev);

	if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
		fec_enet_clear_ethtool_stats(ndev);
	else
		fec_enet_update_ethtool_stats(ndev);

	return 0;
}
#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
	int err, phy_reset;
	bool active_high = false;
	int msec = 1, phy_post_delay = 0;
	struct device_node *np = pdev->dev.of_node;

	if (!np)
		return 0;

	err = of_property_read_u32(np, "phy-reset-duration", &msec);
	/* A sane reset duration should not be longer than 1s */
	if (!err && msec > 1000)
		msec = 1;

	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
	if (phy_reset == -EPROBE_DEFER)
		return phy_reset;
	else if (!gpio_is_valid(phy_reset))
		return 0;

	err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
	/* valid reset duration should be less than 1s */
	if (!err && phy_post_delay > 1000)
		return -EINVAL;

	active_high = of_property_read_bool(np, "phy-reset-active-high");

	err = devm_gpio_request_one(&pdev->dev, phy_reset,
			active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
			"phy-reset");
	if (err) {
		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
		return err;
	}

	if (msec > 20)
		msleep(msec);
	else
		usleep_range(msec * 1000, msec * 1000 + 1000);

	gpio_set_value_cansleep(phy_reset, !active_high);

	if (!phy_post_delay)
		return 0;

	if (phy_post_delay > 20)
		msleep(phy_post_delay);
	else
		usleep_range(phy_post_delay * 1000,
			     phy_post_delay * 1000 + 1000);

	return 0;
}
#else /* CONFIG_OF */
static int fec_reset_phy(struct platform_device *pdev)
{
	/*
	 * In case of platform probe, the reset has been done
	 * by machine code.
	 */
	return 0;
}
#endif /* CONFIG_OF */
static void
fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
{
	struct device_node *np = pdev->dev.of_node;

	*num_tx = *num_rx = 1;

	if (!np || !of_device_is_available(np))
		return;

	/* parse the num of tx and rx queues */
	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);

	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);

	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
			 *num_tx);
		*num_tx = 1;
		return;
	}

	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
			 *num_rx);
		*num_rx = 1;
		return;
	}
}
static int fec_enet_get_irq_cnt(struct platform_device *pdev)
{
	int irq_cnt = platform_irq_count(pdev);

	if (irq_cnt > FEC_IRQ_NUM)
		irq_cnt = FEC_IRQ_NUM;	/* last for pps */
	else if (irq_cnt == 2)
		irq_cnt = 1;		/* last for pps */
	else if (irq_cnt <= 0)
		irq_cnt = 1;		/* At least 1 irq is needed */
	return irq_cnt;
}
static int
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;
	const struct of_device_id *of_id;
	static int dev_id;
	struct device_node *np = pdev->dev.of_node, *phy_node;
	int num_tx_qs;
	int num_rx_qs;
	char irq_name[8];
	int irq_cnt;

	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);

	/* Init network device */
	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
				  FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);

	of_id = of_match_device(fec_dt_ids, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;
	fep->quirks = pdev->id_entry->driver_data;

	fep->netdev = ndev;
	fep->num_rx_queues = num_rx_qs;
	fep->num_tx_queues = num_tx_qs;

#if !defined(CONFIG_M5272)
	/* default enable pause frame auto negotiation */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fep->hwp = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(fep->hwp)) {
		ret = PTR_ERR(fep->hwp);
		goto failed_ioremap;
	}

	fep->pdev = pdev;
	fep->dev_id = dev_id++;

	platform_set_drvdata(pdev, ndev);

	if ((of_machine_is_compatible("fsl,imx6q") ||
	     of_machine_is_compatible("fsl,imx6dl")) &&
	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
		fep->quirks |= FEC_QUIRK_ERR006687;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;

	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"broken fixed-link specification\n");
			goto failed_phy;
		}
		phy_node = of_node_get(np);
	}
	fep->phy_node = phy_node;

	ret = of_get_phy_mode(pdev->dev.of_node);
	if (ret < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata)
			fep->phy_interface = pdata->phy;
		else
			fep->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = ret;
	}

	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fep->clk_ipg)) {
		ret = PTR_ERR(fep->clk_ipg);
		goto failed_clk;
	}

	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(fep->clk_ahb)) {
		ret = PTR_ERR(fep->clk_ahb);
		goto failed_clk;
	}

	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);

	/* enet_out is optional, depends on board */
	fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
	if (IS_ERR(fep->clk_enet_out))
		fep->clk_enet_out = NULL;

	fep->ptp_clk_on = false;
	mutex_init(&fep->ptp_clk_mutex);

	/* clk_ref is optional, depends on board */
	fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
	if (IS_ERR(fep->clk_ref))
		fep->clk_ref = NULL;

	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
	if (IS_ERR(fep->clk_ptp)) {
		fep->clk_ptp = NULL;
		fep->bufdesc_ex = false;
	}

	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;

	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			clk_disable_unprepare(fep->clk_ipg);
			goto failed_regulator;
		}
	} else {
		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto failed_regulator;
		}
		fep->reg_phy = NULL;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = fec_reset_phy(pdev);
	if (ret)
		goto failed_reset;

	irq_cnt = fec_enet_get_irq_cnt(pdev);
	if (fep->bufdesc_ex)
		fec_ptp_init(pdev, irq_cnt);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	for (i = 0; i < irq_cnt; i++) {
		sprintf(irq_name, "int%d", i);
		irq = platform_get_irq_byname(pdev, irq_name);
		if (irq < 0)
			irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}

	init_completion(&fep->mdio_done);
	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	device_init_wakeup(&ndev->dev, fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	fep->rx_copybreak = COPYBREAK_DEFAULT;
	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	fec_ptp_stop(pdev);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_reset:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
failed_regulator:
failed_clk_ipg:
	fec_enet_clk_enable(ndev, false);
failed_clk:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(phy_node);
failed_phy:
	dev_id--;
failed_ioremap:
	free_netdev(ndev);

	return ret;
}
static int
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *np = pdev->dev.of_node;

	cancel_work_sync(&fep->tx_timeout_work);
	fec_ptp_stop(pdev);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(fep->phy_node);
	free_netdev(ndev);

	return 0;
}
static int __maybe_unused fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
		phy_stop(ndev->phydev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
		fec_enet_clk_enable(ndev, false);
		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	}
	rtnl_unlock();

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
		regulator_disable(fep->reg_phy);

	/* SOC supply clock to phy, when clock is disabled, phy link down
	 * SOC control phy regulator, when regulator is disabled, phy link down
	 */
	if (fep->clk_enet_out || fep->reg_phy)
		fep->link = 0;

	return 0;
}
static int __maybe_unused fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	int ret;
	int val;

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	rtnl_lock();
	if (netif_running(ndev)) {
		ret = fec_enet_clk_enable(ndev, true);
		if (ret) {
			rtnl_unlock();
			goto failed_clk;
		}
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
			if (pdata && pdata->sleep_mode_enable)
				pdata->sleep_mode_enable(false);
			val = readl(fep->hwp + FEC_ECNTRL);
			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
			writel(val, fep->hwp + FEC_ECNTRL);
			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
		} else {
			pinctrl_pm_select_default_state(&fep->pdev->dev);
		}
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_start(ndev->phydev);
	}
	rtnl_unlock();

	return 0;

failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}
static int __maybe_unused fec_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	clk_disable_unprepare(fep->clk_ipg);

	return 0;
}
static int __maybe_unused fec_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	return clk_prepare_enable(fep->clk_ipg);
}
static const struct dev_pm_ops fec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove	= fec_drv_remove,
};

module_platform_driver(fec_driver);

MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");