]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. | |
3 | * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) | |
4 | * | |
7dd6a2aa | 5 | * Right now, I am very wasteful with the buffers. I allocate memory |
1da177e4 LT |
6 | * pages and then divide them into 2K frame buffers. This way I know I |
7 | * have buffers large enough to hold one frame within one buffer descriptor. | |
8 | * Once I get this working, I will use 64 or 128 byte CPM buffers, which | |
9 | * will be much more memory efficient and will easily handle lots of | |
10 | * small packets. | |
11 | * | |
12 | * Much better multiple PHY support by Magnus Damm. | |
13 | * Copyright (c) 2000 Ericsson Radio Systems AB. | |
14 | * | |
562d2f8c GU |
15 | * Support for FEC controller of ColdFire processors. |
16 | * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com) | |
7dd6a2aa GU |
17 | * |
18 | * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) | |
677177c5 | 19 | * Copyright (c) 2004-2006 Macq Electronique SA. |
b5680e0b | 20 | * |
230dec61 | 21 | * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. |
1da177e4 LT |
22 | */ |
23 | ||
1da177e4 LT |
24 | #include <linux/module.h> |
25 | #include <linux/kernel.h> | |
26 | #include <linux/string.h> | |
27 | #include <linux/ptrace.h> | |
28 | #include <linux/errno.h> | |
29 | #include <linux/ioport.h> | |
30 | #include <linux/slab.h> | |
31 | #include <linux/interrupt.h> | |
1da177e4 LT |
32 | #include <linux/delay.h> |
33 | #include <linux/netdevice.h> | |
34 | #include <linux/etherdevice.h> | |
35 | #include <linux/skbuff.h> | |
4c09eed9 JB |
36 | #include <linux/in.h> |
37 | #include <linux/ip.h> | |
38 | #include <net/ip.h> | |
79f33912 | 39 | #include <net/tso.h> |
4c09eed9 JB |
40 | #include <linux/tcp.h> |
41 | #include <linux/udp.h> | |
42 | #include <linux/icmp.h> | |
1da177e4 LT |
43 | #include <linux/spinlock.h> |
44 | #include <linux/workqueue.h> | |
45 | #include <linux/bitops.h> | |
6f501b17 SH |
46 | #include <linux/io.h> |
47 | #include <linux/irq.h> | |
196719ec | 48 | #include <linux/clk.h> |
ead73183 | 49 | #include <linux/platform_device.h> |
e6b043d5 | 50 | #include <linux/phy.h> |
5eb32bd0 | 51 | #include <linux/fec.h> |
ca2cc333 SG |
52 | #include <linux/of.h> |
53 | #include <linux/of_device.h> | |
54 | #include <linux/of_gpio.h> | |
407066f8 | 55 | #include <linux/of_mdio.h> |
ca2cc333 | 56 | #include <linux/of_net.h> |
5fa9c0fe | 57 | #include <linux/regulator/consumer.h> |
cdffcf1b | 58 | #include <linux/if_vlan.h> |
a68ab98e | 59 | #include <linux/pinctrl/consumer.h> |
1da177e4 | 60 | |
080853af | 61 | #include <asm/cacheflush.h> |
196719ec | 62 | |
1da177e4 | 63 | #include "fec.h" |
1da177e4 | 64 | |
772e42b0 | 65 | static void set_multicast_list(struct net_device *ndev); |
d851b47b | 66 | static void fec_enet_itr_coal_init(struct net_device *ndev); |
772e42b0 | 67 | |
b5680e0b SG |
68 | #define DRIVER_NAME "fec" |
69 | ||
4d494cdc FD |
70 | #define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0)) |
71 | ||
baa70a5c FL |
72 | /* Pause frame field and FIFO threshold */ |
73 | #define FEC_ENET_FCE (1 << 5) | |
74 | #define FEC_ENET_RSEM_V 0x84 | |
75 | #define FEC_ENET_RSFL_V 16 | |
76 | #define FEC_ENET_RAEM_V 0x8 | |
77 | #define FEC_ENET_RAFL_V 0x8 | |
78 | #define FEC_ENET_OPD_V 0xFFF0 | |
79 | ||
b5680e0b SG |
80 | /* Controller is ENET-MAC */ |
81 | #define FEC_QUIRK_ENET_MAC (1 << 0) | |
82 | /* Controller needs driver to swap frame */ | |
83 | #define FEC_QUIRK_SWAP_FRAME (1 << 1) | |
0ca1e290 SG |
84 | /* Controller uses gasket */ |
85 | #define FEC_QUIRK_USE_GASKET (1 << 2) | |
230dec61 SG |
86 | /* Controller has GBIT support */ |
87 | #define FEC_QUIRK_HAS_GBIT (1 << 3) | |
ff43da86 FL |
88 | /* Controller has extend desc buffer */ |
89 | #define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) | |
48496255 SG |
90 | /* Controller has hardware checksum support */ |
91 | #define FEC_QUIRK_HAS_CSUM (1 << 5) | |
cdffcf1b JB |
92 | /* Controller has hardware vlan support */ |
93 | #define FEC_QUIRK_HAS_VLAN (1 << 6) | |
03191656 FL |
94 | /* ENET IP errata ERR006358 |
95 | * | |
96 | * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously | |
97 | * detected as not set during a prior frame transmission, then the | |
98 | * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs | |
99 | * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in | |
03191656 FL |
100 | * frames not being transmitted until there is a 0-to-1 transition on |
101 | * ENET_TDAR[TDAR]. | |
102 | */ | |
103 | #define FEC_QUIRK_ERR006358 (1 << 7) | |
95a77470 FD |
104 | /* ENET IP hw AVB |
105 | * | |
106 | * i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support. | |
107 | * - Two class indicators on receive with configurable priority | |
108 | * - Two class indicators and line speed timer on transmit allowing | |
109 | * implementation class credit based shapers externally | |
110 | * - Additional DMA registers provisioned to allow managing up to 3 | |
111 | * independent rings | |
112 | */ | |
113 | #define FEC_QUIRK_HAS_AVB (1 << 8) | |
37d6017b FD |
114 | /* There is a TDAR race condition for multiQ when the software sets TDAR
115 | * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles). | |
116 | * This will cause the udma_tx and udma_tx_arbiter state machines to hang. | |
117 | * The issue exist at i.MX6SX enet IP. | |
118 | */ | |
119 | #define FEC_QUIRK_ERR007885 (1 << 9) | |
b5680e0b SG |
120 | |
121 | static struct platform_device_id fec_devtype[] = { | |
122 | { | |
0ca1e290 | 123 | /* keep it for coldfire */ |
b5680e0b SG |
124 | .name = DRIVER_NAME, |
125 | .driver_data = 0, | |
0ca1e290 SG |
126 | }, { |
127 | .name = "imx25-fec", | |
128 | .driver_data = FEC_QUIRK_USE_GASKET, | |
129 | }, { | |
130 | .name = "imx27-fec", | |
131 | .driver_data = 0, | |
b5680e0b SG |
132 | }, { |
133 | .name = "imx28-fec", | |
134 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, | |
230dec61 SG |
135 | }, { |
136 | .name = "imx6q-fec", | |
ff43da86 | 137 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | |
cdffcf1b | 138 | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | |
03191656 | 139 | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358, |
ca7c4a45 | 140 | }, { |
36803542 | 141 | .name = "mvf600-fec", |
ca7c4a45 | 142 | .driver_data = FEC_QUIRK_ENET_MAC, |
95a77470 FD |
143 | }, { |
144 | .name = "imx6sx-fec", | |
145 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | | |
146 | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | | |
147 | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | | |
37d6017b | 148 | FEC_QUIRK_HAS_AVB | FEC_QUIRK_ERR007885, |
0ca1e290 SG |
149 | }, { |
150 | /* sentinel */ | |
151 | } | |
b5680e0b | 152 | }; |
0ca1e290 | 153 | MODULE_DEVICE_TABLE(platform, fec_devtype); |
b5680e0b | 154 | |
ca2cc333 | 155 | enum imx_fec_type { |
a7dd3219 | 156 | IMX25_FEC = 1, /* runs on i.mx25/50/53 */ |
ca2cc333 SG |
157 | IMX27_FEC, /* runs on i.mx27/35/51 */ |
158 | IMX28_FEC, | |
230dec61 | 159 | IMX6Q_FEC, |
36803542 | 160 | MVF600_FEC, |
ba593e00 | 161 | IMX6SX_FEC, |
ca2cc333 SG |
162 | }; |
163 | ||
164 | static const struct of_device_id fec_dt_ids[] = { | |
165 | { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], }, | |
166 | { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, | |
167 | { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, | |
230dec61 | 168 | { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, |
36803542 | 169 | { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, |
ba593e00 | 170 | { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, |
ca2cc333 SG |
171 | { /* sentinel */ } |
172 | }; | |
173 | MODULE_DEVICE_TABLE(of, fec_dt_ids); | |
174 | ||
49da97dc SG |
175 | static unsigned char macaddr[ETH_ALEN]; |
176 | module_param_array(macaddr, byte, NULL, 0); | |
177 | MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); | |
1da177e4 | 178 | |
49da97dc | 179 | #if defined(CONFIG_M5272) |
1da177e4 LT |
180 | /* |
181 | * Some hardware gets its MAC address out of local flash memory.
182 | * if this is non-zero then assume it is the address to get MAC from. | |
183 | */ | |
184 | #if defined(CONFIG_NETtel) | |
185 | #define FEC_FLASHMAC 0xf0006006 | |
186 | #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) | |
187 | #define FEC_FLASHMAC 0xf0006000 | |
1da177e4 LT |
188 | #elif defined(CONFIG_CANCam) |
189 | #define FEC_FLASHMAC 0xf0020000 | |
7dd6a2aa GU |
190 | #elif defined (CONFIG_M5272C3) |
191 | #define FEC_FLASHMAC (0xffe04000 + 4) | |
192 | #elif defined(CONFIG_MOD5272) | |
a7dd3219 | 193 | #define FEC_FLASHMAC 0xffc0406b |
1da177e4 LT |
194 | #else |
195 | #define FEC_FLASHMAC 0 | |
196 | #endif | |
43be6366 | 197 | #endif /* CONFIG_M5272 */ |
ead73183 | 198 | |
cdffcf1b | 199 | /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. |
1da177e4 | 200 | */ |
cdffcf1b | 201 | #define PKT_MAXBUF_SIZE 1522 |
1da177e4 | 202 | #define PKT_MINBUF_SIZE 64 |
cdffcf1b | 203 | #define PKT_MAXBLR_SIZE 1536 |
1da177e4 | 204 | |
4c09eed9 JB |
205 | /* FEC receive acceleration */ |
206 | #define FEC_RACC_IPDIS (1 << 1) | |
207 | #define FEC_RACC_PRODIS (1 << 2) | |
208 | #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) | |
209 | ||
1da177e4 | 210 | /* |
6b265293 | 211 | * The 5270/5271/5280/5282/532x RX control register also contains maximum frame |
1da177e4 LT |
212 | * size bits. Other FEC hardware does not, so we need to take that into |
213 | * account when setting it. | |
214 | */ | |
562d2f8c | 215 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ |
085e79ed | 216 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) |
1da177e4 LT |
217 | #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) |
218 | #else | |
219 | #define OPT_FRAME_SIZE 0 | |
220 | #endif | |
221 | ||
e6b043d5 BW |
222 | /* FEC MII MMFR bits definition */ |
223 | #define FEC_MMFR_ST (1 << 30) | |
224 | #define FEC_MMFR_OP_READ (2 << 28) | |
225 | #define FEC_MMFR_OP_WRITE (1 << 28) | |
226 | #define FEC_MMFR_PA(v) ((v & 0x1f) << 23) | |
227 | #define FEC_MMFR_RA(v) ((v & 0x1f) << 18) | |
228 | #define FEC_MMFR_TA (2 << 16) | |
229 | #define FEC_MMFR_DATA(v) (v & 0xffff) | |
1da177e4 | 230 | |
c3b084c2 | 231 | #define FEC_MII_TIMEOUT 30000 /* us */ |
1da177e4 | 232 | |
22f6b860 SH |
233 | /* Transmitter timeout */ |
234 | #define TX_TIMEOUT (2 * HZ) | |
1da177e4 | 235 | |
baa70a5c FL |
236 | #define FEC_PAUSE_FLAG_AUTONEG 0x1 |
237 | #define FEC_PAUSE_FLAG_ENABLE 0x2 | |
238 | ||
79f33912 NA |
239 | #define TSO_HEADER_SIZE 128 |
240 | /* Max number of allowed TCP segments for software TSO */ | |
241 | #define FEC_MAX_TSO_SEGS 100 | |
242 | #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) | |
243 | ||
244 | #define IS_TSO_HEADER(txq, addr) \ | |
245 | ((addr >= txq->tso_hdrs_dma) && \ | |
246 | (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) | |
247 | ||
e163cc97 LW |
248 | static int mii_cnt; |
249 | ||
36e24e2e | 250 | static inline |
4d494cdc FD |
251 | struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, |
252 | struct fec_enet_private *fep, | |
253 | int queue_id) | |
ff43da86 | 254 | { |
36e24e2e DFB |
255 | struct bufdesc *new_bd = bdp + 1; |
256 | struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; | |
4d494cdc FD |
257 | struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; |
258 | struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; | |
36e24e2e DFB |
259 | struct bufdesc_ex *ex_base; |
260 | struct bufdesc *base; | |
261 | int ring_size; | |
262 | ||
4d494cdc FD |
263 | if (bdp >= txq->tx_bd_base) { |
264 | base = txq->tx_bd_base; | |
265 | ring_size = txq->tx_ring_size; | |
266 | ex_base = (struct bufdesc_ex *)txq->tx_bd_base; | |
36e24e2e | 267 | } else { |
4d494cdc FD |
268 | base = rxq->rx_bd_base; |
269 | ring_size = rxq->rx_ring_size; | |
270 | ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; | |
36e24e2e DFB |
271 | } |
272 | ||
273 | if (fep->bufdesc_ex) | |
274 | return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ? | |
275 | ex_base : ex_new_bd); | |
ff43da86 | 276 | else |
36e24e2e DFB |
277 | return (new_bd >= (base + ring_size)) ? |
278 | base : new_bd; | |
ff43da86 FL |
279 | } |
280 | ||
36e24e2e | 281 | static inline |
4d494cdc FD |
282 | struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, |
283 | struct fec_enet_private *fep, | |
284 | int queue_id) | |
ff43da86 | 285 | { |
36e24e2e DFB |
286 | struct bufdesc *new_bd = bdp - 1; |
287 | struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; | |
4d494cdc FD |
288 | struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; |
289 | struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; | |
36e24e2e DFB |
290 | struct bufdesc_ex *ex_base; |
291 | struct bufdesc *base; | |
292 | int ring_size; | |
293 | ||
4d494cdc FD |
294 | if (bdp >= txq->tx_bd_base) { |
295 | base = txq->tx_bd_base; | |
296 | ring_size = txq->tx_ring_size; | |
297 | ex_base = (struct bufdesc_ex *)txq->tx_bd_base; | |
36e24e2e | 298 | } else { |
4d494cdc FD |
299 | base = rxq->rx_bd_base; |
300 | ring_size = rxq->rx_ring_size; | |
301 | ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; | |
36e24e2e DFB |
302 | } |
303 | ||
304 | if (fep->bufdesc_ex) | |
305 | return (struct bufdesc *)((ex_new_bd < ex_base) ? | |
306 | (ex_new_bd + ring_size) : ex_new_bd); | |
ff43da86 | 307 | else |
36e24e2e | 308 | return (new_bd < base) ? (new_bd + ring_size) : new_bd; |
ff43da86 FL |
309 | } |
310 | ||
61a4427b NA |
311 | static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp, |
312 | struct fec_enet_private *fep) | |
313 | { | |
314 | return ((const char *)bdp - (const char *)base) / fep->bufdesc_size; | |
315 | } | |
316 | ||
4d494cdc FD |
317 | static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep, |
318 | struct fec_enet_priv_tx_q *txq) | |
6e909283 NA |
319 | { |
320 | int entries; | |
321 | ||
4d494cdc FD |
322 | entries = ((const char *)txq->dirty_tx - |
323 | (const char *)txq->cur_tx) / fep->bufdesc_size - 1; | |
6e909283 | 324 | |
4d494cdc | 325 | return entries > 0 ? entries : entries + txq->tx_ring_size; |
6e909283 NA |
326 | } |
327 | ||
b5680e0b SG |
/* Byte-swap @len bytes at @bufaddr, one 32-bit word at a time, for
 * controllers with the FEC_QUIRK_SWAP_FRAME erratum.  A trailing partial
 * word is swapped as a whole word (callers provide adequately sized
 * buffers).  Returns @bufaddr for caller convenience.
 */
static void *swap_buffer(void *bufaddr, int len)
{
	unsigned int *word = bufaddr;
	int nwords = DIV_ROUND_UP(len, 4);
	int i;

	for (i = 0; i < nwords; i++)
		word[i] = cpu_to_be32(word[i]);

	return bufaddr;
}
338 | ||
/* Dump the TX descriptor ring of queue 0 to the kernel log for debugging.
 * For each descriptor it prints the index, two position markers ('S' for
 * the slot cur_tx points at, 'H' for the slot dirty_tx points at), the
 * control/status word, DMA buffer address, data length and the associated
 * skb pointer.  Walks the whole ring exactly once.
 */
static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	int index = 0;

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr SC addr len SKB\n");

	/* Only queue 0 is dumped; multi-queue rings are not covered here. */
	txq = fep->tx_queue[0];
	bdp = txq->tx_bd_base;

	do {
		pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
			index,
			bdp == txq->cur_tx ? 'S' : ' ',
			bdp == txq->dirty_tx ? 'H' : ' ',
			bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
			txq->tx_skbuff[index]);
		bdp = fec_enet_get_nextdesc(bdp, fep, 0);
		index++;
	} while (bdp != txq->tx_bd_base);
}
363 | ||
62a02c98 FD |
364 | static inline bool is_ipv4_pkt(struct sk_buff *skb) |
365 | { | |
366 | return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; | |
367 | } | |
368 | ||
4c09eed9 JB |
369 | static int |
370 | fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) | |
371 | { | |
372 | /* Only run for packets requiring a checksum. */ | |
373 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
374 | return 0; | |
375 | ||
376 | if (unlikely(skb_cow_head(skb, 0))) | |
377 | return -1; | |
378 | ||
62a02c98 FD |
379 | if (is_ipv4_pkt(skb)) |
380 | ip_hdr(skb)->check = 0; | |
4c09eed9 JB |
381 | *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; |
382 | ||
383 | return 0; | |
384 | } | |
385 | ||
6e909283 | 386 | static int |
4d494cdc FD |
387 | fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, |
388 | struct sk_buff *skb, | |
389 | struct net_device *ndev) | |
1da177e4 | 390 | { |
c556167f | 391 | struct fec_enet_private *fep = netdev_priv(ndev); |
b5680e0b SG |
392 | const struct platform_device_id *id_entry = |
393 | platform_get_device_id(fep->pdev); | |
4d494cdc | 394 | struct bufdesc *bdp = txq->cur_tx; |
6e909283 NA |
395 | struct bufdesc_ex *ebdp; |
396 | int nr_frags = skb_shinfo(skb)->nr_frags; | |
4d494cdc | 397 | unsigned short queue = skb_get_queue_mapping(skb); |
6e909283 NA |
398 | int frag, frag_len; |
399 | unsigned short status; | |
400 | unsigned int estatus = 0; | |
401 | skb_frag_t *this_frag; | |
de5fb0a0 | 402 | unsigned int index; |
6e909283 | 403 | void *bufaddr; |
d6bf3143 | 404 | dma_addr_t addr; |
6e909283 | 405 | int i; |
1da177e4 | 406 | |
6e909283 NA |
407 | for (frag = 0; frag < nr_frags; frag++) { |
408 | this_frag = &skb_shinfo(skb)->frags[frag]; | |
4d494cdc | 409 | bdp = fec_enet_get_nextdesc(bdp, fep, queue); |
6e909283 NA |
410 | ebdp = (struct bufdesc_ex *)bdp; |
411 | ||
412 | status = bdp->cbd_sc; | |
413 | status &= ~BD_ENET_TX_STATS; | |
414 | status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); | |
415 | frag_len = skb_shinfo(skb)->frags[frag].size; | |
416 | ||
417 | /* Handle the last BD specially */ | |
418 | if (frag == nr_frags - 1) { | |
419 | status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); | |
420 | if (fep->bufdesc_ex) { | |
421 | estatus |= BD_ENET_TX_INT; | |
422 | if (unlikely(skb_shinfo(skb)->tx_flags & | |
423 | SKBTX_HW_TSTAMP && fep->hwts_tx_en)) | |
424 | estatus |= BD_ENET_TX_TS; | |
425 | } | |
426 | } | |
427 | ||
428 | if (fep->bufdesc_ex) { | |
429 | if (skb->ip_summed == CHECKSUM_PARTIAL) | |
430 | estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; | |
431 | ebdp->cbd_bdu = 0; | |
432 | ebdp->cbd_esc = estatus; | |
433 | } | |
434 | ||
435 | bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; | |
436 | ||
4d494cdc | 437 | index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); |
41ef84ce | 438 | if (((unsigned long) bufaddr) & fep->tx_align || |
6e909283 | 439 | id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { |
4d494cdc FD |
440 | memcpy(txq->tx_bounce[index], bufaddr, frag_len); |
441 | bufaddr = txq->tx_bounce[index]; | |
6e909283 NA |
442 | |
443 | if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) | |
444 | swap_buffer(bufaddr, frag_len); | |
445 | } | |
446 | ||
d6bf3143 RK |
447 | addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, |
448 | DMA_TO_DEVICE); | |
449 | if (dma_mapping_error(&fep->pdev->dev, addr)) { | |
6e909283 NA |
450 | dev_kfree_skb_any(skb); |
451 | if (net_ratelimit()) | |
452 | netdev_err(ndev, "Tx DMA memory map failed\n"); | |
453 | goto dma_mapping_error; | |
454 | } | |
455 | ||
d6bf3143 | 456 | bdp->cbd_bufaddr = addr; |
6e909283 NA |
457 | bdp->cbd_datlen = frag_len; |
458 | bdp->cbd_sc = status; | |
459 | } | |
460 | ||
4d494cdc | 461 | txq->cur_tx = bdp; |
6e909283 NA |
462 | |
463 | return 0; | |
464 | ||
465 | dma_mapping_error: | |
4d494cdc | 466 | bdp = txq->cur_tx; |
6e909283 | 467 | for (i = 0; i < frag; i++) { |
4d494cdc | 468 | bdp = fec_enet_get_nextdesc(bdp, fep, queue); |
6e909283 NA |
469 | dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, |
470 | bdp->cbd_datlen, DMA_TO_DEVICE); | |
471 | } | |
472 | return NETDEV_TX_OK; | |
473 | } | |
1da177e4 | 474 | |
4d494cdc FD |
475 | static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, |
476 | struct sk_buff *skb, struct net_device *ndev) | |
6e909283 NA |
477 | { |
478 | struct fec_enet_private *fep = netdev_priv(ndev); | |
479 | const struct platform_device_id *id_entry = | |
480 | platform_get_device_id(fep->pdev); | |
481 | int nr_frags = skb_shinfo(skb)->nr_frags; | |
482 | struct bufdesc *bdp, *last_bdp; | |
483 | void *bufaddr; | |
d6bf3143 | 484 | dma_addr_t addr; |
6e909283 NA |
485 | unsigned short status; |
486 | unsigned short buflen; | |
4d494cdc | 487 | unsigned short queue; |
6e909283 NA |
488 | unsigned int estatus = 0; |
489 | unsigned int index; | |
79f33912 | 490 | int entries_free; |
6e909283 | 491 | int ret; |
22f6b860 | 492 | |
4d494cdc | 493 | entries_free = fec_enet_get_free_txdesc_num(fep, txq); |
79f33912 NA |
494 | if (entries_free < MAX_SKB_FRAGS + 1) { |
495 | dev_kfree_skb_any(skb); | |
496 | if (net_ratelimit()) | |
497 | netdev_err(ndev, "NOT enough BD for SG!\n"); | |
498 | return NETDEV_TX_OK; | |
499 | } | |
500 | ||
4c09eed9 JB |
501 | /* Protocol checksum off-load for TCP and UDP. */ |
502 | if (fec_enet_clear_csum(skb, ndev)) { | |
8e7e6874 | 503 | dev_kfree_skb_any(skb); |
4c09eed9 JB |
504 | return NETDEV_TX_OK; |
505 | } | |
506 | ||
6e909283 | 507 | /* Fill in a Tx ring entry */ |
4d494cdc | 508 | bdp = txq->cur_tx; |
6e909283 | 509 | status = bdp->cbd_sc; |
0e702ab3 | 510 | status &= ~BD_ENET_TX_STATS; |
1da177e4 | 511 | |
22f6b860 | 512 | /* Set buffer length and buffer pointer */ |
9555b31e | 513 | bufaddr = skb->data; |
6e909283 | 514 | buflen = skb_headlen(skb); |
1da177e4 | 515 | |
4d494cdc FD |
516 | queue = skb_get_queue_mapping(skb); |
517 | index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); | |
41ef84ce | 518 | if (((unsigned long) bufaddr) & fep->tx_align || |
6e909283 | 519 | id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { |
4d494cdc FD |
520 | memcpy(txq->tx_bounce[index], skb->data, buflen); |
521 | bufaddr = txq->tx_bounce[index]; | |
1da177e4 | 522 | |
6e909283 NA |
523 | if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) |
524 | swap_buffer(bufaddr, buflen); | |
525 | } | |
6aa20a22 | 526 | |
d6bf3143 RK |
527 | /* Push the data cache so the CPM does not get stale memory data. */ |
528 | addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); | |
529 | if (dma_mapping_error(&fep->pdev->dev, addr)) { | |
d842a31f DFB |
530 | dev_kfree_skb_any(skb); |
531 | if (net_ratelimit()) | |
532 | netdev_err(ndev, "Tx DMA memory map failed\n"); | |
533 | return NETDEV_TX_OK; | |
534 | } | |
1da177e4 | 535 | |
6e909283 | 536 | if (nr_frags) { |
4d494cdc | 537 | ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev); |
6e909283 NA |
538 | if (ret) |
539 | return ret; | |
540 | } else { | |
541 | status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); | |
542 | if (fep->bufdesc_ex) { | |
543 | estatus = BD_ENET_TX_INT; | |
544 | if (unlikely(skb_shinfo(skb)->tx_flags & | |
545 | SKBTX_HW_TSTAMP && fep->hwts_tx_en)) | |
546 | estatus |= BD_ENET_TX_TS; | |
547 | } | |
548 | } | |
549 | ||
ff43da86 FL |
550 | if (fep->bufdesc_ex) { |
551 | ||
552 | struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; | |
6e909283 | 553 | |
ff43da86 | 554 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && |
6e909283 | 555 | fep->hwts_tx_en)) |
6605b730 | 556 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
4c09eed9 | 557 | |
6e909283 NA |
558 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
559 | estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; | |
560 | ||
561 | ebdp->cbd_bdu = 0; | |
562 | ebdp->cbd_esc = estatus; | |
6605b730 | 563 | } |
03191656 | 564 | |
4d494cdc FD |
565 | last_bdp = txq->cur_tx; |
566 | index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); | |
6e909283 | 567 | /* Save skb pointer */ |
4d494cdc | 568 | txq->tx_skbuff[index] = skb; |
6e909283 NA |
569 | |
570 | bdp->cbd_datlen = buflen; | |
d6bf3143 | 571 | bdp->cbd_bufaddr = addr; |
6e909283 | 572 | |
fb8ef788 DFB |
573 | /* Send it on its way. Tell FEC it's ready, interrupt when done, |
574 | * it's the last BD of the frame, and to put the CRC on the end. | |
575 | */ | |
6e909283 | 576 | status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); |
fb8ef788 DFB |
577 | bdp->cbd_sc = status; |
578 | ||
22f6b860 | 579 | /* If this was the last BD in the ring, start at the beginning again. */ |
4d494cdc | 580 | bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); |
1da177e4 | 581 | |
7a2a8451 ED |
582 | skb_tx_timestamp(skb); |
583 | ||
4d494cdc | 584 | txq->cur_tx = bdp; |
de5fb0a0 | 585 | |
de5fb0a0 | 586 | /* Trigger transmission start */ |
4d494cdc | 587 | writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); |
1da177e4 | 588 | |
6e909283 | 589 | return 0; |
1da177e4 LT |
590 | } |
591 | ||
79f33912 | 592 | static int |
4d494cdc FD |
593 | fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, |
594 | struct net_device *ndev, | |
595 | struct bufdesc *bdp, int index, char *data, | |
596 | int size, bool last_tcp, bool is_last) | |
61a4427b NA |
597 | { |
598 | struct fec_enet_private *fep = netdev_priv(ndev); | |
79f33912 NA |
599 | const struct platform_device_id *id_entry = |
600 | platform_get_device_id(fep->pdev); | |
61cd2ebb | 601 | struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); |
79f33912 NA |
602 | unsigned short status; |
603 | unsigned int estatus = 0; | |
d6bf3143 | 604 | dma_addr_t addr; |
61a4427b NA |
605 | |
606 | status = bdp->cbd_sc; | |
79f33912 | 607 | status &= ~BD_ENET_TX_STATS; |
61a4427b | 608 | |
79f33912 | 609 | status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); |
79f33912 | 610 | |
41ef84ce | 611 | if (((unsigned long) data) & fep->tx_align || |
79f33912 | 612 | id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { |
4d494cdc FD |
613 | memcpy(txq->tx_bounce[index], data, size); |
614 | data = txq->tx_bounce[index]; | |
79f33912 NA |
615 | |
616 | if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) | |
617 | swap_buffer(data, size); | |
618 | } | |
619 | ||
d6bf3143 RK |
620 | addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE); |
621 | if (dma_mapping_error(&fep->pdev->dev, addr)) { | |
79f33912 | 622 | dev_kfree_skb_any(skb); |
6e909283 | 623 | if (net_ratelimit()) |
79f33912 | 624 | netdev_err(ndev, "Tx DMA memory map failed\n"); |
61a4427b NA |
625 | return NETDEV_TX_BUSY; |
626 | } | |
627 | ||
d6bf3143 RK |
628 | bdp->cbd_datlen = size; |
629 | bdp->cbd_bufaddr = addr; | |
630 | ||
79f33912 NA |
631 | if (fep->bufdesc_ex) { |
632 | if (skb->ip_summed == CHECKSUM_PARTIAL) | |
633 | estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; | |
634 | ebdp->cbd_bdu = 0; | |
635 | ebdp->cbd_esc = estatus; | |
636 | } | |
637 | ||
638 | /* Handle the last BD specially */ | |
639 | if (last_tcp) | |
640 | status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC); | |
641 | if (is_last) { | |
642 | status |= BD_ENET_TX_INTR; | |
643 | if (fep->bufdesc_ex) | |
644 | ebdp->cbd_esc |= BD_ENET_TX_INT; | |
645 | } | |
646 | ||
647 | bdp->cbd_sc = status; | |
648 | ||
649 | return 0; | |
650 | } | |
651 | ||
652 | static int | |
4d494cdc FD |
653 | fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, |
654 | struct sk_buff *skb, struct net_device *ndev, | |
655 | struct bufdesc *bdp, int index) | |
79f33912 NA |
656 | { |
657 | struct fec_enet_private *fep = netdev_priv(ndev); | |
658 | const struct platform_device_id *id_entry = | |
659 | platform_get_device_id(fep->pdev); | |
660 | int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
61cd2ebb | 661 | struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); |
79f33912 NA |
662 | void *bufaddr; |
663 | unsigned long dmabuf; | |
664 | unsigned short status; | |
665 | unsigned int estatus = 0; | |
666 | ||
667 | status = bdp->cbd_sc; | |
668 | status &= ~BD_ENET_TX_STATS; | |
669 | status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); | |
670 | ||
4d494cdc FD |
671 | bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; |
672 | dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; | |
41ef84ce | 673 | if (((unsigned long)bufaddr) & fep->tx_align || |
79f33912 | 674 | id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { |
4d494cdc FD |
675 | memcpy(txq->tx_bounce[index], skb->data, hdr_len); |
676 | bufaddr = txq->tx_bounce[index]; | |
79f33912 NA |
677 | |
678 | if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) | |
679 | swap_buffer(bufaddr, hdr_len); | |
680 | ||
681 | dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, | |
682 | hdr_len, DMA_TO_DEVICE); | |
683 | if (dma_mapping_error(&fep->pdev->dev, dmabuf)) { | |
684 | dev_kfree_skb_any(skb); | |
685 | if (net_ratelimit()) | |
686 | netdev_err(ndev, "Tx DMA memory map failed\n"); | |
687 | return NETDEV_TX_BUSY; | |
688 | } | |
689 | } | |
690 | ||
691 | bdp->cbd_bufaddr = dmabuf; | |
692 | bdp->cbd_datlen = hdr_len; | |
693 | ||
694 | if (fep->bufdesc_ex) { | |
695 | if (skb->ip_summed == CHECKSUM_PARTIAL) | |
696 | estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; | |
697 | ebdp->cbd_bdu = 0; | |
698 | ebdp->cbd_esc = estatus; | |
699 | } | |
700 | ||
701 | bdp->cbd_sc = status; | |
702 | ||
703 | return 0; | |
704 | } | |
705 | ||
/* Software TSO: split a GSO skb into MTU-sized segments and queue each
 * segment as a header descriptor (from the tso_hdrs area) followed by one
 * or more payload descriptors, using the net/tso.h helpers.  The skb is
 * dropped up front if the ring cannot hold tso_count_descs(skb) entries.
 * The frame's skb pointer is stored at its last descriptor for TX
 * completion.  Returns 0 on success; on helper failure the (already
 * freed) skb's error code is propagated.
 */
static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len, data_left;
	struct bufdesc *bdp = txq->cur_tx;
	unsigned short queue = skb_get_queue_mapping(skb);
	struct tso_t tso;
	unsigned int index = 0;
	int ret;
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);

	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
		/* Each pass emits one segment of at most gso_size bytes. */
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
			bdp = fec_enet_get_nextdesc(bdp, fep, queue);
			index = fec_enet_get_bd_index(txq->tx_bd_base,
						      bdp, fep);
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
							bdp, index,
							tso.data, size,
							size == data_left,
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
	}

	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	skb_tx_timestamp(skb);
	txq->cur_tx = bdp;

	/* Trigger transmission start.  ERR007885 workaround: only write
	 * TDAR when it reads back as zero on one of several attempts, to
	 * avoid the set/clear race that hangs the uDMA state machines.
	 */
	if (!(id_entry->driver_data & FEC_QUIRK_ERR007885) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
		writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));

	return 0;

err_release:
	/* TODO: Release all used data descriptors for TSO */
	/* NOTE(review): descriptors queued before the failure keep their
	 * READY bits and DMA mappings here — confirm whether completion
	 * handling cleans them up or they are leaked until ring reinit.
	 */
	return ret;
}
794 | ||
/*
 * fec_enet_start_xmit - ndo_start_xmit hook: queue one skb for transmission.
 *
 * Dispatches the skb to the TSO or the plain submit path depending on
 * skb_is_gso(), then stops the per-queue netdev TX queue when the number
 * of free descriptors drops to the stop threshold so the stack backs off
 * before the ring overflows.
 *
 * NOTE(review): on submit failure the helper's return value is passed
 * straight back to the stack; presumably the helpers return
 * NETDEV_TX_BUSY / NETDEV_TX_OK style codes — confirm against the
 * submit functions, which are outside this view.
 */
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int ret;

	/* Pick the HW TX ring the stack mapped this skb onto. */
	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

	if (skb_is_gso(skb))
		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
	else
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
	if (ret)
		return ret;

	/* Throttle the queue before the descriptor ring fills completely;
	 * it is woken again from the TX completion path.
	 */
	entries_free = fec_enet_get_free_txdesc_num(fep, txq);
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);

	return NETDEV_TX_OK;
}
822 | ||
14109a59 FL |
/* Init RX & TX buffer descriptors
 *
 * Re-arms every RX descriptor that still has a buffer attached
 * (BD_ENET_RX_EMPTY hands ownership back to the hardware), frees any
 * skb still parked in a TX slot, clears the TX descriptors, and resets
 * the software ring cursors (cur_rx, cur_tx, dirty_tx).  Called with
 * the controller quiesced (see fec_restart()).
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned int i;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->rx_bd_base;

		for (i = 0; i < rxq->rx_ring_size; i++) {

			/* Initialize the BD for every fragment in the page.
			 * A descriptor without a buffer address stays owned
			 * by software (status 0).
			 */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = BD_ENET_RX_EMPTY;
			else
				bdp->cbd_sc = 0;
			bdp = fec_enet_get_nextdesc(bdp, fep, q);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, fep, q);
		bdp->cbd_sc |= BD_SC_WRAP;

		rxq->cur_rx = rxq->rx_bd_base;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->tx_bd_base;
		txq->cur_tx = bdp;

		for (i = 0; i < txq->tx_ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = 0;
			/* Drop any skb that was still in flight. */
			if (txq->tx_skbuff[i]) {
				dev_kfree_skb_any(txq->tx_skbuff[i]);
				txq->tx_skbuff[i] = NULL;
			}
			bdp->cbd_bufaddr = 0;
			bdp = fec_enet_get_nextdesc(bdp, fep, q);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, fep, q);
		bdp->cbd_sc |= BD_SC_WRAP;
		/* dirty_tx points at the last (wrap) descriptor: the ring is
		 * empty, reclaim starts from the descriptor after dirty_tx.
		 */
		txq->dirty_tx = bdp;
	}
}
14109a59 | 879 | |
ce99d0d3 FL |
880 | static void fec_enet_active_rxring(struct net_device *ndev) |
881 | { | |
882 | struct fec_enet_private *fep = netdev_priv(ndev); | |
883 | int i; | |
884 | ||
885 | for (i = 0; i < fep->num_rx_queues; i++) | |
886 | writel(0, fep->hwp + FEC_R_DES_ACTIVE(i)); | |
887 | } | |
888 | ||
59d0f746 FL |
/*
 * fec_enet_enable_ring - program the descriptor ring base addresses
 * into the controller and enable the additional AVB DMA channels.
 *
 * Ring 0 is the legacy best-effort ring; rings 1 and 2 (when present)
 * additionally get their classification/shaping registers written
 * (RCMR match on RX, DMA class + idle slope on TX).
 */
static void fec_enet_enable_ring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	int i;

	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));

		/* enable DMA1/2 (ring 0 needs no match register) */
		if (i)
			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
		writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));

		/* enable DMA1/2 */
		if (i)
			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));
	}
}
14109a59 | 916 | |
59d0f746 FL |
917 | static void fec_enet_reset_skb(struct net_device *ndev) |
918 | { | |
919 | struct fec_enet_private *fep = netdev_priv(ndev); | |
920 | struct fec_enet_priv_tx_q *txq; | |
921 | int i, j; | |
922 | ||
923 | for (i = 0; i < fep->num_tx_queues; i++) { | |
924 | txq = fep->tx_queue[i]; | |
925 | ||
926 | for (j = 0; j < txq->tx_ring_size; j++) { | |
927 | if (txq->tx_skbuff[j]) { | |
928 | dev_kfree_skb_any(txq->tx_skbuff[j]); | |
929 | txq->tx_skbuff[j] = NULL; | |
930 | } | |
931 | } | |
932 | } | |
14109a59 FL |
933 | } |
934 | ||
dbc64a8e RK |
935 | /* |
936 | * This function is called to start or restart the FEC during a link | |
937 | * change, transmit timeout, or to reconfigure the FEC. The network | |
938 | * packet processing for this device must be stopped before this call. | |
45993653 | 939 | */ |
1da177e4 | 940 | static void |
ef83337d | 941 | fec_restart(struct net_device *ndev) |
1da177e4 | 942 | { |
c556167f | 943 | struct fec_enet_private *fep = netdev_priv(ndev); |
45993653 UKK |
944 | const struct platform_device_id *id_entry = |
945 | platform_get_device_id(fep->pdev); | |
4c09eed9 | 946 | u32 val; |
cd1f402c UKK |
947 | u32 temp_mac[2]; |
948 | u32 rcntl = OPT_FRAME_SIZE | 0x04; | |
230dec61 | 949 | u32 ecntl = 0x2; /* ETHEREN */ |
1da177e4 | 950 | |
106c314c FD |
951 | /* Whack a reset. We should wait for this. |
952 | * For i.MX6SX SOC, enet use AXI bus, we use disable MAC | |
953 | * instead of reset MAC itself. | |
954 | */ | |
955 | if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) { | |
956 | writel(0, fep->hwp + FEC_ECNTRL); | |
957 | } else { | |
958 | writel(1, fep->hwp + FEC_ECNTRL); | |
959 | udelay(10); | |
960 | } | |
1da177e4 | 961 | |
45993653 UKK |
962 | /* |
963 | * enet-mac reset will reset mac address registers too, | |
964 | * so need to reconfigure it. | |
965 | */ | |
966 | if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { | |
967 | memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); | |
968 | writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); | |
969 | writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); | |
970 | } | |
1da177e4 | 971 | |
45993653 UKK |
972 | /* Clear any outstanding interrupt. */ |
973 | writel(0xffc00000, fep->hwp + FEC_IEVENT); | |
1da177e4 | 974 | |
45993653 UKK |
975 | /* Set maximum receive buffer size. */ |
976 | writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); | |
1da177e4 | 977 | |
14109a59 FL |
978 | fec_enet_bd_init(ndev); |
979 | ||
59d0f746 | 980 | fec_enet_enable_ring(ndev); |
45993653 | 981 | |
59d0f746 FL |
982 | /* Reset tx SKB buffers. */ |
983 | fec_enet_reset_skb(ndev); | |
97b72e43 | 984 | |
45993653 | 985 | /* Enable MII mode */ |
ef83337d | 986 | if (fep->full_duplex == DUPLEX_FULL) { |
cd1f402c | 987 | /* FD enable */ |
45993653 UKK |
988 | writel(0x04, fep->hwp + FEC_X_CNTRL); |
989 | } else { | |
cd1f402c UKK |
990 | /* No Rcv on Xmit */ |
991 | rcntl |= 0x02; | |
45993653 UKK |
992 | writel(0x0, fep->hwp + FEC_X_CNTRL); |
993 | } | |
cd1f402c | 994 | |
45993653 UKK |
995 | /* Set MII speed */ |
996 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); | |
997 | ||
d1391930 | 998 | #if !defined(CONFIG_M5272) |
4c09eed9 JB |
999 | /* set RX checksum */ |
1000 | val = readl(fep->hwp + FEC_RACC); | |
1001 | if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) | |
1002 | val |= FEC_RACC_OPTIONS; | |
1003 | else | |
1004 | val &= ~FEC_RACC_OPTIONS; | |
1005 | writel(val, fep->hwp + FEC_RACC); | |
d1391930 | 1006 | #endif |
4c09eed9 | 1007 | |
45993653 UKK |
1008 | /* |
1009 | * The phy interface and speed need to get configured | |
1010 | * differently on enet-mac. | |
1011 | */ | |
1012 | if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { | |
cd1f402c UKK |
1013 | /* Enable flow control and length check */ |
1014 | rcntl |= 0x40000000 | 0x00000020; | |
45993653 | 1015 | |
230dec61 SG |
1016 | /* RGMII, RMII or MII */ |
1017 | if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII) | |
1018 | rcntl |= (1 << 6); | |
1019 | else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) | |
cd1f402c | 1020 | rcntl |= (1 << 8); |
45993653 | 1021 | else |
cd1f402c | 1022 | rcntl &= ~(1 << 8); |
45993653 | 1023 | |
230dec61 SG |
1024 | /* 1G, 100M or 10M */ |
1025 | if (fep->phy_dev) { | |
1026 | if (fep->phy_dev->speed == SPEED_1000) | |
1027 | ecntl |= (1 << 5); | |
1028 | else if (fep->phy_dev->speed == SPEED_100) | |
1029 | rcntl &= ~(1 << 9); | |
1030 | else | |
1031 | rcntl |= (1 << 9); | |
1032 | } | |
45993653 UKK |
1033 | } else { |
1034 | #ifdef FEC_MIIGSK_ENR | |
0ca1e290 | 1035 | if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) { |
8d82f219 | 1036 | u32 cfgr; |
45993653 UKK |
1037 | /* disable the gasket and wait */ |
1038 | writel(0, fep->hwp + FEC_MIIGSK_ENR); | |
1039 | while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) | |
1040 | udelay(1); | |
1041 | ||
1042 | /* | |
1043 | * configure the gasket: | |
1044 | * RMII, 50 MHz, no loopback, no echo | |
0ca1e290 | 1045 | * MII, 25 MHz, no loopback, no echo |
45993653 | 1046 | */ |
8d82f219 EB |
1047 | cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) |
1048 | ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; | |
1049 | if (fep->phy_dev && fep->phy_dev->speed == SPEED_10) | |
1050 | cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; | |
1051 | writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); | |
45993653 UKK |
1052 | |
1053 | /* re-enable the gasket */ | |
1054 | writel(2, fep->hwp + FEC_MIIGSK_ENR); | |
97b72e43 | 1055 | } |
45993653 UKK |
1056 | #endif |
1057 | } | |
baa70a5c | 1058 | |
d1391930 | 1059 | #if !defined(CONFIG_M5272) |
baa70a5c FL |
1060 | /* enable pause frame*/ |
1061 | if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || | |
1062 | ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && | |
1063 | fep->phy_dev && fep->phy_dev->pause)) { | |
1064 | rcntl |= FEC_ENET_FCE; | |
1065 | ||
4c09eed9 | 1066 | /* set FIFO threshold parameter to reduce overrun */ |
baa70a5c FL |
1067 | writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); |
1068 | writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); | |
1069 | writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); | |
1070 | writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); | |
1071 | ||
1072 | /* OPD */ | |
1073 | writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); | |
1074 | } else { | |
1075 | rcntl &= ~FEC_ENET_FCE; | |
1076 | } | |
d1391930 | 1077 | #endif /* !defined(CONFIG_M5272) */ |
baa70a5c | 1078 | |
cd1f402c | 1079 | writel(rcntl, fep->hwp + FEC_R_CNTRL); |
3b2b74ca | 1080 | |
84fe6182 SW |
1081 | /* Setup multicast filter. */ |
1082 | set_multicast_list(ndev); | |
1083 | #ifndef CONFIG_M5272 | |
1084 | writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); | |
1085 | writel(0, fep->hwp + FEC_HASH_TABLE_LOW); | |
1086 | #endif | |
1087 | ||
230dec61 SG |
1088 | if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { |
1089 | /* enable ENET endian swap */ | |
1090 | ecntl |= (1 << 8); | |
1091 | /* enable ENET store and forward mode */ | |
1092 | writel(1 << 8, fep->hwp + FEC_X_WMRK); | |
1093 | } | |
1094 | ||
ff43da86 FL |
1095 | if (fep->bufdesc_ex) |
1096 | ecntl |= (1 << 4); | |
6605b730 | 1097 | |
38ae92dc | 1098 | #ifndef CONFIG_M5272 |
b9eef55c JB |
1099 | /* Enable the MIB statistic event counters */ |
1100 | writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); | |
38ae92dc CH |
1101 | #endif |
1102 | ||
45993653 | 1103 | /* And last, enable the transmit and receive processing */ |
230dec61 | 1104 | writel(ecntl, fep->hwp + FEC_ECNTRL); |
ce99d0d3 | 1105 | fec_enet_active_rxring(ndev); |
45993653 | 1106 | |
ff43da86 FL |
1107 | if (fep->bufdesc_ex) |
1108 | fec_ptp_start_cyclecounter(ndev); | |
1109 | ||
45993653 UKK |
1110 | /* Enable interrupts we wish to service */ |
1111 | writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); | |
d851b47b FD |
1112 | |
1113 | /* Init the interrupt coalescing */ | |
1114 | fec_enet_itr_coal_init(ndev); | |
1115 | ||
45993653 UKK |
1116 | } |
1117 | ||
1118 | static void | |
1119 | fec_stop(struct net_device *ndev) | |
1120 | { | |
1121 | struct fec_enet_private *fep = netdev_priv(ndev); | |
230dec61 SG |
1122 | const struct platform_device_id *id_entry = |
1123 | platform_get_device_id(fep->pdev); | |
42431dc2 | 1124 | u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); |
45993653 UKK |
1125 | |
1126 | /* We cannot expect a graceful transmit stop without link !!! */ | |
1127 | if (fep->link) { | |
1128 | writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ | |
1129 | udelay(10); | |
1130 | if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) | |
31b7720c | 1131 | netdev_err(ndev, "Graceful transmit stop did not complete!\n"); |
45993653 UKK |
1132 | } |
1133 | ||
106c314c FD |
1134 | /* Whack a reset. We should wait for this. |
1135 | * For i.MX6SX SOC, enet use AXI bus, we use disable MAC | |
1136 | * instead of reset MAC itself. | |
1137 | */ | |
1138 | if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) { | |
1139 | writel(0, fep->hwp + FEC_ECNTRL); | |
1140 | } else { | |
1141 | writel(1, fep->hwp + FEC_ECNTRL); | |
1142 | udelay(10); | |
1143 | } | |
45993653 UKK |
1144 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); |
1145 | writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); | |
230dec61 SG |
1146 | |
1147 | /* We have to keep ENET enabled to have MII interrupt stay working */ | |
42431dc2 | 1148 | if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { |
230dec61 | 1149 | writel(2, fep->hwp + FEC_ECNTRL); |
42431dc2 LW |
1150 | writel(rmii_mode, fep->hwp + FEC_R_CNTRL); |
1151 | } | |
1da177e4 LT |
1152 | } |
1153 | ||
1154 | ||
45993653 UKK |
/*
 * fec_timeout - ndo_tx_timeout hook.
 *
 * Runs in the stack's watchdog context, so the actual recovery (a full
 * fec_restart) is deferred to process context via tx_timeout_work; here
 * we only dump state and account the error.
 */
static void
fec_timeout(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	/* Dump ring/register state before any counters change. */
	fec_dump(ndev);

	ndev->stats.tx_errors++;

	schedule_work(&fep->tx_timeout_work);
}
1166 | ||
36cdc743 | 1167 | static void fec_enet_timeout_work(struct work_struct *work) |
54309fa6 FL |
1168 | { |
1169 | struct fec_enet_private *fep = | |
36cdc743 | 1170 | container_of(work, struct fec_enet_private, tx_timeout_work); |
8ce5624f | 1171 | struct net_device *ndev = fep->netdev; |
54309fa6 | 1172 | |
36cdc743 RK |
1173 | rtnl_lock(); |
1174 | if (netif_device_present(ndev) || netif_running(ndev)) { | |
1175 | napi_disable(&fep->napi); | |
1176 | netif_tx_lock_bh(ndev); | |
1177 | fec_restart(ndev); | |
1178 | netif_wake_queue(ndev); | |
1179 | netif_tx_unlock_bh(ndev); | |
1180 | napi_enable(&fep->napi); | |
54309fa6 | 1181 | } |
36cdc743 | 1182 | rtnl_unlock(); |
45993653 UKK |
1183 | } |
1184 | ||
bfd4ecdd RK |
/*
 * fec_enet_hwtstamp - convert a raw hardware timestamp to a socket
 * hardware timestamp.
 *
 * @ts:         raw cycle value taken from an extended buffer descriptor
 * @hwtstamps:  output; fully reinitialised by this function
 *
 * The timecounter is shared with the PTP clock code, so the conversion
 * is done under tmreg_lock (irqsave: callers run in NAPI/IRQ context).
 */
static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
		  struct skb_shared_hwtstamps *hwtstamps)
{
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_cyc2time(&fep->tc, ts);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}
1199 | ||
1da177e4 | 1200 | static void |
4d494cdc | 1201 | fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) |
1da177e4 LT |
1202 | { |
1203 | struct fec_enet_private *fep; | |
2e28532f | 1204 | struct bufdesc *bdp; |
0e702ab3 | 1205 | unsigned short status; |
1da177e4 | 1206 | struct sk_buff *skb; |
4d494cdc FD |
1207 | struct fec_enet_priv_tx_q *txq; |
1208 | struct netdev_queue *nq; | |
de5fb0a0 | 1209 | int index = 0; |
79f33912 | 1210 | int entries_free; |
1da177e4 | 1211 | |
c556167f | 1212 | fep = netdev_priv(ndev); |
4d494cdc FD |
1213 | |
1214 | queue_id = FEC_ENET_GET_QUQUE(queue_id); | |
1215 | ||
1216 | txq = fep->tx_queue[queue_id]; | |
1217 | /* get next bdp of dirty_tx */ | |
1218 | nq = netdev_get_tx_queue(ndev, queue_id); | |
1219 | bdp = txq->dirty_tx; | |
1da177e4 | 1220 | |
de5fb0a0 | 1221 | /* get next bdp of dirty_tx */ |
4d494cdc | 1222 | bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); |
de5fb0a0 | 1223 | |
0e702ab3 | 1224 | while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { |
de5fb0a0 FL |
1225 | |
1226 | /* current queue is empty */ | |
4d494cdc | 1227 | if (bdp == txq->cur_tx) |
f0b3fbea SH |
1228 | break; |
1229 | ||
4d494cdc | 1230 | index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); |
de5fb0a0 | 1231 | |
4d494cdc FD |
1232 | skb = txq->tx_skbuff[index]; |
1233 | txq->tx_skbuff[index] = NULL; | |
1234 | if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) | |
79f33912 NA |
1235 | dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, |
1236 | bdp->cbd_datlen, DMA_TO_DEVICE); | |
2488a54e | 1237 | bdp->cbd_bufaddr = 0; |
6e909283 | 1238 | if (!skb) { |
4d494cdc | 1239 | bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); |
6e909283 NA |
1240 | continue; |
1241 | } | |
de5fb0a0 | 1242 | |
1da177e4 | 1243 | /* Check for errors. */ |
0e702ab3 | 1244 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | |
1da177e4 LT |
1245 | BD_ENET_TX_RL | BD_ENET_TX_UN | |
1246 | BD_ENET_TX_CSL)) { | |
c556167f | 1247 | ndev->stats.tx_errors++; |
0e702ab3 | 1248 | if (status & BD_ENET_TX_HB) /* No heartbeat */ |
c556167f | 1249 | ndev->stats.tx_heartbeat_errors++; |
0e702ab3 | 1250 | if (status & BD_ENET_TX_LC) /* Late collision */ |
c556167f | 1251 | ndev->stats.tx_window_errors++; |
0e702ab3 | 1252 | if (status & BD_ENET_TX_RL) /* Retrans limit */ |
c556167f | 1253 | ndev->stats.tx_aborted_errors++; |
0e702ab3 | 1254 | if (status & BD_ENET_TX_UN) /* Underrun */ |
c556167f | 1255 | ndev->stats.tx_fifo_errors++; |
0e702ab3 | 1256 | if (status & BD_ENET_TX_CSL) /* Carrier lost */ |
c556167f | 1257 | ndev->stats.tx_carrier_errors++; |
1da177e4 | 1258 | } else { |
c556167f | 1259 | ndev->stats.tx_packets++; |
6e909283 | 1260 | ndev->stats.tx_bytes += skb->len; |
1da177e4 LT |
1261 | } |
1262 | ||
ff43da86 FL |
1263 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && |
1264 | fep->bufdesc_ex) { | |
6605b730 | 1265 | struct skb_shared_hwtstamps shhwtstamps; |
ff43da86 | 1266 | struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; |
6605b730 | 1267 | |
bfd4ecdd | 1268 | fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps); |
6605b730 FL |
1269 | skb_tstamp_tx(skb, &shhwtstamps); |
1270 | } | |
ff43da86 | 1271 | |
1da177e4 LT |
1272 | /* Deferred means some collisions occurred during transmit, |
1273 | * but we eventually sent the packet OK. | |
1274 | */ | |
0e702ab3 | 1275 | if (status & BD_ENET_TX_DEF) |
c556167f | 1276 | ndev->stats.collisions++; |
6aa20a22 | 1277 | |
22f6b860 | 1278 | /* Free the sk buffer associated with this last transmit */ |
1da177e4 | 1279 | dev_kfree_skb_any(skb); |
de5fb0a0 | 1280 | |
4d494cdc | 1281 | txq->dirty_tx = bdp; |
6aa20a22 | 1282 | |
22f6b860 | 1283 | /* Update pointer to next buffer descriptor to be transmitted */ |
4d494cdc | 1284 | bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); |
6aa20a22 | 1285 | |
22f6b860 | 1286 | /* Since we have freed up a buffer, the ring is no longer full |
1da177e4 | 1287 | */ |
79f33912 | 1288 | if (netif_queue_stopped(ndev)) { |
4d494cdc FD |
1289 | entries_free = fec_enet_get_free_txdesc_num(fep, txq); |
1290 | if (entries_free >= txq->tx_wake_threshold) | |
1291 | netif_tx_wake_queue(nq); | |
79f33912 | 1292 | } |
1da177e4 | 1293 | } |
ccea2968 RK |
1294 | |
1295 | /* ERR006538: Keep the transmitter going */ | |
4d494cdc FD |
1296 | if (bdp != txq->cur_tx && |
1297 | readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0) | |
1298 | writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id)); | |
1299 | } | |
1300 | ||
1301 | static void | |
1302 | fec_enet_tx(struct net_device *ndev) | |
1303 | { | |
1304 | struct fec_enet_private *fep = netdev_priv(ndev); | |
1305 | u16 queue_id; | |
1306 | /* First process class A queue, then Class B and Best Effort queue */ | |
1307 | for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { | |
1308 | clear_bit(queue_id, &fep->work_tx); | |
1309 | fec_enet_tx_queue(ndev, queue_id); | |
1310 | } | |
1311 | return; | |
1da177e4 LT |
1312 | } |
1313 | ||
1da177e4 LT |
1314 | /* During a receive, the cur_rx points to the current incoming buffer. |
1315 | * When we update through the ring, if the next incoming buffer has | |
1316 | * not been given to the system, we just set the empty indicator, | |
1317 | * effectively tossing the packet. | |
1318 | */ | |
dc975382 | 1319 | static int |
4d494cdc | 1320 | fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) |
1da177e4 | 1321 | { |
c556167f | 1322 | struct fec_enet_private *fep = netdev_priv(ndev); |
b5680e0b SG |
1323 | const struct platform_device_id *id_entry = |
1324 | platform_get_device_id(fep->pdev); | |
4d494cdc | 1325 | struct fec_enet_priv_rx_q *rxq; |
2e28532f | 1326 | struct bufdesc *bdp; |
0e702ab3 | 1327 | unsigned short status; |
1da177e4 LT |
1328 | struct sk_buff *skb; |
1329 | ushort pkt_len; | |
1330 | __u8 *data; | |
dc975382 | 1331 | int pkt_received = 0; |
cdffcf1b JB |
1332 | struct bufdesc_ex *ebdp = NULL; |
1333 | bool vlan_packet_rcvd = false; | |
1334 | u16 vlan_tag; | |
d842a31f | 1335 | int index = 0; |
6aa20a22 | 1336 | |
0e702ab3 GU |
1337 | #ifdef CONFIG_M532x |
1338 | flush_cache_all(); | |
6aa20a22 | 1339 | #endif |
4d494cdc FD |
1340 | queue_id = FEC_ENET_GET_QUQUE(queue_id); |
1341 | rxq = fep->rx_queue[queue_id]; | |
1da177e4 | 1342 | |
1da177e4 LT |
1343 | /* First, grab all of the stats for the incoming packet. |
1344 | * These get messed up if we get called due to a busy condition. | |
1345 | */ | |
4d494cdc | 1346 | bdp = rxq->cur_rx; |
1da177e4 | 1347 | |
22f6b860 | 1348 | while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { |
1da177e4 | 1349 | |
dc975382 FL |
1350 | if (pkt_received >= budget) |
1351 | break; | |
1352 | pkt_received++; | |
1353 | ||
22f6b860 SH |
1354 | /* Since we have allocated space to hold a complete frame, |
1355 | * the last indicator should be set. | |
1356 | */ | |
1357 | if ((status & BD_ENET_RX_LAST) == 0) | |
31b7720c | 1358 | netdev_err(ndev, "rcv is not +last\n"); |
1da177e4 | 1359 | |
db3421c1 | 1360 | |
22f6b860 SH |
1361 | /* Check for errors. */ |
1362 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | | |
1da177e4 | 1363 | BD_ENET_RX_CR | BD_ENET_RX_OV)) { |
c556167f | 1364 | ndev->stats.rx_errors++; |
22f6b860 SH |
1365 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { |
1366 | /* Frame too long or too short. */ | |
c556167f | 1367 | ndev->stats.rx_length_errors++; |
22f6b860 SH |
1368 | } |
1369 | if (status & BD_ENET_RX_NO) /* Frame alignment */ | |
c556167f | 1370 | ndev->stats.rx_frame_errors++; |
22f6b860 | 1371 | if (status & BD_ENET_RX_CR) /* CRC Error */ |
c556167f | 1372 | ndev->stats.rx_crc_errors++; |
22f6b860 | 1373 | if (status & BD_ENET_RX_OV) /* FIFO overrun */ |
c556167f | 1374 | ndev->stats.rx_fifo_errors++; |
1da177e4 | 1375 | } |
1da177e4 | 1376 | |
22f6b860 SH |
1377 | /* Report late collisions as a frame error. |
1378 | * On this error, the BD is closed, but we don't know what we | |
1379 | * have in the buffer. So, just drop this frame on the floor. | |
1380 | */ | |
1381 | if (status & BD_ENET_RX_CL) { | |
c556167f UKK |
1382 | ndev->stats.rx_errors++; |
1383 | ndev->stats.rx_frame_errors++; | |
22f6b860 SH |
1384 | goto rx_processing_done; |
1385 | } | |
1da177e4 | 1386 | |
22f6b860 | 1387 | /* Process the incoming frame. */ |
c556167f | 1388 | ndev->stats.rx_packets++; |
22f6b860 | 1389 | pkt_len = bdp->cbd_datlen; |
c556167f | 1390 | ndev->stats.rx_bytes += pkt_len; |
1da177e4 | 1391 | |
4d494cdc FD |
1392 | index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); |
1393 | data = rxq->rx_skbuff[index]->data; | |
d842a31f DFB |
1394 | dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, |
1395 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); | |
ccdc4f19 | 1396 | |
b5680e0b SG |
1397 | if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) |
1398 | swap_buffer(data, pkt_len); | |
1399 | ||
cdffcf1b JB |
1400 | /* Extract the enhanced buffer descriptor */ |
1401 | ebdp = NULL; | |
1402 | if (fep->bufdesc_ex) | |
1403 | ebdp = (struct bufdesc_ex *)bdp; | |
1404 | ||
1405 | /* If this is a VLAN packet remove the VLAN Tag */ | |
1406 | vlan_packet_rcvd = false; | |
1407 | if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && | |
4d494cdc | 1408 | fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { |
cdffcf1b JB |
1409 | /* Push and remove the vlan tag */ |
1410 | struct vlan_hdr *vlan_header = | |
1411 | (struct vlan_hdr *) (data + ETH_HLEN); | |
1412 | vlan_tag = ntohs(vlan_header->h_vlan_TCI); | |
1413 | pkt_len -= VLAN_HLEN; | |
1414 | ||
1415 | vlan_packet_rcvd = true; | |
1416 | } | |
1417 | ||
22f6b860 SH |
1418 | /* This does 16 byte alignment, exactly what we need. |
1419 | * The packet length includes FCS, but we don't want to | |
1420 | * include that when passing upstream as it messes up | |
1421 | * bridging applications. | |
1422 | */ | |
b72061a3 | 1423 | skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN); |
1da177e4 | 1424 | |
8549889c | 1425 | if (unlikely(!skb)) { |
c556167f | 1426 | ndev->stats.rx_dropped++; |
22f6b860 | 1427 | } else { |
cdffcf1b | 1428 | int payload_offset = (2 * ETH_ALEN); |
8549889c | 1429 | skb_reserve(skb, NET_IP_ALIGN); |
22f6b860 | 1430 | skb_put(skb, pkt_len - 4); /* Make room */ |
cdffcf1b JB |
1431 | |
1432 | /* Extract the frame data without the VLAN header. */ | |
1433 | skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN)); | |
1434 | if (vlan_packet_rcvd) | |
1435 | payload_offset = (2 * ETH_ALEN) + VLAN_HLEN; | |
b749fc9b | 1436 | skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN), |
cdffcf1b JB |
1437 | data + payload_offset, |
1438 | pkt_len - 4 - (2 * ETH_ALEN)); | |
1439 | ||
c556167f | 1440 | skb->protocol = eth_type_trans(skb, ndev); |
ff43da86 | 1441 | |
6605b730 | 1442 | /* Get receive timestamp from the skb */ |
bfd4ecdd RK |
1443 | if (fep->hwts_rx_en && fep->bufdesc_ex) |
1444 | fec_enet_hwtstamp(fep, ebdp->ts, | |
1445 | skb_hwtstamps(skb)); | |
ff43da86 | 1446 | |
4c09eed9 | 1447 | if (fep->bufdesc_ex && |
cdffcf1b | 1448 | (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { |
4c09eed9 JB |
1449 | if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { |
1450 | /* don't check it */ | |
1451 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1452 | } else { | |
1453 | skb_checksum_none_assert(skb); | |
1454 | } | |
1455 | } | |
1456 | ||
cdffcf1b JB |
1457 | /* Handle received VLAN packets */ |
1458 | if (vlan_packet_rcvd) | |
1459 | __vlan_hwaccel_put_tag(skb, | |
1460 | htons(ETH_P_8021Q), | |
1461 | vlan_tag); | |
1462 | ||
0affdf34 | 1463 | napi_gro_receive(&fep->napi, skb); |
22f6b860 | 1464 | } |
f0b3fbea | 1465 | |
d842a31f DFB |
1466 | dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, |
1467 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); | |
22f6b860 SH |
1468 | rx_processing_done: |
1469 | /* Clear the status flags for this buffer */ | |
1470 | status &= ~BD_ENET_RX_STATS; | |
1da177e4 | 1471 | |
22f6b860 SH |
1472 | /* Mark the buffer empty */ |
1473 | status |= BD_ENET_RX_EMPTY; | |
1474 | bdp->cbd_sc = status; | |
6aa20a22 | 1475 | |
ff43da86 FL |
1476 | if (fep->bufdesc_ex) { |
1477 | struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; | |
1478 | ||
1479 | ebdp->cbd_esc = BD_ENET_RX_INT; | |
1480 | ebdp->cbd_prot = 0; | |
1481 | ebdp->cbd_bdu = 0; | |
1482 | } | |
6605b730 | 1483 | |
22f6b860 | 1484 | /* Update BD pointer to next entry */ |
4d494cdc | 1485 | bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); |
36e24e2e | 1486 | |
22f6b860 SH |
1487 | /* Doing this here will keep the FEC running while we process |
1488 | * incoming frames. On a heavily loaded network, we should be | |
1489 | * able to keep up at the expense of system resources. | |
1490 | */ | |
4d494cdc | 1491 | writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id)); |
22f6b860 | 1492 | } |
4d494cdc FD |
1493 | rxq->cur_rx = bdp; |
1494 | return pkt_received; | |
1495 | } | |
1da177e4 | 1496 | |
4d494cdc FD |
1497 | static int |
1498 | fec_enet_rx(struct net_device *ndev, int budget) | |
1499 | { | |
1500 | int pkt_received = 0; | |
1501 | u16 queue_id; | |
1502 | struct fec_enet_private *fep = netdev_priv(ndev); | |
1503 | ||
1504 | for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { | |
1505 | clear_bit(queue_id, &fep->work_rx); | |
1506 | pkt_received += fec_enet_rx_queue(ndev, | |
1507 | budget - pkt_received, queue_id); | |
1508 | } | |
dc975382 | 1509 | return pkt_received; |
1da177e4 LT |
1510 | } |
1511 | ||
4d494cdc FD |
1512 | static bool |
1513 | fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) | |
1514 | { | |
1515 | if (int_events == 0) | |
1516 | return false; | |
1517 | ||
1518 | if (int_events & FEC_ENET_RXF) | |
1519 | fep->work_rx |= (1 << 2); | |
ce99d0d3 FL |
1520 | if (int_events & FEC_ENET_RXF_1) |
1521 | fep->work_rx |= (1 << 0); | |
1522 | if (int_events & FEC_ENET_RXF_2) | |
1523 | fep->work_rx |= (1 << 1); | |
4d494cdc FD |
1524 | |
1525 | if (int_events & FEC_ENET_TXF) | |
1526 | fep->work_tx |= (1 << 2); | |
ce99d0d3 FL |
1527 | if (int_events & FEC_ENET_TXF_1) |
1528 | fep->work_tx |= (1 << 0); | |
1529 | if (int_events & FEC_ENET_TXF_2) | |
1530 | fep->work_tx |= (1 << 1); | |
4d494cdc FD |
1531 | |
1532 | return true; | |
1533 | } | |
1534 | ||
45993653 UKK |
/*
 * fec_enet_interrupt - top-half interrupt handler.
 *
 * Acks every event except the NAPI-handled RXF/TXF bits (those are
 * cleared by the poll routine itself to avoid losing events), records
 * which rings need servicing, masks further packet interrupts, and
 * schedules NAPI.  MII-transfer completion is signalled to waiters
 * via the mdio_done completion.
 */
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF;
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	int_events = readl(fep->hwp + FEC_IEVENT);
	/* Ack everything except the NAPI-owned bits. */
	writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
	fec_enet_collect_events(fep, int_events);

	if (int_events & napi_mask) {
		ret = IRQ_HANDLED;

		/* Disable the NAPI interrupts */
		writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
		napi_schedule(&fep->napi);
	}

	if (int_events & FEC_ENET_MII) {
		ret = IRQ_HANDLED;
		complete(&fep->mdio_done);
	}

	return ret;
}
1563 | ||
dc975382 FL |
/*
 * fec_enet_rx_napi - NAPI poll routine.
 *
 * Services both RX and TX completion work.  When fewer than @budget
 * packets were received, polling is complete and the full interrupt
 * mask is restored.
 */
static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);
	int pkts;

	/*
	 * Clear any pending transmit or receive interrupts before
	 * processing the rings to avoid racing with the hardware.
	 */
	writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT);

	pkts = fec_enet_rx(ndev, budget);

	fec_enet_tx(ndev);

	if (pkts < budget) {
		napi_complete(napi);
		/* Re-enable packet interrupts now that polling is done. */
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}
	return pkts;
}
45993653 | 1586 | |
e6b043d5 | 1587 | /* ------------------------------------------------------------------------- */ |
0c7768a0 | 1588 | static void fec_get_mac(struct net_device *ndev) |
1da177e4 | 1589 | { |
c556167f | 1590 | struct fec_enet_private *fep = netdev_priv(ndev); |
94660ba0 | 1591 | struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); |
e6b043d5 | 1592 | unsigned char *iap, tmpaddr[ETH_ALEN]; |
1da177e4 | 1593 | |
49da97dc SG |
1594 | /* |
1595 | * try to get mac address in following order: | |
1596 | * | |
1597 | * 1) module parameter via kernel command line in form | |
1598 | * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 | |
1599 | */ | |
1600 | iap = macaddr; | |
1601 | ||
ca2cc333 SG |
1602 | /* |
1603 | * 2) from device tree data | |
1604 | */ | |
1605 | if (!is_valid_ether_addr(iap)) { | |
1606 | struct device_node *np = fep->pdev->dev.of_node; | |
1607 | if (np) { | |
1608 | const char *mac = of_get_mac_address(np); | |
1609 | if (mac) | |
1610 | iap = (unsigned char *) mac; | |
1611 | } | |
1612 | } | |
ca2cc333 | 1613 | |
49da97dc | 1614 | /* |
ca2cc333 | 1615 | * 3) from flash or fuse (via platform data) |
49da97dc SG |
1616 | */ |
1617 | if (!is_valid_ether_addr(iap)) { | |
1618 | #ifdef CONFIG_M5272 | |
1619 | if (FEC_FLASHMAC) | |
1620 | iap = (unsigned char *)FEC_FLASHMAC; | |
1621 | #else | |
1622 | if (pdata) | |
589efdc7 | 1623 | iap = (unsigned char *)&pdata->mac; |
49da97dc SG |
1624 | #endif |
1625 | } | |
1626 | ||
1627 | /* | |
ca2cc333 | 1628 | * 4) FEC mac registers set by bootloader |
49da97dc SG |
1629 | */ |
1630 | if (!is_valid_ether_addr(iap)) { | |
7d7628f3 DC |
1631 | *((__be32 *) &tmpaddr[0]) = |
1632 | cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); | |
1633 | *((__be16 *) &tmpaddr[4]) = | |
1634 | cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); | |
e6b043d5 | 1635 | iap = &tmpaddr[0]; |
1da177e4 LT |
1636 | } |
1637 | ||
ff5b2fab LS |
1638 | /* |
1639 | * 5) random mac address | |
1640 | */ | |
1641 | if (!is_valid_ether_addr(iap)) { | |
1642 | /* Report it and use a random ethernet address instead */ | |
1643 | netdev_err(ndev, "Invalid MAC address: %pM\n", iap); | |
1644 | eth_hw_addr_random(ndev); | |
1645 | netdev_info(ndev, "Using random MAC address: %pM\n", | |
1646 | ndev->dev_addr); | |
1647 | return; | |
1648 | } | |
1649 | ||
c556167f | 1650 | memcpy(ndev->dev_addr, iap, ETH_ALEN); |
1da177e4 | 1651 | |
49da97dc SG |
1652 | /* Adjust MAC if using macaddr */ |
1653 | if (iap == macaddr) | |
43af940c | 1654 | ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id; |
1da177e4 LT |
1655 | } |
1656 | ||
e6b043d5 | 1657 | /* ------------------------------------------------------------------------- */ |
1da177e4 | 1658 | |
e6b043d5 BW |
1659 | /* |
1660 | * Phy section | |
1661 | */ | |
c556167f | 1662 | static void fec_enet_adjust_link(struct net_device *ndev) |
1da177e4 | 1663 | { |
c556167f | 1664 | struct fec_enet_private *fep = netdev_priv(ndev); |
e6b043d5 | 1665 | struct phy_device *phy_dev = fep->phy_dev; |
e6b043d5 | 1666 | int status_change = 0; |
1da177e4 | 1667 | |
e6b043d5 BW |
1668 | /* Prevent a state halted on mii error */ |
1669 | if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { | |
1670 | phy_dev->state = PHY_RESUMING; | |
54309fa6 | 1671 | return; |
e6b043d5 | 1672 | } |
1da177e4 | 1673 | |
8ce5624f RK |
1674 | /* |
1675 | * If the netdev is down, or is going down, we're not interested | |
1676 | * in link state events, so just mark our idea of the link as down | |
1677 | * and ignore the event. | |
1678 | */ | |
1679 | if (!netif_running(ndev) || !netif_device_present(ndev)) { | |
1680 | fep->link = 0; | |
1681 | } else if (phy_dev->link) { | |
d97e7497 | 1682 | if (!fep->link) { |
6ea0722f | 1683 | fep->link = phy_dev->link; |
e6b043d5 BW |
1684 | status_change = 1; |
1685 | } | |
1da177e4 | 1686 | |
ef83337d RK |
1687 | if (fep->full_duplex != phy_dev->duplex) { |
1688 | fep->full_duplex = phy_dev->duplex; | |
d97e7497 | 1689 | status_change = 1; |
ef83337d | 1690 | } |
d97e7497 LS |
1691 | |
1692 | if (phy_dev->speed != fep->speed) { | |
1693 | fep->speed = phy_dev->speed; | |
1694 | status_change = 1; | |
1695 | } | |
1696 | ||
1697 | /* if any of the above changed restart the FEC */ | |
dbc64a8e | 1698 | if (status_change) { |
dbc64a8e | 1699 | napi_disable(&fep->napi); |
dbc64a8e | 1700 | netif_tx_lock_bh(ndev); |
ef83337d | 1701 | fec_restart(ndev); |
dbc64a8e | 1702 | netif_wake_queue(ndev); |
6af42d42 | 1703 | netif_tx_unlock_bh(ndev); |
dbc64a8e | 1704 | napi_enable(&fep->napi); |
dbc64a8e | 1705 | } |
d97e7497 LS |
1706 | } else { |
1707 | if (fep->link) { | |
f208ce10 RK |
1708 | napi_disable(&fep->napi); |
1709 | netif_tx_lock_bh(ndev); | |
c556167f | 1710 | fec_stop(ndev); |
f208ce10 RK |
1711 | netif_tx_unlock_bh(ndev); |
1712 | napi_enable(&fep->napi); | |
8d7ed0f0 | 1713 | fep->link = phy_dev->link; |
d97e7497 LS |
1714 | status_change = 1; |
1715 | } | |
1da177e4 | 1716 | } |
6aa20a22 | 1717 | |
e6b043d5 BW |
1718 | if (status_change) |
1719 | phy_print_status(phy_dev); | |
1720 | } | |
1da177e4 | 1721 | |
e6b043d5 | 1722 | static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) |
1da177e4 | 1723 | { |
e6b043d5 | 1724 | struct fec_enet_private *fep = bus->priv; |
97b72e43 | 1725 | unsigned long time_left; |
1da177e4 | 1726 | |
e6b043d5 | 1727 | fep->mii_timeout = 0; |
97b72e43 | 1728 | init_completion(&fep->mdio_done); |
e6b043d5 BW |
1729 | |
1730 | /* start a read op */ | |
1731 | writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | | |
1732 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | | |
1733 | FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); | |
1734 | ||
1735 | /* wait for end of transfer */ | |
97b72e43 BS |
1736 | time_left = wait_for_completion_timeout(&fep->mdio_done, |
1737 | usecs_to_jiffies(FEC_MII_TIMEOUT)); | |
1738 | if (time_left == 0) { | |
1739 | fep->mii_timeout = 1; | |
31b7720c | 1740 | netdev_err(fep->netdev, "MDIO read timeout\n"); |
97b72e43 | 1741 | return -ETIMEDOUT; |
1da177e4 | 1742 | } |
1da177e4 | 1743 | |
e6b043d5 BW |
1744 | /* return value */ |
1745 | return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); | |
7dd6a2aa | 1746 | } |
6aa20a22 | 1747 | |
e6b043d5 BW |
1748 | static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, |
1749 | u16 value) | |
1da177e4 | 1750 | { |
e6b043d5 | 1751 | struct fec_enet_private *fep = bus->priv; |
97b72e43 | 1752 | unsigned long time_left; |
1da177e4 | 1753 | |
e6b043d5 | 1754 | fep->mii_timeout = 0; |
97b72e43 | 1755 | init_completion(&fep->mdio_done); |
1da177e4 | 1756 | |
862f0982 SG |
1757 | /* start a write op */ |
1758 | writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | | |
e6b043d5 BW |
1759 | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | |
1760 | FEC_MMFR_TA | FEC_MMFR_DATA(value), | |
1761 | fep->hwp + FEC_MII_DATA); | |
1762 | ||
1763 | /* wait for end of transfer */ | |
97b72e43 BS |
1764 | time_left = wait_for_completion_timeout(&fep->mdio_done, |
1765 | usecs_to_jiffies(FEC_MII_TIMEOUT)); | |
1766 | if (time_left == 0) { | |
1767 | fep->mii_timeout = 1; | |
31b7720c | 1768 | netdev_err(fep->netdev, "MDIO write timeout\n"); |
97b72e43 | 1769 | return -ETIMEDOUT; |
e6b043d5 | 1770 | } |
1da177e4 | 1771 | |
e6b043d5 BW |
1772 | return 0; |
1773 | } | |
1da177e4 | 1774 | |
e8fcfcd5 NA |
1775 | static int fec_enet_clk_enable(struct net_device *ndev, bool enable) |
1776 | { | |
1777 | struct fec_enet_private *fep = netdev_priv(ndev); | |
1778 | int ret; | |
1779 | ||
1780 | if (enable) { | |
1781 | ret = clk_prepare_enable(fep->clk_ahb); | |
1782 | if (ret) | |
1783 | return ret; | |
1784 | ret = clk_prepare_enable(fep->clk_ipg); | |
1785 | if (ret) | |
1786 | goto failed_clk_ipg; | |
1787 | if (fep->clk_enet_out) { | |
1788 | ret = clk_prepare_enable(fep->clk_enet_out); | |
1789 | if (ret) | |
1790 | goto failed_clk_enet_out; | |
1791 | } | |
1792 | if (fep->clk_ptp) { | |
91c0d987 | 1793 | mutex_lock(&fep->ptp_clk_mutex); |
e8fcfcd5 | 1794 | ret = clk_prepare_enable(fep->clk_ptp); |
91c0d987 NA |
1795 | if (ret) { |
1796 | mutex_unlock(&fep->ptp_clk_mutex); | |
e8fcfcd5 | 1797 | goto failed_clk_ptp; |
91c0d987 NA |
1798 | } else { |
1799 | fep->ptp_clk_on = true; | |
1800 | } | |
1801 | mutex_unlock(&fep->ptp_clk_mutex); | |
e8fcfcd5 | 1802 | } |
9b5330ed FD |
1803 | if (fep->clk_ref) { |
1804 | ret = clk_prepare_enable(fep->clk_ref); | |
1805 | if (ret) | |
1806 | goto failed_clk_ref; | |
1807 | } | |
e8fcfcd5 NA |
1808 | } else { |
1809 | clk_disable_unprepare(fep->clk_ahb); | |
1810 | clk_disable_unprepare(fep->clk_ipg); | |
1811 | if (fep->clk_enet_out) | |
1812 | clk_disable_unprepare(fep->clk_enet_out); | |
91c0d987 NA |
1813 | if (fep->clk_ptp) { |
1814 | mutex_lock(&fep->ptp_clk_mutex); | |
e8fcfcd5 | 1815 | clk_disable_unprepare(fep->clk_ptp); |
91c0d987 NA |
1816 | fep->ptp_clk_on = false; |
1817 | mutex_unlock(&fep->ptp_clk_mutex); | |
1818 | } | |
9b5330ed FD |
1819 | if (fep->clk_ref) |
1820 | clk_disable_unprepare(fep->clk_ref); | |
e8fcfcd5 NA |
1821 | } |
1822 | ||
1823 | return 0; | |
9b5330ed FD |
1824 | |
1825 | failed_clk_ref: | |
1826 | if (fep->clk_ref) | |
1827 | clk_disable_unprepare(fep->clk_ref); | |
e8fcfcd5 NA |
1828 | failed_clk_ptp: |
1829 | if (fep->clk_enet_out) | |
1830 | clk_disable_unprepare(fep->clk_enet_out); | |
1831 | failed_clk_enet_out: | |
1832 | clk_disable_unprepare(fep->clk_ipg); | |
1833 | failed_clk_ipg: | |
1834 | clk_disable_unprepare(fep->clk_ahb); | |
1835 | ||
1836 | return ret; | |
1837 | } | |
1838 | ||
c556167f | 1839 | static int fec_enet_mii_probe(struct net_device *ndev) |
562d2f8c | 1840 | { |
c556167f | 1841 | struct fec_enet_private *fep = netdev_priv(ndev); |
230dec61 SG |
1842 | const struct platform_device_id *id_entry = |
1843 | platform_get_device_id(fep->pdev); | |
e6b043d5 | 1844 | struct phy_device *phy_dev = NULL; |
6fcc040f GU |
1845 | char mdio_bus_id[MII_BUS_ID_SIZE]; |
1846 | char phy_name[MII_BUS_ID_SIZE + 3]; | |
1847 | int phy_id; | |
43af940c | 1848 | int dev_id = fep->dev_id; |
562d2f8c | 1849 | |
418bd0d4 BW |
1850 | fep->phy_dev = NULL; |
1851 | ||
407066f8 UKK |
1852 | if (fep->phy_node) { |
1853 | phy_dev = of_phy_connect(ndev, fep->phy_node, | |
1854 | &fec_enet_adjust_link, 0, | |
1855 | fep->phy_interface); | |
1856 | } else { | |
1857 | /* check for attached phy */ | |
1858 | for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { | |
1859 | if ((fep->mii_bus->phy_mask & (1 << phy_id))) | |
1860 | continue; | |
1861 | if (fep->mii_bus->phy_map[phy_id] == NULL) | |
1862 | continue; | |
1863 | if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) | |
1864 | continue; | |
1865 | if (dev_id--) | |
1866 | continue; | |
949bdd20 | 1867 | strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); |
407066f8 UKK |
1868 | break; |
1869 | } | |
1da177e4 | 1870 | |
407066f8 UKK |
1871 | if (phy_id >= PHY_MAX_ADDR) { |
1872 | netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); | |
949bdd20 | 1873 | strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); |
407066f8 UKK |
1874 | phy_id = 0; |
1875 | } | |
1876 | ||
1877 | snprintf(phy_name, sizeof(phy_name), | |
1878 | PHY_ID_FMT, mdio_bus_id, phy_id); | |
1879 | phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, | |
1880 | fep->phy_interface); | |
6fcc040f GU |
1881 | } |
1882 | ||
6fcc040f | 1883 | if (IS_ERR(phy_dev)) { |
31b7720c | 1884 | netdev_err(ndev, "could not attach to PHY\n"); |
6fcc040f | 1885 | return PTR_ERR(phy_dev); |
e6b043d5 | 1886 | } |
1da177e4 | 1887 | |
e6b043d5 | 1888 | /* mask with MAC supported features */ |
baa70a5c | 1889 | if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) { |
230dec61 | 1890 | phy_dev->supported &= PHY_GBIT_FEATURES; |
b44592ff | 1891 | phy_dev->supported &= ~SUPPORTED_1000baseT_Half; |
d1391930 | 1892 | #if !defined(CONFIG_M5272) |
baa70a5c | 1893 | phy_dev->supported |= SUPPORTED_Pause; |
d1391930 | 1894 | #endif |
baa70a5c | 1895 | } |
230dec61 SG |
1896 | else |
1897 | phy_dev->supported &= PHY_BASIC_FEATURES; | |
1898 | ||
e6b043d5 | 1899 | phy_dev->advertising = phy_dev->supported; |
1da177e4 | 1900 | |
e6b043d5 BW |
1901 | fep->phy_dev = phy_dev; |
1902 | fep->link = 0; | |
1903 | fep->full_duplex = 0; | |
1da177e4 | 1904 | |
31b7720c JP |
1905 | netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", |
1906 | fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), | |
1907 | fep->phy_dev->irq); | |
418bd0d4 | 1908 | |
e6b043d5 | 1909 | return 0; |
1da177e4 LT |
1910 | } |
1911 | ||
e6b043d5 | 1912 | static int fec_enet_mii_init(struct platform_device *pdev) |
562d2f8c | 1913 | { |
b5680e0b | 1914 | static struct mii_bus *fec0_mii_bus; |
c556167f UKK |
1915 | struct net_device *ndev = platform_get_drvdata(pdev); |
1916 | struct fec_enet_private *fep = netdev_priv(ndev); | |
b5680e0b SG |
1917 | const struct platform_device_id *id_entry = |
1918 | platform_get_device_id(fep->pdev); | |
407066f8 | 1919 | struct device_node *node; |
e6b043d5 | 1920 | int err = -ENXIO, i; |
6b265293 | 1921 | |
b5680e0b SG |
1922 | /* |
1923 | * The dual fec interfaces are not equivalent with enet-mac. | |
1924 | * Here are the differences: | |
1925 | * | |
1926 | * - fec0 supports MII & RMII modes while fec1 only supports RMII | |
1927 | * - fec0 acts as the 1588 time master while fec1 is slave | |
1928 | * - external phys can only be configured by fec0 | |
1929 | * | |
1930 | * That is to say fec1 can not work independently. It only works | |
1931 | * when fec0 is working. The reason behind this design is that the | |
1932 | * second interface is added primarily for Switch mode. | |
1933 | * | |
1934 | * Because of the last point above, both phys are attached on fec0 | |
1935 | * mdio interface in board design, and need to be configured by | |
1936 | * fec0 mii_bus. | |
1937 | */ | |
43af940c | 1938 | if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { |
b5680e0b | 1939 | /* fec1 uses fec0 mii_bus */ |
e163cc97 LW |
1940 | if (mii_cnt && fec0_mii_bus) { |
1941 | fep->mii_bus = fec0_mii_bus; | |
1942 | mii_cnt++; | |
1943 | return 0; | |
1944 | } | |
1945 | return -ENOENT; | |
b5680e0b SG |
1946 | } |
1947 | ||
e6b043d5 | 1948 | fep->mii_timeout = 0; |
1da177e4 | 1949 | |
e6b043d5 BW |
1950 | /* |
1951 | * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed) | |
230dec61 SG |
1952 | * |
1953 | * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while | |
1954 | * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28 | |
1955 | * Reference Manual has an error on this, and gets fixed on i.MX6Q | |
1956 | * document. | |
e6b043d5 | 1957 | */ |
98a6eeb8 | 1958 | fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); |
230dec61 SG |
1959 | if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) |
1960 | fep->phy_speed--; | |
1961 | fep->phy_speed <<= 1; | |
e6b043d5 | 1962 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); |
1da177e4 | 1963 | |
e6b043d5 BW |
1964 | fep->mii_bus = mdiobus_alloc(); |
1965 | if (fep->mii_bus == NULL) { | |
1966 | err = -ENOMEM; | |
1967 | goto err_out; | |
1da177e4 LT |
1968 | } |
1969 | ||
e6b043d5 BW |
1970 | fep->mii_bus->name = "fec_enet_mii_bus"; |
1971 | fep->mii_bus->read = fec_enet_mdio_read; | |
1972 | fep->mii_bus->write = fec_enet_mdio_write; | |
391420f7 FF |
1973 | snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", |
1974 | pdev->name, fep->dev_id + 1); | |
e6b043d5 BW |
1975 | fep->mii_bus->priv = fep; |
1976 | fep->mii_bus->parent = &pdev->dev; | |
1977 | ||
1978 | fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); | |
1979 | if (!fep->mii_bus->irq) { | |
1980 | err = -ENOMEM; | |
1981 | goto err_out_free_mdiobus; | |
1da177e4 LT |
1982 | } |
1983 | ||
e6b043d5 BW |
1984 | for (i = 0; i < PHY_MAX_ADDR; i++) |
1985 | fep->mii_bus->irq[i] = PHY_POLL; | |
1da177e4 | 1986 | |
407066f8 UKK |
1987 | node = of_get_child_by_name(pdev->dev.of_node, "mdio"); |
1988 | if (node) { | |
1989 | err = of_mdiobus_register(fep->mii_bus, node); | |
1990 | of_node_put(node); | |
1991 | } else { | |
1992 | err = mdiobus_register(fep->mii_bus); | |
1993 | } | |
1994 | ||
1995 | if (err) | |
e6b043d5 | 1996 | goto err_out_free_mdio_irq; |
1da177e4 | 1997 | |
e163cc97 LW |
1998 | mii_cnt++; |
1999 | ||
b5680e0b SG |
2000 | /* save fec0 mii_bus */ |
2001 | if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) | |
2002 | fec0_mii_bus = fep->mii_bus; | |
2003 | ||
e6b043d5 | 2004 | return 0; |
1da177e4 | 2005 | |
e6b043d5 BW |
2006 | err_out_free_mdio_irq: |
2007 | kfree(fep->mii_bus->irq); | |
2008 | err_out_free_mdiobus: | |
2009 | mdiobus_free(fep->mii_bus); | |
2010 | err_out: | |
2011 | return err; | |
1da177e4 LT |
2012 | } |
2013 | ||
e6b043d5 | 2014 | static void fec_enet_mii_remove(struct fec_enet_private *fep) |
1da177e4 | 2015 | { |
e163cc97 LW |
2016 | if (--mii_cnt == 0) { |
2017 | mdiobus_unregister(fep->mii_bus); | |
2018 | kfree(fep->mii_bus->irq); | |
2019 | mdiobus_free(fep->mii_bus); | |
2020 | } | |
1da177e4 LT |
2021 | } |
2022 | ||
c556167f | 2023 | static int fec_enet_get_settings(struct net_device *ndev, |
e6b043d5 | 2024 | struct ethtool_cmd *cmd) |
1da177e4 | 2025 | { |
c556167f | 2026 | struct fec_enet_private *fep = netdev_priv(ndev); |
e6b043d5 | 2027 | struct phy_device *phydev = fep->phy_dev; |
1da177e4 | 2028 | |
e6b043d5 BW |
2029 | if (!phydev) |
2030 | return -ENODEV; | |
1da177e4 | 2031 | |
e6b043d5 | 2032 | return phy_ethtool_gset(phydev, cmd); |
1da177e4 LT |
2033 | } |
2034 | ||
c556167f | 2035 | static int fec_enet_set_settings(struct net_device *ndev, |
e6b043d5 | 2036 | struct ethtool_cmd *cmd) |
1da177e4 | 2037 | { |
c556167f | 2038 | struct fec_enet_private *fep = netdev_priv(ndev); |
e6b043d5 | 2039 | struct phy_device *phydev = fep->phy_dev; |
1da177e4 | 2040 | |
e6b043d5 BW |
2041 | if (!phydev) |
2042 | return -ENODEV; | |
1da177e4 | 2043 | |
e6b043d5 | 2044 | return phy_ethtool_sset(phydev, cmd); |
1da177e4 LT |
2045 | } |
2046 | ||
c556167f | 2047 | static void fec_enet_get_drvinfo(struct net_device *ndev, |
e6b043d5 | 2048 | struct ethtool_drvinfo *info) |
1da177e4 | 2049 | { |
c556167f | 2050 | struct fec_enet_private *fep = netdev_priv(ndev); |
6aa20a22 | 2051 | |
7826d43f JP |
2052 | strlcpy(info->driver, fep->pdev->dev.driver->name, |
2053 | sizeof(info->driver)); | |
2054 | strlcpy(info->version, "Revision: 1.0", sizeof(info->version)); | |
2055 | strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); | |
1da177e4 LT |
2056 | } |
2057 | ||
5ebae489 FL |
2058 | static int fec_enet_get_ts_info(struct net_device *ndev, |
2059 | struct ethtool_ts_info *info) | |
2060 | { | |
2061 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2062 | ||
2063 | if (fep->bufdesc_ex) { | |
2064 | ||
2065 | info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | | |
2066 | SOF_TIMESTAMPING_RX_SOFTWARE | | |
2067 | SOF_TIMESTAMPING_SOFTWARE | | |
2068 | SOF_TIMESTAMPING_TX_HARDWARE | | |
2069 | SOF_TIMESTAMPING_RX_HARDWARE | | |
2070 | SOF_TIMESTAMPING_RAW_HARDWARE; | |
2071 | if (fep->ptp_clock) | |
2072 | info->phc_index = ptp_clock_index(fep->ptp_clock); | |
2073 | else | |
2074 | info->phc_index = -1; | |
2075 | ||
2076 | info->tx_types = (1 << HWTSTAMP_TX_OFF) | | |
2077 | (1 << HWTSTAMP_TX_ON); | |
2078 | ||
2079 | info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | | |
2080 | (1 << HWTSTAMP_FILTER_ALL); | |
2081 | return 0; | |
2082 | } else { | |
2083 | return ethtool_op_get_ts_info(ndev, info); | |
2084 | } | |
2085 | } | |
2086 | ||
d1391930 GR |
2087 | #if !defined(CONFIG_M5272) |
2088 | ||
baa70a5c FL |
2089 | static void fec_enet_get_pauseparam(struct net_device *ndev, |
2090 | struct ethtool_pauseparam *pause) | |
2091 | { | |
2092 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2093 | ||
2094 | pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; | |
2095 | pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; | |
2096 | pause->rx_pause = pause->tx_pause; | |
2097 | } | |
2098 | ||
2099 | static int fec_enet_set_pauseparam(struct net_device *ndev, | |
2100 | struct ethtool_pauseparam *pause) | |
2101 | { | |
2102 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2103 | ||
0b146ca8 RK |
2104 | if (!fep->phy_dev) |
2105 | return -ENODEV; | |
2106 | ||
baa70a5c FL |
2107 | if (pause->tx_pause != pause->rx_pause) { |
2108 | netdev_info(ndev, | |
2109 | "hardware only support enable/disable both tx and rx"); | |
2110 | return -EINVAL; | |
2111 | } | |
2112 | ||
2113 | fep->pause_flag = 0; | |
2114 | ||
2115 | /* tx pause must be same as rx pause */ | |
2116 | fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; | |
2117 | fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; | |
2118 | ||
2119 | if (pause->rx_pause || pause->autoneg) { | |
2120 | fep->phy_dev->supported |= ADVERTISED_Pause; | |
2121 | fep->phy_dev->advertising |= ADVERTISED_Pause; | |
2122 | } else { | |
2123 | fep->phy_dev->supported &= ~ADVERTISED_Pause; | |
2124 | fep->phy_dev->advertising &= ~ADVERTISED_Pause; | |
2125 | } | |
2126 | ||
2127 | if (pause->autoneg) { | |
2128 | if (netif_running(ndev)) | |
2129 | fec_stop(ndev); | |
2130 | phy_start_aneg(fep->phy_dev); | |
2131 | } | |
dbc64a8e | 2132 | if (netif_running(ndev)) { |
dbc64a8e | 2133 | napi_disable(&fep->napi); |
dbc64a8e | 2134 | netif_tx_lock_bh(ndev); |
ef83337d | 2135 | fec_restart(ndev); |
dbc64a8e | 2136 | netif_wake_queue(ndev); |
6af42d42 | 2137 | netif_tx_unlock_bh(ndev); |
dbc64a8e | 2138 | napi_enable(&fep->napi); |
dbc64a8e | 2139 | } |
baa70a5c FL |
2140 | |
2141 | return 0; | |
2142 | } | |
2143 | ||
38ae92dc CH |
2144 | static const struct fec_stat { |
2145 | char name[ETH_GSTRING_LEN]; | |
2146 | u16 offset; | |
2147 | } fec_stats[] = { | |
2148 | /* RMON TX */ | |
2149 | { "tx_dropped", RMON_T_DROP }, | |
2150 | { "tx_packets", RMON_T_PACKETS }, | |
2151 | { "tx_broadcast", RMON_T_BC_PKT }, | |
2152 | { "tx_multicast", RMON_T_MC_PKT }, | |
2153 | { "tx_crc_errors", RMON_T_CRC_ALIGN }, | |
2154 | { "tx_undersize", RMON_T_UNDERSIZE }, | |
2155 | { "tx_oversize", RMON_T_OVERSIZE }, | |
2156 | { "tx_fragment", RMON_T_FRAG }, | |
2157 | { "tx_jabber", RMON_T_JAB }, | |
2158 | { "tx_collision", RMON_T_COL }, | |
2159 | { "tx_64byte", RMON_T_P64 }, | |
2160 | { "tx_65to127byte", RMON_T_P65TO127 }, | |
2161 | { "tx_128to255byte", RMON_T_P128TO255 }, | |
2162 | { "tx_256to511byte", RMON_T_P256TO511 }, | |
2163 | { "tx_512to1023byte", RMON_T_P512TO1023 }, | |
2164 | { "tx_1024to2047byte", RMON_T_P1024TO2047 }, | |
2165 | { "tx_GTE2048byte", RMON_T_P_GTE2048 }, | |
2166 | { "tx_octets", RMON_T_OCTETS }, | |
2167 | ||
2168 | /* IEEE TX */ | |
2169 | { "IEEE_tx_drop", IEEE_T_DROP }, | |
2170 | { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, | |
2171 | { "IEEE_tx_1col", IEEE_T_1COL }, | |
2172 | { "IEEE_tx_mcol", IEEE_T_MCOL }, | |
2173 | { "IEEE_tx_def", IEEE_T_DEF }, | |
2174 | { "IEEE_tx_lcol", IEEE_T_LCOL }, | |
2175 | { "IEEE_tx_excol", IEEE_T_EXCOL }, | |
2176 | { "IEEE_tx_macerr", IEEE_T_MACERR }, | |
2177 | { "IEEE_tx_cserr", IEEE_T_CSERR }, | |
2178 | { "IEEE_tx_sqe", IEEE_T_SQE }, | |
2179 | { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, | |
2180 | { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, | |
2181 | ||
2182 | /* RMON RX */ | |
2183 | { "rx_packets", RMON_R_PACKETS }, | |
2184 | { "rx_broadcast", RMON_R_BC_PKT }, | |
2185 | { "rx_multicast", RMON_R_MC_PKT }, | |
2186 | { "rx_crc_errors", RMON_R_CRC_ALIGN }, | |
2187 | { "rx_undersize", RMON_R_UNDERSIZE }, | |
2188 | { "rx_oversize", RMON_R_OVERSIZE }, | |
2189 | { "rx_fragment", RMON_R_FRAG }, | |
2190 | { "rx_jabber", RMON_R_JAB }, | |
2191 | { "rx_64byte", RMON_R_P64 }, | |
2192 | { "rx_65to127byte", RMON_R_P65TO127 }, | |
2193 | { "rx_128to255byte", RMON_R_P128TO255 }, | |
2194 | { "rx_256to511byte", RMON_R_P256TO511 }, | |
2195 | { "rx_512to1023byte", RMON_R_P512TO1023 }, | |
2196 | { "rx_1024to2047byte", RMON_R_P1024TO2047 }, | |
2197 | { "rx_GTE2048byte", RMON_R_P_GTE2048 }, | |
2198 | { "rx_octets", RMON_R_OCTETS }, | |
2199 | ||
2200 | /* IEEE RX */ | |
2201 | { "IEEE_rx_drop", IEEE_R_DROP }, | |
2202 | { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, | |
2203 | { "IEEE_rx_crc", IEEE_R_CRC }, | |
2204 | { "IEEE_rx_align", IEEE_R_ALIGN }, | |
2205 | { "IEEE_rx_macerr", IEEE_R_MACERR }, | |
2206 | { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, | |
2207 | { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, | |
2208 | }; | |
2209 | ||
2210 | static void fec_enet_get_ethtool_stats(struct net_device *dev, | |
2211 | struct ethtool_stats *stats, u64 *data) | |
2212 | { | |
2213 | struct fec_enet_private *fep = netdev_priv(dev); | |
2214 | int i; | |
2215 | ||
2216 | for (i = 0; i < ARRAY_SIZE(fec_stats); i++) | |
2217 | data[i] = readl(fep->hwp + fec_stats[i].offset); | |
2218 | } | |
2219 | ||
2220 | static void fec_enet_get_strings(struct net_device *netdev, | |
2221 | u32 stringset, u8 *data) | |
2222 | { | |
2223 | int i; | |
2224 | switch (stringset) { | |
2225 | case ETH_SS_STATS: | |
2226 | for (i = 0; i < ARRAY_SIZE(fec_stats); i++) | |
2227 | memcpy(data + i * ETH_GSTRING_LEN, | |
2228 | fec_stats[i].name, ETH_GSTRING_LEN); | |
2229 | break; | |
2230 | } | |
2231 | } | |
2232 | ||
2233 | static int fec_enet_get_sset_count(struct net_device *dev, int sset) | |
2234 | { | |
2235 | switch (sset) { | |
2236 | case ETH_SS_STATS: | |
2237 | return ARRAY_SIZE(fec_stats); | |
2238 | default: | |
2239 | return -EOPNOTSUPP; | |
2240 | } | |
2241 | } | |
d1391930 | 2242 | #endif /* !defined(CONFIG_M5272) */ |
38ae92dc | 2243 | |
32bc9b46 CH |
2244 | static int fec_enet_nway_reset(struct net_device *dev) |
2245 | { | |
2246 | struct fec_enet_private *fep = netdev_priv(dev); | |
2247 | struct phy_device *phydev = fep->phy_dev; | |
2248 | ||
2249 | if (!phydev) | |
2250 | return -ENODEV; | |
2251 | ||
2252 | return genphy_restart_aneg(phydev); | |
2253 | } | |
2254 | ||
d851b47b FD |
2255 | /* ITR clock source is enet system clock (clk_ahb). |
2256 | * TCTT unit is cycle_ns * 64 cycle | |
2257 | * So, the ICTT value = X us / (cycle_ns * 64) | |
2258 | */ | |
2259 | static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) | |
2260 | { | |
2261 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2262 | ||
2263 | return us * (fep->itr_clk_rate / 64000) / 1000; | |
2264 | } | |
2265 | ||
2266 | /* Set threshold for interrupt coalescing */ | |
2267 | static void fec_enet_itr_coal_set(struct net_device *ndev) | |
2268 | { | |
2269 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2270 | const struct platform_device_id *id_entry = | |
2271 | platform_get_device_id(fep->pdev); | |
2272 | int rx_itr, tx_itr; | |
2273 | ||
2274 | if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB)) | |
2275 | return; | |
2276 | ||
2277 | /* Must be greater than zero to avoid unpredictable behavior */ | |
2278 | if (!fep->rx_time_itr || !fep->rx_pkts_itr || | |
2279 | !fep->tx_time_itr || !fep->tx_pkts_itr) | |
2280 | return; | |
2281 | ||
2282 | /* Select enet system clock as Interrupt Coalescing | |
2283 | * timer Clock Source | |
2284 | */ | |
2285 | rx_itr = FEC_ITR_CLK_SEL; | |
2286 | tx_itr = FEC_ITR_CLK_SEL; | |
2287 | ||
2288 | /* set ICFT and ICTT */ | |
2289 | rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); | |
2290 | rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); | |
2291 | tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); | |
2292 | tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); | |
2293 | ||
2294 | rx_itr |= FEC_ITR_EN; | |
2295 | tx_itr |= FEC_ITR_EN; | |
2296 | ||
2297 | writel(tx_itr, fep->hwp + FEC_TXIC0); | |
2298 | writel(rx_itr, fep->hwp + FEC_RXIC0); | |
2299 | writel(tx_itr, fep->hwp + FEC_TXIC1); | |
2300 | writel(rx_itr, fep->hwp + FEC_RXIC1); | |
2301 | writel(tx_itr, fep->hwp + FEC_TXIC2); | |
2302 | writel(rx_itr, fep->hwp + FEC_RXIC2); | |
2303 | } | |
2304 | ||
2305 | static int | |
2306 | fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) | |
2307 | { | |
2308 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2309 | const struct platform_device_id *id_entry = | |
2310 | platform_get_device_id(fep->pdev); | |
2311 | ||
2312 | if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB)) | |
2313 | return -EOPNOTSUPP; | |
2314 | ||
2315 | ec->rx_coalesce_usecs = fep->rx_time_itr; | |
2316 | ec->rx_max_coalesced_frames = fep->rx_pkts_itr; | |
2317 | ||
2318 | ec->tx_coalesce_usecs = fep->tx_time_itr; | |
2319 | ec->tx_max_coalesced_frames = fep->tx_pkts_itr; | |
2320 | ||
2321 | return 0; | |
2322 | } | |
2323 | ||
2324 | static int | |
2325 | fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) | |
2326 | { | |
2327 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2328 | const struct platform_device_id *id_entry = | |
2329 | platform_get_device_id(fep->pdev); | |
2330 | ||
2331 | unsigned int cycle; | |
2332 | ||
2333 | if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB)) | |
2334 | return -EOPNOTSUPP; | |
2335 | ||
2336 | if (ec->rx_max_coalesced_frames > 255) { | |
2337 | pr_err("Rx coalesced frames exceed hardware limiation"); | |
2338 | return -EINVAL; | |
2339 | } | |
2340 | ||
2341 | if (ec->tx_max_coalesced_frames > 255) { | |
2342 | pr_err("Tx coalesced frame exceed hardware limiation"); | |
2343 | return -EINVAL; | |
2344 | } | |
2345 | ||
2346 | cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); | |
2347 | if (cycle > 0xFFFF) { | |
2348 | pr_err("Rx coalesed usec exceeed hardware limiation"); | |
2349 | return -EINVAL; | |
2350 | } | |
2351 | ||
2352 | cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); | |
2353 | if (cycle > 0xFFFF) { | |
2354 | pr_err("Rx coalesed usec exceeed hardware limiation"); | |
2355 | return -EINVAL; | |
2356 | } | |
2357 | ||
2358 | fep->rx_time_itr = ec->rx_coalesce_usecs; | |
2359 | fep->rx_pkts_itr = ec->rx_max_coalesced_frames; | |
2360 | ||
2361 | fep->tx_time_itr = ec->tx_coalesce_usecs; | |
2362 | fep->tx_pkts_itr = ec->tx_max_coalesced_frames; | |
2363 | ||
2364 | fec_enet_itr_coal_set(ndev); | |
2365 | ||
2366 | return 0; | |
2367 | } | |
2368 | ||
2369 | static void fec_enet_itr_coal_init(struct net_device *ndev) | |
2370 | { | |
2371 | struct ethtool_coalesce ec; | |
2372 | ||
2373 | ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; | |
2374 | ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; | |
2375 | ||
2376 | ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; | |
2377 | ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; | |
2378 | ||
2379 | fec_enet_set_coalesce(ndev, &ec); | |
2380 | } | |
2381 | ||
9b07be4b | 2382 | static const struct ethtool_ops fec_enet_ethtool_ops = { |
e6b043d5 BW |
2383 | .get_settings = fec_enet_get_settings, |
2384 | .set_settings = fec_enet_set_settings, | |
2385 | .get_drvinfo = fec_enet_get_drvinfo, | |
32bc9b46 | 2386 | .nway_reset = fec_enet_nway_reset, |
c1d7c48f | 2387 | .get_link = ethtool_op_get_link, |
d851b47b FD |
2388 | .get_coalesce = fec_enet_get_coalesce, |
2389 | .set_coalesce = fec_enet_set_coalesce, | |
38ae92dc | 2390 | #ifndef CONFIG_M5272 |
c1d7c48f RK |
2391 | .get_pauseparam = fec_enet_get_pauseparam, |
2392 | .set_pauseparam = fec_enet_set_pauseparam, | |
38ae92dc | 2393 | .get_strings = fec_enet_get_strings, |
c1d7c48f | 2394 | .get_ethtool_stats = fec_enet_get_ethtool_stats, |
38ae92dc CH |
2395 | .get_sset_count = fec_enet_get_sset_count, |
2396 | #endif | |
c1d7c48f | 2397 | .get_ts_info = fec_enet_get_ts_info, |
e6b043d5 | 2398 | }; |
1da177e4 | 2399 | |
c556167f | 2400 | static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) |
1da177e4 | 2401 | { |
c556167f | 2402 | struct fec_enet_private *fep = netdev_priv(ndev); |
e6b043d5 | 2403 | struct phy_device *phydev = fep->phy_dev; |
1da177e4 | 2404 | |
c556167f | 2405 | if (!netif_running(ndev)) |
e6b043d5 | 2406 | return -EINVAL; |
1da177e4 | 2407 | |
e6b043d5 BW |
2408 | if (!phydev) |
2409 | return -ENODEV; | |
2410 | ||
1d5244d0 BH |
2411 | if (fep->bufdesc_ex) { |
2412 | if (cmd == SIOCSHWTSTAMP) | |
2413 | return fec_ptp_set(ndev, rq); | |
2414 | if (cmd == SIOCGHWTSTAMP) | |
2415 | return fec_ptp_get(ndev, rq); | |
2416 | } | |
ff43da86 | 2417 | |
28b04113 | 2418 | return phy_mii_ioctl(phydev, rq, cmd); |
1da177e4 LT |
2419 | } |
2420 | ||
/* Release every RX skb (with its DMA mapping) and every TX bounce
 * buffer/skb for all queues.  Called on close and on error paths of
 * the buffer allocators; entries may legitimately be NULL when the
 * rings were only partially populated.
 */
static void fec_enet_free_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		rxq = fep->rx_queue[q];
		bdp = rxq->rx_bd_base;
		for (i = 0; i < rxq->rx_ring_size; i++) {
			skb = rxq->rx_skbuff[i];
			rxq->rx_skbuff[i] = NULL;
			if (skb) {
				/* Unmap before freeing: the descriptor
				 * still holds the bus address.
				 */
				dma_unmap_single(&fep->pdev->dev,
						 bdp->cbd_bufaddr,
						 FEC_ENET_RX_FRSIZE,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
			bdp = fec_enet_get_nextdesc(bdp, fep, q);
		}
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		txq = fep->tx_queue[q];
		bdp = txq->tx_bd_base;
		for (i = 0; i < txq->tx_ring_size; i++) {
			/* kfree/dev_kfree_skb tolerate NULL entries */
			kfree(txq->tx_bounce[i]);
			txq->tx_bounce[i] = NULL;
			skb = txq->tx_skbuff[i];
			txq->tx_skbuff[i] = NULL;
			dev_kfree_skb(skb);
		}
	}
}
f0b3fbea | 2460 | |
59d0f746 FL |
2461 | static void fec_enet_free_queue(struct net_device *ndev) |
2462 | { | |
2463 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2464 | int i; | |
2465 | struct fec_enet_priv_tx_q *txq; | |
2466 | ||
2467 | for (i = 0; i < fep->num_tx_queues; i++) | |
2468 | if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { | |
2469 | txq = fep->tx_queue[i]; | |
2470 | dma_free_coherent(NULL, | |
2471 | txq->tx_ring_size * TSO_HEADER_SIZE, | |
2472 | txq->tso_hdrs, | |
2473 | txq->tso_hdrs_dma); | |
2474 | } | |
2475 | ||
2476 | for (i = 0; i < fep->num_rx_queues; i++) | |
2477 | if (fep->rx_queue[i]) | |
2478 | kfree(fep->rx_queue[i]); | |
2479 | ||
2480 | for (i = 0; i < fep->num_tx_queues; i++) | |
2481 | if (fep->tx_queue[i]) | |
2482 | kfree(fep->tx_queue[i]); | |
2483 | } | |
2484 | ||
2485 | static int fec_enet_alloc_queue(struct net_device *ndev) | |
2486 | { | |
2487 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2488 | int i; | |
2489 | int ret = 0; | |
2490 | struct fec_enet_priv_tx_q *txq; | |
2491 | ||
2492 | for (i = 0; i < fep->num_tx_queues; i++) { | |
2493 | txq = kzalloc(sizeof(*txq), GFP_KERNEL); | |
2494 | if (!txq) { | |
2495 | ret = -ENOMEM; | |
2496 | goto alloc_failed; | |
2497 | } | |
2498 | ||
2499 | fep->tx_queue[i] = txq; | |
2500 | txq->tx_ring_size = TX_RING_SIZE; | |
2501 | fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size; | |
2502 | ||
2503 | txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; | |
2504 | txq->tx_wake_threshold = | |
2505 | (txq->tx_ring_size - txq->tx_stop_threshold) / 2; | |
2506 | ||
2507 | txq->tso_hdrs = dma_alloc_coherent(NULL, | |
2508 | txq->tx_ring_size * TSO_HEADER_SIZE, | |
2509 | &txq->tso_hdrs_dma, | |
2510 | GFP_KERNEL); | |
2511 | if (!txq->tso_hdrs) { | |
2512 | ret = -ENOMEM; | |
2513 | goto alloc_failed; | |
2514 | } | |
8b7c9efa | 2515 | } |
59d0f746 FL |
2516 | |
2517 | for (i = 0; i < fep->num_rx_queues; i++) { | |
2518 | fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), | |
2519 | GFP_KERNEL); | |
2520 | if (!fep->rx_queue[i]) { | |
2521 | ret = -ENOMEM; | |
2522 | goto alloc_failed; | |
2523 | } | |
2524 | ||
2525 | fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE; | |
2526 | fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size; | |
2527 | } | |
2528 | return ret; | |
2529 | ||
2530 | alloc_failed: | |
2531 | fec_enet_free_queue(ndev); | |
2532 | return ret; | |
f0b3fbea SH |
2533 | } |
2534 | ||
59d0f746 FL |
2535 | static int |
2536 | fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) | |
f0b3fbea | 2537 | { |
c556167f | 2538 | struct fec_enet_private *fep = netdev_priv(ndev); |
da2191e3 | 2539 | unsigned int i; |
f0b3fbea SH |
2540 | struct sk_buff *skb; |
2541 | struct bufdesc *bdp; | |
4d494cdc | 2542 | struct fec_enet_priv_rx_q *rxq; |
41ef84ce | 2543 | unsigned int off; |
f0b3fbea | 2544 | |
59d0f746 | 2545 | rxq = fep->rx_queue[queue]; |
4d494cdc FD |
2546 | bdp = rxq->rx_bd_base; |
2547 | for (i = 0; i < rxq->rx_ring_size; i++) { | |
730ee360 RK |
2548 | dma_addr_t addr; |
2549 | ||
b72061a3 | 2550 | skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); |
ffdce2cc RK |
2551 | if (!skb) |
2552 | goto err_alloc; | |
f0b3fbea | 2553 | |
41ef84ce FD |
2554 | off = ((unsigned long)skb->data) & fep->rx_align; |
2555 | if (off) | |
2556 | skb_reserve(skb, fep->rx_align + 1 - off); | |
2557 | ||
730ee360 | 2558 | addr = dma_map_single(&fep->pdev->dev, skb->data, |
41ef84ce FD |
2559 | FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); |
2560 | ||
730ee360 RK |
2561 | if (dma_mapping_error(&fep->pdev->dev, addr)) { |
2562 | dev_kfree_skb(skb); | |
d842a31f DFB |
2563 | if (net_ratelimit()) |
2564 | netdev_err(ndev, "Rx DMA memory map failed\n"); | |
ffdce2cc | 2565 | goto err_alloc; |
d842a31f | 2566 | } |
730ee360 | 2567 | |
4d494cdc | 2568 | rxq->rx_skbuff[i] = skb; |
730ee360 | 2569 | bdp->cbd_bufaddr = addr; |
f0b3fbea | 2570 | bdp->cbd_sc = BD_ENET_RX_EMPTY; |
ff43da86 FL |
2571 | |
2572 | if (fep->bufdesc_ex) { | |
2573 | struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; | |
2574 | ebdp->cbd_esc = BD_ENET_RX_INT; | |
2575 | } | |
2576 | ||
59d0f746 | 2577 | bdp = fec_enet_get_nextdesc(bdp, fep, queue); |
f0b3fbea SH |
2578 | } |
2579 | ||
2580 | /* Set the last buffer to wrap. */ | |
59d0f746 | 2581 | bdp = fec_enet_get_prevdesc(bdp, fep, queue); |
f0b3fbea | 2582 | bdp->cbd_sc |= BD_SC_WRAP; |
59d0f746 | 2583 | return 0; |
f0b3fbea | 2584 | |
59d0f746 FL |
2585 | err_alloc: |
2586 | fec_enet_free_buffers(ndev); | |
2587 | return -ENOMEM; | |
2588 | } | |
2589 | ||
/* Prepare one TX ring: allocate a bounce buffer per descriptor (used
 * when an skb's data does not meet the controller's TX alignment) and
 * clear the descriptors; no DMA mappings are created here — those are
 * made at transmit time.  On failure all buffers allocated so far are
 * released.
 *
 * Returns 0 or -ENOMEM.
 */
static int
fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;

	txq = fep->tx_queue[queue];
	bdp = txq->tx_bd_base;
	for (i = 0; i < txq->tx_ring_size; i++) {
		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
		if (!txq->tx_bounce[i])
			goto err_alloc;

		/* Descriptor stays owned by software until xmit */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = BD_ENET_TX_INT;
		}

		bdp = fec_enet_get_nextdesc(bdp, fep, queue);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, fep, queue);
	bdp->cbd_sc |= BD_SC_WRAP;

	return 0;

err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}
2626 | ||
59d0f746 FL |
2627 | static int fec_enet_alloc_buffers(struct net_device *ndev) |
2628 | { | |
2629 | struct fec_enet_private *fep = netdev_priv(ndev); | |
2630 | unsigned int i; | |
2631 | ||
2632 | for (i = 0; i < fep->num_rx_queues; i++) | |
2633 | if (fec_enet_alloc_rxq_buffers(ndev, i)) | |
2634 | return -ENOMEM; | |
2635 | ||
2636 | for (i = 0; i < fep->num_tx_queues; i++) | |
2637 | if (fec_enet_alloc_txq_buffers(ndev, i)) | |
2638 | return -ENOMEM; | |
2639 | return 0; | |
2640 | } | |
2641 | ||
1da177e4 | 2642 | static int |
c556167f | 2643 | fec_enet_open(struct net_device *ndev) |
1da177e4 | 2644 | { |
c556167f | 2645 | struct fec_enet_private *fep = netdev_priv(ndev); |
f0b3fbea | 2646 | int ret; |
1da177e4 | 2647 | |
5bbde4d2 | 2648 | pinctrl_pm_select_default_state(&fep->pdev->dev); |
e8fcfcd5 NA |
2649 | ret = fec_enet_clk_enable(ndev, true); |
2650 | if (ret) | |
2651 | return ret; | |
2652 | ||
1da177e4 LT |
2653 | /* I should reset the ring buffers here, but I don't yet know |
2654 | * a simple way to do that. | |
2655 | */ | |
1da177e4 | 2656 | |
c556167f | 2657 | ret = fec_enet_alloc_buffers(ndev); |
f0b3fbea SH |
2658 | if (ret) |
2659 | return ret; | |
2660 | ||
418bd0d4 | 2661 | /* Probe and connect to PHY when open the interface */ |
c556167f | 2662 | ret = fec_enet_mii_probe(ndev); |
418bd0d4 | 2663 | if (ret) { |
c556167f | 2664 | fec_enet_free_buffers(ndev); |
418bd0d4 BW |
2665 | return ret; |
2666 | } | |
ce5eaf02 | 2667 | |
ef83337d | 2668 | fec_restart(ndev); |
ce5eaf02 | 2669 | napi_enable(&fep->napi); |
e6b043d5 | 2670 | phy_start(fep->phy_dev); |
4d494cdc FD |
2671 | netif_tx_start_all_queues(ndev); |
2672 | ||
22f6b860 | 2673 | return 0; |
1da177e4 LT |
2674 | } |
2675 | ||
/* ndo_stop: stop the PHY, quiesce NAPI/TX and the MAC, disconnect the
 * PHY, then release clocks, pins and ring buffers.  The
 * netif_device_present() check skips the MAC shutdown when the device
 * was already detached (e.g. by suspend).
 */
static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	phy_stop(fep->phy_dev);

	if (netif_device_present(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_disable(ndev);
		fec_stop(ndev);
	}

	phy_disconnect(fep->phy_dev);
	fep->phy_dev = NULL;

	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	fec_enet_free_buffers(ndev);

	return 0;
}
2698 | ||
1da177e4 LT |
2699 | /* Set or clear the multicast filter for this adaptor. |
2700 | * Skeleton taken from sunlance driver. | |
2701 | * The CPM Ethernet implementation allows Multicast as well as individual | |
2702 | * MAC address filtering. Some of the drivers check to make sure it is | |
2703 | * a group multicast address, and discard those that are not. I guess I | |
2704 | * will do the same for now, but just remove the test if you want | |
2705 | * individual filtering as well (do the upper net layers want or support | |
2706 | * this kind of feature?). | |
2707 | */ | |
2708 | ||
2709 | #define HASH_BITS 6 /* #bits in hash */ | |
2710 | #define CRC32_POLY 0xEDB88320 | |
2711 | ||
c556167f | 2712 | static void set_multicast_list(struct net_device *ndev) |
1da177e4 | 2713 | { |
c556167f | 2714 | struct fec_enet_private *fep = netdev_priv(ndev); |
22bedad3 | 2715 | struct netdev_hw_addr *ha; |
48e2f183 | 2716 | unsigned int i, bit, data, crc, tmp; |
1da177e4 LT |
2717 | unsigned char hash; |
2718 | ||
c556167f | 2719 | if (ndev->flags & IFF_PROMISC) { |
f44d6305 SH |
2720 | tmp = readl(fep->hwp + FEC_R_CNTRL); |
2721 | tmp |= 0x8; | |
2722 | writel(tmp, fep->hwp + FEC_R_CNTRL); | |
4e831836 SH |
2723 | return; |
2724 | } | |
1da177e4 | 2725 | |
4e831836 SH |
2726 | tmp = readl(fep->hwp + FEC_R_CNTRL); |
2727 | tmp &= ~0x8; | |
2728 | writel(tmp, fep->hwp + FEC_R_CNTRL); | |
2729 | ||
c556167f | 2730 | if (ndev->flags & IFF_ALLMULTI) { |
4e831836 SH |
2731 | /* Catch all multicast addresses, so set the |
2732 | * filter to all 1's | |
2733 | */ | |
2734 | writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | |
2735 | writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); | |
2736 | ||
2737 | return; | |
2738 | } | |
2739 | ||
2740 | /* Clear filter and add the addresses in hash register | |
2741 | */ | |
2742 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | |
2743 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); | |
2744 | ||
c556167f | 2745 | netdev_for_each_mc_addr(ha, ndev) { |
4e831836 SH |
2746 | /* calculate crc32 value of mac address */ |
2747 | crc = 0xffffffff; | |
2748 | ||
c556167f | 2749 | for (i = 0; i < ndev->addr_len; i++) { |
22bedad3 | 2750 | data = ha->addr[i]; |
4e831836 SH |
2751 | for (bit = 0; bit < 8; bit++, data >>= 1) { |
2752 | crc = (crc >> 1) ^ | |
2753 | (((crc ^ data) & 1) ? CRC32_POLY : 0); | |
1da177e4 LT |
2754 | } |
2755 | } | |
4e831836 SH |
2756 | |
2757 | /* only upper 6 bits (HASH_BITS) are used | |
2758 | * which point to specific bit in he hash registers | |
2759 | */ | |
2760 | hash = (crc >> (32 - HASH_BITS)) & 0x3f; | |
2761 | ||
2762 | if (hash > 31) { | |
2763 | tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | |
2764 | tmp |= 1 << (hash - 32); | |
2765 | writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | |
2766 | } else { | |
2767 | tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); | |
2768 | tmp |= 1 << hash; | |
2769 | writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); | |
2770 | } | |
1da177e4 LT |
2771 | } |
2772 | } | |
2773 | ||
/* Set a MAC change in hardware.
 * Program the station address registers; with a NULL @p (as called
 * from fec_enet_init) the current ndev->dev_addr is (re)written to the
 * hardware without validation.  The byte order swizzling matches the
 * FEC's big-endian PADDR1/PADDR2 register layout.
 */
static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sockaddr *addr = p;

	if (addr) {
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	}

	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
		fep->hwp + FEC_ADDR_LOW);
	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
		fep->hwp + FEC_ADDR_HIGH);
	return 0;
}
2794 | ||
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fec_poll_controller - FEC Poll controller function
 * @dev: The FEC network adapter
 *
 * Polled functionality used by netconsole and others in non interrupt mode
 *
 * Each valid IRQ is masked while its handler runs synchronously, so
 * the real interrupt cannot race with this polled invocation.
 */
static void fec_poll_controller(struct net_device *dev)
{
	int i;
	struct fec_enet_private *fep = netdev_priv(dev);

	for (i = 0; i < FEC_IRQ_NUM; i++) {
		if (fep->irq[i] > 0) {
			disable_irq(fep->irq[i]);
			fec_enet_interrupt(fep->irq[i], dev);
			enable_irq(fep->irq[i]);
		}
	}
}
#endif
2817 | ||
/* Feature changes that require the MAC to be stopped and restarted */
#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM

/* ndo_set_features: apply feature changes, fully quiescing the device
 * (NAPI off, TX locked, MAC stopped) only when a feature in
 * FEATURES_NEED_QUIESCE toggles, then restarting it afterwards.
 */
static int fec_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	/* Quiesce the device if necessary */
	if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(netdev);
		fec_stop(netdev);
	}

	netdev->features = features;

	/* Receive checksum has been changed */
	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
		else
			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
	}

	/* Resume the device after updates */
	if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
		fec_restart(netdev);
		netif_tx_wake_all_queues(netdev);
		netif_tx_unlock_bh(netdev);
		napi_enable(&fep->napi);
	}

	return 0;
}
2853 | ||
/* Net device operations for the FEC.  No ndo_get_stats: the default
 * dev->stats counters maintained by the RX/TX paths are used.
 */
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fec_poll_controller,
#endif
	.ndo_set_features	= fec_set_features,
};
2869 | ||
1da177e4 LT |
2870 | /* |
2871 | * XXX: We need to clean up on failure exits here. | |
ead73183 | 2872 | * |
1da177e4 | 2873 | */ |
c556167f | 2874 | static int fec_enet_init(struct net_device *ndev) |
1da177e4 | 2875 | { |
c556167f | 2876 | struct fec_enet_private *fep = netdev_priv(ndev); |
48496255 SG |
2877 | const struct platform_device_id *id_entry = |
2878 | platform_get_device_id(fep->pdev); | |
4d494cdc FD |
2879 | struct fec_enet_priv_tx_q *txq; |
2880 | struct fec_enet_priv_rx_q *rxq; | |
f0b3fbea | 2881 | struct bufdesc *cbd_base; |
4d494cdc | 2882 | dma_addr_t bd_dma; |
55d0218a | 2883 | int bd_size; |
59d0f746 | 2884 | unsigned int i; |
55d0218a | 2885 | |
41ef84ce FD |
2886 | #if defined(CONFIG_ARM) |
2887 | fep->rx_align = 0xf; | |
2888 | fep->tx_align = 0xf; | |
2889 | #else | |
2890 | fep->rx_align = 0x3; | |
2891 | fep->tx_align = 0x3; | |
2892 | #endif | |
2893 | ||
59d0f746 | 2894 | fec_enet_alloc_queue(ndev); |
79f33912 | 2895 | |
55d0218a NA |
2896 | if (fep->bufdesc_ex) |
2897 | fep->bufdesc_size = sizeof(struct bufdesc_ex); | |
2898 | else | |
2899 | fep->bufdesc_size = sizeof(struct bufdesc); | |
4d494cdc | 2900 | bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * |
55d0218a | 2901 | fep->bufdesc_size; |
1da177e4 | 2902 | |
8d4dd5cf | 2903 | /* Allocate memory for buffer descriptors. */ |
4d494cdc | 2904 | cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, |
d0320f75 | 2905 | GFP_KERNEL); |
4d494cdc | 2906 | if (!cbd_base) { |
79f33912 NA |
2907 | return -ENOMEM; |
2908 | } | |
2909 | ||
4d494cdc | 2910 | memset(cbd_base, 0, bd_size); |
1da177e4 | 2911 | |
49da97dc | 2912 | /* Get the Ethernet address */ |
c556167f | 2913 | fec_get_mac(ndev); |
44934fac LS |
2914 | /* make sure MAC we just acquired is programmed into the hw */ |
2915 | fec_set_mac_address(ndev, NULL); | |
1da177e4 | 2916 | |
8d4dd5cf | 2917 | /* Set receive and transmit descriptor base. */ |
59d0f746 FL |
2918 | for (i = 0; i < fep->num_rx_queues; i++) { |
2919 | rxq = fep->rx_queue[i]; | |
2920 | rxq->index = i; | |
2921 | rxq->rx_bd_base = (struct bufdesc *)cbd_base; | |
2922 | rxq->bd_dma = bd_dma; | |
2923 | if (fep->bufdesc_ex) { | |
2924 | bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size; | |
2925 | cbd_base = (struct bufdesc *) | |
2926 | (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); | |
2927 | } else { | |
2928 | bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size; | |
2929 | cbd_base += rxq->rx_ring_size; | |
2930 | } | |
2931 | } | |
2932 | ||
2933 | for (i = 0; i < fep->num_tx_queues; i++) { | |
2934 | txq = fep->tx_queue[i]; | |
2935 | txq->index = i; | |
2936 | txq->tx_bd_base = (struct bufdesc *)cbd_base; | |
2937 | txq->bd_dma = bd_dma; | |
2938 | if (fep->bufdesc_ex) { | |
2939 | bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; | |
2940 | cbd_base = (struct bufdesc *) | |
2941 | (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); | |
2942 | } else { | |
2943 | bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; | |
2944 | cbd_base += txq->tx_ring_size; | |
2945 | } | |
2946 | } | |
4d494cdc | 2947 | |
1da177e4 | 2948 | |
22f6b860 | 2949 | /* The FEC Ethernet specific entries in the device structure */ |
c556167f UKK |
2950 | ndev->watchdog_timeo = TX_TIMEOUT; |
2951 | ndev->netdev_ops = &fec_netdev_ops; | |
2952 | ndev->ethtool_ops = &fec_enet_ethtool_ops; | |
633e7533 | 2953 | |
dc975382 | 2954 | writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); |
322555f5 | 2955 | netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); |
dc975382 | 2956 | |
09d1e541 | 2957 | if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) |
cdffcf1b JB |
2958 | /* enable hw VLAN support */ |
2959 | ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; | |
cdffcf1b | 2960 | |
48496255 | 2961 | if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { |
79f33912 NA |
2962 | ndev->gso_max_segs = FEC_MAX_TSO_SEGS; |
2963 | ||
48496255 SG |
2964 | /* enable hw accelerator */ |
2965 | ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
79f33912 | 2966 | | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); |
48496255 SG |
2967 | fep->csum_flags |= FLAG_RX_CSUM_ENABLED; |
2968 | } | |
4c09eed9 | 2969 | |
41ef84ce FD |
2970 | if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) { |
2971 | fep->tx_align = 0; | |
2972 | fep->rx_align = 0x3f; | |
2973 | } | |
2974 | ||
09d1e541 NA |
2975 | ndev->hw_features = ndev->features; |
2976 | ||
ef83337d | 2977 | fec_restart(ndev); |
1da177e4 | 2978 | |
1da177e4 LT |
2979 | return 0; |
2980 | } | |
2981 | ||
#ifdef CONFIG_OF
/* Pulse the PHY reset GPIO described by the device tree: hold it low
 * for "phy-reset-duration" milliseconds (default/fallback 1 ms), then
 * release it.  All failures are soft — a board without the property
 * simply skips the reset.
 */
static void fec_reset_phy(struct platform_device *pdev)
{
	int err, phy_reset;
	int msec = 1;
	struct device_node *np = pdev->dev.of_node;

	if (!np)
		return;

	of_property_read_u32(np, "phy-reset-duration", &msec);
	/* A sane reset duration should not be longer than 1s */
	if (msec > 1000)
		msec = 1;

	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
	if (!gpio_is_valid(phy_reset))
		return;

	/* Request the line already driven low (PHY held in reset) */
	err = devm_gpio_request_one(&pdev->dev, phy_reset,
				    GPIOF_OUT_INIT_LOW, "phy-reset");
	if (err) {
		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
		return;
	}
	msleep(msec);
	gpio_set_value(phy_reset, 1);
}
#else /* CONFIG_OF */
static void fec_reset_phy(struct platform_device *pdev)
{
	/*
	 * In case of platform probe, the reset has been done
	 * by machine code.
	 */
}
#endif /* CONFIG_OF */
3019 | ||
/* Read "fsl,num-tx-queues"/"fsl,num-rx-queues" from the device tree
 * into *num_tx/*num_rx.  Both default to 1 when the node is absent,
 * the property is missing, or the value is out of the supported range
 * (a warning is printed only in the out-of-range case).
 */
static void
fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
{
	struct device_node *np = pdev->dev.of_node;
	int err;

	*num_tx = *num_rx = 1;

	if (!np || !of_device_is_available(np))
		return;

	/* parse the num of tx and rx queues */
	err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
	if (err)
		*num_tx = 1;

	err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
	if (err)
		*num_rx = 1;

	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
			 *num_tx);
		*num_tx = 1;
		return;
	}

	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
			 *num_rx);
		*num_rx = 1;
		return;
	}

}
3055 | ||
/* Platform probe: allocate the multiqueue netdev, map registers,
 * resolve the PHY (phandle or fixed-link), acquire the mandatory
 * ipg/ahb clocks plus the optional enet_out/enet_clk_ref/ptp clocks,
 * enable the optional PHY regulator, reset the PHY, initialise the
 * device, hook up the IRQs and MDIO bus, then register the netdev.
 * The probe-time clocks/pins are released again before registration —
 * ndo_open re-enables them.
 */
static int
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;
	const struct of_device_id *of_id;
	static int dev_id;	/* monotonically numbers FEC instances */
	struct device_node *np = pdev->dev.of_node, *phy_node;
	int num_tx_qs;
	int num_rx_qs;

	of_id = of_match_device(fec_dt_ids, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;

	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);

	/* Init network device */
	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
				  num_tx_qs, num_rx_qs);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);

	fep->num_rx_queues = num_rx_qs;
	fep->num_tx_queues = num_tx_qs;

#if !defined(CONFIG_M5272)
	/* default enable pause frame auto negotiation */
	if (pdev->id_entry &&
	    (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fep->hwp = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(fep->hwp)) {
		ret = PTR_ERR(fep->hwp);
		goto failed_ioremap;
	}

	fep->pdev = pdev;
	fep->dev_id = dev_id++;

	fep->bufdesc_ex = 0;

	platform_set_drvdata(pdev, ndev);

	/* PHY via phandle, or register a fixed link described inline */
	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"broken fixed-link specification\n");
			goto failed_phy;
		}
		phy_node = of_node_get(np);
	}
	fep->phy_node = phy_node;

	ret = of_get_phy_mode(pdev->dev.of_node);
	if (ret < 0) {
		/* Not a DT system: fall back to platform data, or MII */
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata)
			fep->phy_interface = pdata->phy;
		else
			fep->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = ret;
	}

	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fep->clk_ipg)) {
		ret = PTR_ERR(fep->clk_ipg);
		goto failed_clk;
	}

	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(fep->clk_ahb)) {
		ret = PTR_ERR(fep->clk_ahb);
		goto failed_clk;
	}

	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);

	/* enet_out is optional, depends on board */
	fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
	if (IS_ERR(fep->clk_enet_out))
		fep->clk_enet_out = NULL;

	fep->ptp_clk_on = false;
	mutex_init(&fep->ptp_clk_mutex);

	/* clk_ref is optional, depends on board */
	fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
	if (IS_ERR(fep->clk_ref))
		fep->clk_ref = NULL;

	/* Extended descriptors are only usable when a PTP clock exists */
	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
	fep->bufdesc_ex =
		pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
	if (IS_ERR(fep->clk_ptp)) {
		fep->clk_ptp = NULL;
		fep->bufdesc_ex = 0;
	}

	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			goto failed_regulator;
		}
	} else {
		fep->reg_phy = NULL;
	}

	fec_reset_phy(pdev);

	if (fep->bufdesc_ex)
		fec_ptp_init(pdev);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	/* IRQ 0 is mandatory; further IRQs are optional extras */
	for (i = 0; i < FEC_IRQ_NUM; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			if (i)
				break;
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;
	}

	init_completion(&fep->mdio_done);
	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_regulator:
	fec_enet_clk_enable(ndev, false);
failed_clk:
failed_phy:
	of_node_put(phy_node);
failed_ioremap:
	free_netdev(ndev);

	return ret;
}
3248 | ||
/* Platform remove: cancel deferred work, unregister the netdev, tear
 * down the MDIO bus, and release the regulator, PTP clock and DT
 * references.  Most resources (IRQs, clocks, regmap) are devm-managed
 * or released in ndo_stop.
 */
static int
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	cancel_delayed_work_sync(&fep->time_keep);
	cancel_work_sync(&fep->tx_timeout_work);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	if (fep->ptp_clock)
		ptp_clock_unregister(fep->ptp_clock);
	fec_enet_clk_enable(ndev, false);
	of_node_put(fep->phy_node);
	free_netdev(ndev);

	return 0;
}
3269 | ||
/* PM suspend: under RTNL, stop the PHY, NAPI and TX, detach the netdev
 * (so ndo_stop skips the MAC shutdown if close races with suspend) and
 * halt the MAC; then gate the clocks, park the pins and power down the
 * optional PHY regulator.
 */
static int __maybe_unused fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	rtnl_lock();
	if (netif_running(ndev)) {
		phy_stop(fep->phy_dev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
	}
	rtnl_unlock();

	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);

	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);

	return 0;
}
3294 | ||
/*
 * fec_resume - system resume handler (dev_pm_ops .resume).
 *
 * Mirror of fec_suspend: re-enable the PHY regulator first (the MAC/PHY
 * need power before clocks and restart), restore the default pin state,
 * ungate the clocks, and — if the interface was up — restart the MAC,
 * re-attach the netdev under the TX lock, re-enable NAPI and start the
 * PHY, all under RTNL.
 *
 * Returns 0 on success or a negative errno if the regulator or clocks
 * could not be enabled; on clock failure the regulator is rolled back.
 */
static int __maybe_unused fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	if (fep->reg_phy) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	pinctrl_pm_select_default_state(&fep->pdev->dev);
	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	rtnl_lock();
	if (netif_running(ndev)) {
		fec_restart(ndev);
		/* Re-attach under the TX lock, symmetric with suspend. */
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_start(fep->phy_dev);
	}
	rtnl_unlock();

	return 0;

failed_clk:
	/* Undo the regulator enable done above before reporting failure. */
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}
3330 | ||
/* System sleep PM ops: plain suspend/resume, no runtime PM here. */
static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
		.pm	= &fec_pm_ops,
		/* Device-tree match table for OF-based platforms. */
		.of_match_table = fec_dt_ids,
	},
	/* Legacy platform-device-id matching for non-DT platforms. */
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove	= fec_drv_remove,
};

/* Expands to module init/exit that register/unregister fec_driver. */
module_platform_driver(fec_driver);

MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");