/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
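
/* A minimal sketch (editorial illustration, not driver code) of the ring
 * walk described above; next_rxbd() is a hypothetical helper, while
 * BD_LFLAG(), RXBD_WRAP and RXBD_EMPTY are the flags used by
 * gfar_init_rxbdp() below:
 *
 *	static struct rxbd8 *next_rxbd(struct gfar_priv_rx_q *q,
 *				       struct rxbd8 *bdp)
 *	{
 *		// The last descriptor carries the WRAP bit, so the walk
 *		// folds back to the base of the ring instead of running
 *		// past the end.
 *		if (be32_to_cpu(bdp->lstatus) & BD_LFLAG(RXBD_WRAP))
 *			return q->rx_bd_base;
 *		return bdp + 1;
 *	}
 *
 * Processing stops at the first descriptor still flagged RXBD_EMPTY,
 * i.e. one the controller has not filled yet.
 */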

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT	(5*HZ)

const char gfar_driver_version[] = "2.0";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
				int alloc_cnt);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}
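
/* Ordering note for gfar_init_rxbdp(): gfar_wmb() ensures the bufPtr
 * update is visible to the controller before the lstatus write flips
 * RXBD_EMPTY and hands the descriptor back to hardware; without the
 * barrier, the DMA engine could observe an "empty" descriptor that
 * still points at the old buffer.
 */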

static void gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	u32 __iomem *rfbptr;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
					    TXBD_WRAP);
	}

	rfbptr = &regs->rfbptr0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];

		rx_queue->next_to_clean = 0;
		rx_queue->next_to_use = 0;
		rx_queue->next_to_alloc = 0;

		/* make sure next_to_clean != next_to_use after this
		 * by leaving at least 1 unused descriptor
		 */
		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));

		rx_queue->rfbptr = rfbptr;
		rfbptr += 2;
	}
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->ndev = ndev;
		rx_queue->dev = dev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (j = 0; j < tx_queue->tx_ring_size; j++)
			tx_queue->tx_skbuff[j] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
					    sizeof(*rx_queue->rx_buff),
					    GFP_KERNEL);
		if (!rx_queue->rx_buff)
			goto cleanup;
	}

	gfar_init_bds(ndev);

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
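
/* Layout of the coherent block allocated above, as a worked example
 * (assuming the 8-byte txbd8/rxbd8 descriptor layout from gianfar.h):
 * with two TX queues and two RX queues of 256 descriptors each, a single
 * dma_alloc_coherent() of 4 * 256 * 8 = 8192 bytes is carved up as
 *
 *	[ TX ring 0 ][ TX ring 1 ][ RX ring 0 ][ RX ring 1 ]
 *
 * with each queue's *_bd_base / *_bd_dma_base pointing at its slice.
 */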

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

static void gfar_rx_offload_en(struct gfar_private *priv)
{
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en || priv->rx_filer_enable)
		priv->uses_rxfcb = 1;
}

static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask,
				      unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only a single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}
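
/* In MQ_MG_MODE the tx_mask/rx_mask arguments are per-queue bitmasks:
 * bit i selects queue i's interrupt-coalescing register. A hypothetical
 * partial update would look like
 *
 *	gfar_configure_coalescing(priv, BIT(0), 0);
 *
 * which reprograms coalescing for TX queue 0 only, while the 0xFF masks
 * used by gfar_configure_coalescing_all() touch every queue.
 */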

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	eth_mac_addr(dev, p);

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = gfar_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;

		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;

		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->ndev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (!gfar_irq(grp, TX)->irq ||
		    !gfar_irq(grp, RX)->irq ||
		    !gfar_irq(grp, ER)->irq)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 rxq_mask, txq_mask;
		int ret;

		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);

		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
		if (!ret) {
			grp->rx_bit_map = rxq_mask ?
				rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
		if (!ret) {
			grp->tx_bit_map = txq_mask ?
				txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit() parses
	 * from right to left, so we need to reverse the 8 bits to get the
	 * queue index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}
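
/* Worked example of the bit reversal above: a device-tree mask of 0x80
 * means "queue 0", with q0 in the MSB. After bitrev8(), 0x80 becomes
 * 0x01, so for_each_set_bit() reports bit 0 and the group is correctly
 * wired to priv->rx_queue[0] / priv->tx_queue[0].
 */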

static int gfar_of_group_count(struct device_node *np)
{
	struct device_node *child;
	int num = 0;

	for_each_available_child_of_node(np, child)
		if (!of_node_cmp(child->name, "queue-group"))
			num++;

	return num;
}
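
/* For orientation, a rough sketch of the device-tree shape this code
 * expects for an eTSEC 2.0 node (property and node names are taken from
 * the parsing in this file; the addresses and interrupt specifiers are
 * placeholders, not a complete binding):
 *
 *	ethernet@24000 {
 *		compatible = "fsl,etsec2";
 *		model = "eTSEC";
 *		fsl,num_tx_queues = <8>;
 *		fsl,num_rx_queues = <8>;
 *
 *		queue-group@0 {
 *			reg = <0x24000 0x1000>;
 *			interrupts = <29 2 30 2 34 2>; // TX, RX, ER
 *		};
 *		queue-group@1 {
 *			reg = <0x25000 0x1000>;
 *			interrupts = <17 2 18 2 24 2>;
 *		};
 *	};
 *
 * Each available queue-group child becomes one interrupt group via
 * gfar_parse_group().
 */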

static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	u32 stash_len = 0;
	u32 stash_idx = 0;
	unsigned int num_tx_qs, num_rx_qs;
	unsigned short mode, poll_mode;

	if (!np)
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = gfar_of_group_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			u32 tx_queues, rx_queues;
			int ret;

			/* parse the num of HW tx and rx queues */
			ret = of_property_read_u32(np, "fsl,num_tx_queues",
						   &tx_queues);
			num_tx_qs = ret ? 1 : tx_queues;

			ret = of_property_read_u32(np, "fsl,num_rx_queues",
						   &rx_queues);
			num_rx_qs = ret ? 1 : rx_queues;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = of_property_read_string(np, "model", &model);
	if (err) {
		pr_err("Device model property missing, aborting\n");
		goto rx_alloc_failed;
	}

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_available_child_of_node(np, child) {
			if (of_node_cmp(child->name, "queue-group"))
				continue;

			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	if (of_property_read_bool(np, "bd-stash")) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	err = of_property_read_u32(np, "rx-stash-len", &stash_len);

	if (err == 0)
		priv->rx_stash_size = stash_len;

	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

	if (err == 0)
		priv->rx_stash_index = stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER |
				      FSL_GIANFAR_DEV_HAS_RX_FILER;

	err = of_property_read_string(np, "phy-connection-type", &ctype);

	/* We only care about rgmii-id. The rest are autodetected */
	if (err == 0 && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_find_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	if (of_get_property(np, "fsl,wake-on-filer", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
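
/* From user space, the path above is reached through the standard
 * SIOCSHWTSTAMP ioctl. A minimal sketch (the device name and socket
 * descriptor are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Note that any rx_filter other than HWTSTAMP_FILTER_NONE is widened to
 * HWTSTAMP_FILTER_ALL and written back, as the switch above shows.
 */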

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
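
/* Resulting table shape: the default rule sits at MAX_FILER_IDX, and each
 * cluster_entry_per_class() call below it consumes four entries (6 classes
 * * 4 = 24), so cur_filer_idx ends up at MAX_FILER_IDX - 24, with every
 * index from 0 up to cur_filer_idx - 1 filled with RQFCR_CMP_NOMATCH
 * placeholders.
 */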

#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	/* P2020/P1010 Rev 1; MPC8548 Rev 2 */
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

void gfar_mac_reset(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(3);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	udelay(3);

	gfar_rx_offload_en(priv);

	/* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;

	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
	 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
	 * and by checking RxBD[LG] and discarding larger than MAXFRM.
	 */
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

	gfar_write(&regs->maccfg2, tempval);

	/* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	if (priv->extended_hash)
		gfar_clear_exact_match(priv->ndev);

	gfar_mac_rx_config(priv);

	gfar_mac_tx_config(priv);

	gfar_set_mac_address(priv->ndev);

	gfar_set_multi(priv->ndev);

	/* clear ievent and imask before configuring coalescing */
	gfar_ints_disable(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);
}

static void gfar_hw_init(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 attrs;

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(priv);

	gfar_mac_reset(priv);

	/* Zero out the rmon mib registers if the device has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing
	 * depending on driver parameters
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* FIFO configs */
	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);

	/* Program the interrupt steering regs, only for MG devices */
	if (priv->num_grps > 1)
		gfar_write_isrg(priv);
}

static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}
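
/* The hash_width values follow from the register count: the extended
 * table spans 16 32-bit registers (igaddr0-7 plus gaddr0-7), i.e.
 * 512 bins addressed by a 9-bit hash, while the standard table's
 * 8 registers give 256 bins and an 8-bit hash.
 */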
bb40dcbb | 1310 | /* Set up the ethernet device structure, private data, |
0977f817 JC |
1311 | * and anything else we need before we start |
1312 | */ | |
74888760 | 1313 | static int gfar_probe(struct platform_device *ofdev) |
1da177e4 | 1314 | { |
42c70042 | 1315 | struct device_node *np = ofdev->dev.of_node; |
1da177e4 LT |
1316 | struct net_device *dev = NULL; |
1317 | struct gfar_private *priv = NULL; | |
20862788 | 1318 | int err = 0, i; |
1da177e4 | 1319 | |
fba4ed03 | 1320 | err = gfar_of_init(ofdev, &dev); |
1da177e4 | 1321 | |
fba4ed03 SG |
1322 | if (err) |
1323 | return err; | |
1da177e4 LT |
1324 | |
1325 | priv = netdev_priv(dev); | |
4826857f KG |
1326 | priv->ndev = dev; |
1327 | priv->ofdev = ofdev; | |
369ec162 | 1328 | priv->dev = &ofdev->dev; |
4826857f | 1329 | SET_NETDEV_DEV(dev, &ofdev->dev); |
1da177e4 | 1330 | |
ab939905 | 1331 | INIT_WORK(&priv->reset_task, gfar_reset_task); |
1da177e4 | 1332 | |
8513fbd8 | 1333 | platform_set_drvdata(ofdev, priv); |
1da177e4 | 1334 | |
7d350977 AV |
1335 | gfar_detect_errata(priv); |
1336 | ||
1da177e4 | 1337 | /* Set the dev->base_addr to the gfar reg region */ |
20862788 | 1338 | dev->base_addr = (unsigned long) priv->gfargrp[0].regs; |
1da177e4 | 1339 | |
1da177e4 | 1340 | /* Fill in the dev structure */ |
1da177e4 | 1341 | dev->watchdog_timeo = TX_TIMEOUT; |
44770e11 | 1342 | /* MTU range: 50 - 9586 */ |
1da177e4 | 1343 | dev->mtu = 1500; |
44770e11 JW |
1344 | dev->min_mtu = 50; |
1345 | dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN; | |
26ccfc37 | 1346 | dev->netdev_ops = &gfar_netdev_ops; |
0bbaf069 KG |
1347 | dev->ethtool_ops = &gfar_ethtool_ops; |
1348 | ||
fba4ed03 | 1349 | /* Register for napi ...We are registering NAPI for each grp */ |
71ff9e3d CM |
1350 | for (i = 0; i < priv->num_grps; i++) { |
1351 | if (priv->poll_mode == GFAR_SQ_POLLING) { | |
1352 | netif_napi_add(dev, &priv->gfargrp[i].napi_rx, | |
1353 | gfar_poll_rx_sq, GFAR_DEV_WEIGHT); | |
d64b5e85 | 1354 | netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, |
71ff9e3d CM |
1355 | gfar_poll_tx_sq, 2); |
1356 | } else { | |
aeb12c5e CM |
1357 | netif_napi_add(dev, &priv->gfargrp[i].napi_rx, |
1358 | gfar_poll_rx, GFAR_DEV_WEIGHT); | |
d64b5e85 | 1359 | netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, |
aeb12c5e CM |
1360 | gfar_poll_tx, 2); |
1361 | } | |
1362 | } | |
a12f801d | 1363 | |
b31a1d8b | 1364 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { |
8b3afe95 | 1365 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | |
bc4598bc | 1366 | NETIF_F_RXCSUM; |
8b3afe95 | 1367 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | |
bc4598bc | 1368 | NETIF_F_RXCSUM | NETIF_F_HIGHDMA; |
8b3afe95 | 1369 | } |
0bbaf069 | 1370 | |
87c288c6 | 1371 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { |
f646968f PM |
1372 | dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | |
1373 | NETIF_F_HW_VLAN_CTAG_RX; | |
1374 | dev->features |= NETIF_F_HW_VLAN_CTAG_RX; | |
87c288c6 | 1375 | } |
0bbaf069 | 1376 | |
3d23a05c CM |
1377 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
1378 | ||
20862788 | 1379 | gfar_init_addr_hash_table(priv); |
0bbaf069 | 1380 | |
58117672 ZC |
1381 | /* Insert receive time stamps into padding alignment bytes, and |
1382 | * plus 2 bytes padding to ensure the cpu alignment. | |
1383 | */ | |
532c37bc | 1384 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) |
58117672 | 1385 | priv->padding = 8 + DEFAULT_PADDING; |
0bbaf069 | 1386 | |
cc772ab7 | 1387 | if (dev->features & NETIF_F_IP_CSUM || |
bc4598bc | 1388 | priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) |
bee9e58c | 1389 | dev->needed_headroom = GMAC_FCB_LEN; |
1da177e4 | 1390 | |
a12f801d | 1391 | /* Initializing some of the rx/tx queue level parameters */ |
fba4ed03 SG |
1392 | for (i = 0; i < priv->num_tx_queues; i++) { |
1393 | priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; | |
1394 | priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; | |
1395 | priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; | |
1396 | priv->tx_queue[i]->txic = DEFAULT_TXIC; | |
1397 | } | |
a12f801d | 1398 | |
fba4ed03 SG |
1399 | for (i = 0; i < priv->num_rx_queues; i++) { |
1400 | priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; | |
1401 | priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; | |
1402 | priv->rx_queue[i]->rxic = DEFAULT_RXIC; | |
1403 | } | |
1da177e4 | 1404 | |
7bff47da HM |
1405 | /* Always enable rx filer if available */ |
1406 | priv->rx_filer_enable = | |
1407 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0; | |
0bbaf069 KG |
1408 | /* Enable most messages by default */ |
1409 | priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; | |
b98b8bab CM |
1410 | /* use pritority h/w tx queue scheduling for single queue devices */ |
1411 | if (priv->num_tx_queues == 1) | |
1412 | priv->prio_sched_en = 1; | |
0bbaf069 | 1413 | |
0851133b CM |
1414 | set_bit(GFAR_DOWN, &priv->state); |
1415 | ||
a328ac92 | 1416 | gfar_hw_init(priv); |
d3eab82b | 1417 | |
d4c642ea FE |
1418 | /* Carrier starts down, phylib will bring it up */ |
1419 | netif_carrier_off(dev); | |
1420 | ||
1da177e4 LT |
1421 | err = register_netdev(dev); |
1422 | ||
1423 | if (err) { | |
59deab26 | 1424 | pr_err("%s: Cannot register net device, aborting\n", dev->name); |
1da177e4 LT |
1425 | goto register_fail; |
1426 | } | |
1427 | ||
3e905b80 CM |
1428 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) |
1429 | priv->wol_supported |= GFAR_WOL_MAGIC; | |
1430 | ||
1431 | if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) && | |
1432 | priv->rx_filer_enable) | |
1433 | priv->wol_supported |= GFAR_WOL_FILER_UCAST; | |
1434 | ||
1435 | device_set_wakeup_capable(&ofdev->dev, priv->wol_supported); | |
2884e5cc | 1436 | |
c50a5d9a | 1437 | /* fill out IRQ number and name fields */ |
46ceb60c | 1438 | for (i = 0; i < priv->num_grps; i++) { |
ee873fda | 1439 | struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
46ceb60c | 1440 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
ee873fda | 1441 | sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s", |
0015e551 | 1442 | dev->name, "_g", '0' + i, "_tx"); |
ee873fda | 1443 | sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s", |
0015e551 | 1444 | dev->name, "_g", '0' + i, "_rx"); |
ee873fda | 1445 | sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s", |
0015e551 | 1446 | dev->name, "_g", '0' + i, "_er"); |
46ceb60c | 1447 | } else |
ee873fda | 1448 | strcpy(gfar_irq(grp, TX)->name, dev->name); |
46ceb60c | 1449 | } |
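/* For a device named eth0 with multiple interrupt groups, the loop above
 * produces IRQ names such as "eth0_g0_tx", "eth0_g0_rx" and "eth0_g0_er"
 * (then "eth0_g1_*", ...); single-interrupt devices simply reuse "eth0".
 */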
c50a5d9a | 1450 | |
7a8b3372 SG |
1451 | /* Initialize the filer table */ |
1452 | gfar_init_filer_table(priv); | |
1453 | ||
1da177e4 | 1454 | /* Print out the device info */ |
59deab26 | 1455 | netdev_info(dev, "mac: %pM\n", dev->dev_addr); |
1da177e4 | 1456 | |
0977f817 JC |
1457 | /* Even more device info helps when determining which kernel |
1458 | * provided which set of benchmarks. | |
1459 | */ | |
59deab26 | 1460 | netdev_info(dev, "Running with NAPI enabled\n"); |
fba4ed03 | 1461 | for (i = 0; i < priv->num_rx_queues; i++) |
59deab26 JP |
1462 | netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", |
1463 | i, priv->rx_queue[i]->rx_ring_size); | |
bc4598bc | 1464 | for (i = 0; i < priv->num_tx_queues; i++) |
59deab26 JP |
1465 | netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", |
1466 | i, priv->tx_queue[i]->tx_ring_size); | |
1da177e4 LT |
1467 | |
1468 | return 0; | |
1469 | ||
1470 | register_fail: | |
42c70042 JH |
1471 | if (of_phy_is_fixed_link(np)) |
1472 | of_phy_deregister_fixed_link(np); | |
46ceb60c | 1473 | unmap_group_regs(priv); |
20862788 CM |
1474 | gfar_free_rx_queues(priv); |
1475 | gfar_free_tx_queues(priv); | |
888c88b8 UKK |
1476 | of_node_put(priv->phy_node); |
1477 | of_node_put(priv->tbi_node); | |
ee873fda | 1478 | free_gfar_dev(priv); |
bb40dcbb | 1479 | return err; |
1da177e4 LT |
1480 | } |
1481 | ||
2dc11581 | 1482 | static int gfar_remove(struct platform_device *ofdev) |
1da177e4 | 1483 | { |
8513fbd8 | 1484 | struct gfar_private *priv = platform_get_drvdata(ofdev); |
42c70042 | 1485 | struct device_node *np = ofdev->dev.of_node; |
1da177e4 | 1486 | |
888c88b8 UKK |
1487 | of_node_put(priv->phy_node); |
1488 | of_node_put(priv->tbi_node); | |
fe192a49 | 1489 | |
d9d8e041 | 1490 | unregister_netdev(priv->ndev); |
42c70042 JH |
1491 | |
1492 | if (of_phy_is_fixed_link(np)) | |
1493 | of_phy_deregister_fixed_link(np); | |
1494 | ||
46ceb60c | 1495 | unmap_group_regs(priv); |
20862788 CM |
1496 | gfar_free_rx_queues(priv); |
1497 | gfar_free_tx_queues(priv); | |
ee873fda | 1498 | free_gfar_dev(priv); |
1da177e4 LT |
1499 | |
1500 | return 0; | |
1501 | } | |
1502 | ||
d87eb127 | 1503 | #ifdef CONFIG_PM |
be926fc4 | 1504 | |
3e905b80 CM |
1505 | static void __gfar_filer_disable(struct gfar_private *priv) |
1506 | { | |
1507 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | |
1508 | u32 temp; | |
1509 | ||
1510 | temp = gfar_read(®s->rctrl); | |
1511 | temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT); | |
1512 | gfar_write(®s->rctrl, temp); | |
1513 | } | |
1514 | ||
1515 | static void __gfar_filer_enable(struct gfar_private *priv) | |
1516 | { | |
1517 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | |
1518 | u32 temp; | |
1519 | ||
1520 | temp = gfar_read(®s->rctrl); | |
1521 | temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; | |
1522 | gfar_write(®s->rctrl, temp); | |
1523 | } | |
1524 | ||
1525 | /* Filer rules implementing wol capabilities */ | |
1526 | static void gfar_filer_config_wol(struct gfar_private *priv) | |
1527 | { | |
1528 | unsigned int i; | |
1529 | u32 rqfcr; | |
1530 | ||
1531 | __gfar_filer_disable(priv); | |
1532 | ||
1533 | /* clear the filer table, reject any packet by default */ | |
1534 | rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH; | |
1535 | for (i = 0; i <= MAX_FILER_IDX; i++) | |
1536 | gfar_write_filer(priv, i, rqfcr, 0); | |
1537 | ||
1538 | i = 0; | |
1539 | if (priv->wol_opts & GFAR_WOL_FILER_UCAST) { | |
1540 | /* unicast packet, accept it */ | |
1541 | struct net_device *ndev = priv->ndev; | |
1542 | /* get the default rx queue index */ | |
1543 | u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex; | |
1544 | u32 dest_mac_addr = (ndev->dev_addr[0] << 16) | | |
1545 | (ndev->dev_addr[1] << 8) | | |
1546 | ndev->dev_addr[2]; | |
1547 | ||
1548 | rqfcr = (qindex << 10) | RQFCR_AND | | |
1549 | RQFCR_CMP_EXACT | RQFCR_PID_DAH; | |
1550 | ||
1551 | gfar_write_filer(priv, i++, rqfcr, dest_mac_addr); | |
1552 | ||
1553 | dest_mac_addr = (ndev->dev_addr[3] << 16) | | |
1554 | (ndev->dev_addr[4] << 8) | | |
1555 | ndev->dev_addr[5]; | |
1556 | rqfcr = (qindex << 10) | RQFCR_GPI | | |
1557 | RQFCR_CMP_EXACT | RQFCR_PID_DAL; | |
1558 | gfar_write_filer(priv, i++, rqfcr, dest_mac_addr); | |
1559 | } | |
1560 | ||
1561 | __gfar_filer_enable(priv); | |
1562 | } | |
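/* A worked example for the two rules above, assuming a hypothetical
 * station address 00:04:9f:01:02:03: the DAH rule matches the upper three
 * bytes (0x00049f) and chains via RQFCR_AND into the DAL rule matching the
 * lower three bytes (0x010203); only a frame matching both raises the
 * filer general purpose interrupt and lands on the default Rx queue
 * (qindex).
 */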
1563 | ||
1564 | static void gfar_filer_restore_table(struct gfar_private *priv) | |
1565 | { | |
1566 | u32 rqfcr, rqfpr; | |
1567 | unsigned int i; | |
1568 | ||
1569 | __gfar_filer_disable(priv); | |
1570 | ||
1571 | for (i = 0; i <= MAX_FILER_IDX; i++) { | |
1572 | rqfcr = priv->ftp_rqfcr[i]; | |
1573 | rqfpr = priv->ftp_rqfpr[i]; | |
1574 | gfar_write_filer(priv, i, rqfcr, rqfpr); | |
1575 | } | |
1576 | ||
1577 | __gfar_filer_enable(priv); | |
1578 | } | |
1579 | ||
1580 | /* gfar_start() for Rx only and with the FGPI filer interrupt enabled */ | |
1581 | static void gfar_start_wol_filer(struct gfar_private *priv) | |
1582 | { | |
1583 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | |
1584 | u32 tempval; | |
1585 | int i = 0; | |
1586 | ||
1587 | /* Enable Rx hw queues */ | |
1588 | gfar_write(®s->rqueue, priv->rqueue); | |
1589 | ||
1590 | /* Initialize DMACTRL to have WWR and WOP */ | |
1591 | tempval = gfar_read(®s->dmactrl); | |
1592 | tempval |= DMACTRL_INIT_SETTINGS; | |
1593 | gfar_write(®s->dmactrl, tempval); | |
1594 | ||
1595 | /* Make sure we aren't stopped */ | |
1596 | tempval = gfar_read(®s->dmactrl); | |
1597 | tempval &= ~DMACTRL_GRS; | |
1598 | gfar_write(®s->dmactrl, tempval); | |
1599 | ||
1600 | for (i = 0; i < priv->num_grps; i++) { | |
1601 | regs = priv->gfargrp[i].regs; | |
1602 | /* Clear RHLT, so that the DMA starts polling now */ | |
1603 | gfar_write(®s->rstat, priv->gfargrp[i].rstat); | |
1604 | /* enable the Filer General Purpose Interrupt */ | |
1605 | gfar_write(®s->imask, IMASK_FGPI); | |
1606 | } | |
1607 | ||
1608 | /* Enable Rx DMA */ | |
1609 | tempval = gfar_read(®s->maccfg1); | |
1610 | tempval |= MACCFG1_RX_EN; | |
1611 | gfar_write(®s->maccfg1, tempval); | |
1612 | } | |
1613 | ||
be926fc4 | 1614 | static int gfar_suspend(struct device *dev) |
d87eb127 | 1615 | { |
be926fc4 AV |
1616 | struct gfar_private *priv = dev_get_drvdata(dev); |
1617 | struct net_device *ndev = priv->ndev; | |
46ceb60c | 1618 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 | 1619 | u32 tempval; |
3e905b80 | 1620 | u16 wol = priv->wol_opts; |
d87eb127 | 1621 | |
614b4242 CM |
1622 | if (!netif_running(ndev)) |
1623 | return 0; | |
1624 | ||
1625 | disable_napi(priv); | |
1626 | netif_tx_lock(ndev); | |
be926fc4 | 1627 | netif_device_detach(ndev); |
614b4242 | 1628 | netif_tx_unlock(ndev); |
d87eb127 | 1629 | |
614b4242 | 1630 | gfar_halt(priv); |
fba4ed03 | 1631 | |
3e905b80 | 1632 | if (wol & GFAR_WOL_MAGIC) { |
614b4242 CM |
1633 | /* Enable interrupt on Magic Packet */ |
1634 | gfar_write(®s->imask, IMASK_MAG); | |
d87eb127 | 1635 | |
614b4242 CM |
1636 | /* Enable Magic Packet mode */ |
1637 | tempval = gfar_read(®s->maccfg2); | |
1638 | tempval |= MACCFG2_MPEN; | |
1639 | gfar_write(®s->maccfg2, tempval); | |
d87eb127 | 1640 | |
614b4242 | 1641 | /* re-enable the Rx block */ |
f4983704 | 1642 | tempval = gfar_read(®s->maccfg1); |
614b4242 | 1643 | tempval |= MACCFG1_RX_EN; |
f4983704 | 1644 | gfar_write(®s->maccfg1, tempval); |
d87eb127 | 1645 | |
3e905b80 CM |
1646 | } else if (wol & GFAR_WOL_FILER_UCAST) { |
1647 | gfar_filer_config_wol(priv); | |
1648 | gfar_start_wol_filer(priv); | |
1649 | ||
614b4242 | 1650 | } else { |
4c4a6b0e | 1651 | phy_stop(ndev->phydev); |
d87eb127 SW |
1652 | } |
1653 | ||
1654 | return 0; | |
1655 | } | |
1656 | ||
be926fc4 | 1657 | static int gfar_resume(struct device *dev) |
d87eb127 | 1658 | { |
be926fc4 AV |
1659 | struct gfar_private *priv = dev_get_drvdata(dev); |
1660 | struct net_device *ndev = priv->ndev; | |
46ceb60c | 1661 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 | 1662 | u32 tempval; |
3e905b80 | 1663 | u16 wol = priv->wol_opts; |
d87eb127 | 1664 | |
614b4242 | 1665 | if (!netif_running(ndev)) |
d87eb127 | 1666 | return 0; |
d87eb127 | 1667 | |
3e905b80 | 1668 | if (wol & GFAR_WOL_MAGIC) { |
614b4242 CM |
1669 | /* Disable Magic Packet mode */ |
1670 | tempval = gfar_read(®s->maccfg2); | |
1671 | tempval &= ~MACCFG2_MPEN; | |
1672 | gfar_write(®s->maccfg2, tempval); | |
3e905b80 CM |
1673 | |
1674 | } else if (wol & GFAR_WOL_FILER_UCAST) { | |
1675 | /* need to stop rx only, tx is already down */ | |
1676 | gfar_halt(priv); | |
1677 | gfar_filer_restore_table(priv); | |
1678 | ||
614b4242 | 1679 | } else { |
4c4a6b0e | 1680 | phy_start(ndev->phydev); |
614b4242 | 1681 | } |
d87eb127 | 1682 | |
c10650b6 | 1683 | gfar_start(priv); |
d87eb127 | 1684 | |
be926fc4 | 1685 | netif_device_attach(ndev); |
46ceb60c | 1686 | enable_napi(priv); |
be926fc4 AV |
1687 | |
1688 | return 0; | |
1689 | } | |
1690 | ||
1691 | static int gfar_restore(struct device *dev) | |
1692 | { | |
1693 | struct gfar_private *priv = dev_get_drvdata(dev); | |
1694 | struct net_device *ndev = priv->ndev; | |
1695 | ||
103cdd1d WD |
1696 | if (!netif_running(ndev)) { |
1697 | netif_device_attach(ndev); | |
1698 | ||
be926fc4 | 1699 | return 0; |
103cdd1d | 1700 | } |
be926fc4 | 1701 | |
76f31e8b | 1702 | gfar_init_bds(ndev); |
1eb8f7a7 | 1703 | |
a328ac92 CM |
1704 | gfar_mac_reset(priv); |
1705 | ||
1706 | gfar_init_tx_rx_base(priv); | |
1707 | ||
c10650b6 | 1708 | gfar_start(priv); |
be926fc4 AV |
1709 | |
1710 | priv->oldlink = 0; | |
1711 | priv->oldspeed = 0; | |
1712 | priv->oldduplex = -1; | |
1713 | ||
4c4a6b0e PR |
1714 | if (ndev->phydev) |
1715 | phy_start(ndev->phydev); | |
d87eb127 | 1716 | |
be926fc4 | 1717 | netif_device_attach(ndev); |
5ea681d4 | 1718 | enable_napi(priv); |
d87eb127 SW |
1719 | |
1720 | return 0; | |
1721 | } | |
be926fc4 | 1722 | |
ee27244b | 1723 | static const struct dev_pm_ops gfar_pm_ops = { |
be926fc4 AV |
1724 | .suspend = gfar_suspend, |
1725 | .resume = gfar_resume, | |
1726 | .freeze = gfar_suspend, | |
1727 | .thaw = gfar_resume, | |
1728 | .restore = gfar_restore, | |
1729 | }; | |
1730 | ||
1731 | #define GFAR_PM_OPS (&gfar_pm_ops) | |
1732 | ||
d87eb127 | 1733 | #else |
be926fc4 AV |
1734 | |
1735 | #define GFAR_PM_OPS NULL | |
be926fc4 | 1736 | |
d87eb127 | 1737 | #endif |
1da177e4 | 1738 | |
e8a2b6a4 AF |
1739 | /* Reads the controller's registers to determine what interface |
1740 | * connects it to the PHY. | |
1741 | */ | |
1742 | static phy_interface_t gfar_get_interface(struct net_device *dev) | |
1743 | { | |
1744 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1745 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
f4983704 SG |
1746 | u32 ecntrl; |
1747 | ||
f4983704 | 1748 | ecntrl = gfar_read(®s->ecntrl); |
e8a2b6a4 AF |
1749 | |
1750 | if (ecntrl & ECNTRL_SGMII_MODE) | |
1751 | return PHY_INTERFACE_MODE_SGMII; | |
1752 | ||
1753 | if (ecntrl & ECNTRL_TBI_MODE) { | |
1754 | if (ecntrl & ECNTRL_REDUCED_MODE) | |
1755 | return PHY_INTERFACE_MODE_RTBI; | |
1756 | else | |
1757 | return PHY_INTERFACE_MODE_TBI; | |
1758 | } | |
1759 | ||
1760 | if (ecntrl & ECNTRL_REDUCED_MODE) { | |
bc4598bc | 1761 | if (ecntrl & ECNTRL_REDUCED_MII_MODE) { |
e8a2b6a4 | 1762 | return PHY_INTERFACE_MODE_RMII; |
bc4598bc | 1763 | } |
7132ab7f | 1764 | else { |
b31a1d8b | 1765 | phy_interface_t interface = priv->interface; |
7132ab7f | 1766 | |
0977f817 | 1767 | /* This isn't autodetected right now, so it must |
7132ab7f AF |
1768 | * be set by the device tree or platform code. |
1769 | */ | |
1770 | if (interface == PHY_INTERFACE_MODE_RGMII_ID) | |
1771 | return PHY_INTERFACE_MODE_RGMII_ID; | |
1772 | ||
e8a2b6a4 | 1773 | return PHY_INTERFACE_MODE_RGMII; |
7132ab7f | 1774 | } |
e8a2b6a4 AF |
1775 | } |
1776 | ||
b31a1d8b | 1777 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) |
e8a2b6a4 AF |
1778 | return PHY_INTERFACE_MODE_GMII; |
1779 | ||
1780 | return PHY_INTERFACE_MODE_MII; | |
1781 | } | |
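/* Summary of the ECNTRL decode above:
 *   SGMII_MODE                      -> SGMII
 *   TBI_MODE (+ REDUCED_MODE)       -> TBI (or RTBI)
 *   REDUCED_MODE + REDUCED_MII_MODE -> RMII
 *   REDUCED_MODE alone              -> RGMII (RGMII_ID if set by platform)
 *   otherwise                       -> GMII if gigabit-capable, else MII
 */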
1782 | ||
1783 | ||
bb40dcbb AF |
1784 | /* Initializes driver's PHY state, and attaches to the PHY. |
1785 | * Returns 0 on success. | |
1da177e4 LT |
1786 | */ |
1787 | static int init_phy(struct net_device *dev) | |
1788 | { | |
1789 | struct gfar_private *priv = netdev_priv(dev); | |
bb40dcbb | 1790 | uint gigabit_support = |
b31a1d8b | 1791 | priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? |
23402bdd | 1792 | GFAR_SUPPORTED_GBIT : 0; |
e8a2b6a4 | 1793 | phy_interface_t interface; |
4c4a6b0e | 1794 | struct phy_device *phydev; |
b6b5e8a6 | 1795 | struct ethtool_eee edata; |
1da177e4 LT |
1796 | |
1797 | priv->oldlink = 0; | |
1798 | priv->oldspeed = 0; | |
1799 | priv->oldduplex = -1; | |
1800 | ||
e8a2b6a4 AF |
1801 | interface = gfar_get_interface(dev); |
1802 | ||
4c4a6b0e PR |
1803 | phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, |
1804 | interface); | |
1805 | if (!phydev) { | |
1db780f8 AV |
1806 | dev_err(&dev->dev, "could not attach to PHY\n"); |
1807 | return -ENODEV; | |
fe192a49 | 1808 | } |
1da177e4 | 1809 | |
d3c12873 KJ |
1810 | if (interface == PHY_INTERFACE_MODE_SGMII) |
1811 | gfar_configure_serdes(dev); | |
1812 | ||
bb40dcbb | 1813 | /* Remove any features not supported by the controller */ |
4c4a6b0e PR |
1814 | phydev->supported &= (GFAR_SUPPORTED | gigabit_support); |
1815 | phydev->advertising = phydev->supported; | |
1da177e4 | 1816 | |
cf987afc | 1817 | /* Add support for flow control, but don't advertise it by default */ |
4c4a6b0e | 1818 | phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); |
cf987afc | 1819 | |
b6b5e8a6 CM |
1820 | /* disable EEE autoneg, EEE not supported by eTSEC */ |
1821 | memset(&edata, 0, sizeof(struct ethtool_eee)); | |
1822 | phy_ethtool_set_eee(phydev, &edata); | |
1823 | ||
1da177e4 | 1824 | return 0; |
1da177e4 LT |
1825 | } |
1826 | ||
0977f817 | 1827 | /* Initialize TBI PHY interface for communicating with the |
d0313587 PG |
1828 | * SERDES lynx PHY on the chip. We communicate with this PHY |
1829 | * through the MDIO bus on each controller, treating it as a | |
1830 | * "normal" PHY at the address found in the TBIPA register. We assume | |
1831 | * that the TBIPA register is valid. Either the MDIO bus code will set | |
1832 | * it to a value that doesn't conflict with other PHYs on the bus, or the | |
1833 | * value doesn't matter, as there are no other PHYs on the bus. | |
1834 | */ | |
d3c12873 KJ |
1835 | static void gfar_configure_serdes(struct net_device *dev) |
1836 | { | |
1837 | struct gfar_private *priv = netdev_priv(dev); | |
fe192a49 GL |
1838 | struct phy_device *tbiphy; |
1839 | ||
1840 | if (!priv->tbi_node) { | |
1841 | dev_warn(&dev->dev, "error: SGMII mode requires that the " | |
1842 | "device tree specify a tbi-handle\n"); | |
1843 | return; | |
1844 | } | |
c132419e | 1845 | |
fe192a49 GL |
1846 | tbiphy = of_phy_find_device(priv->tbi_node); |
1847 | if (!tbiphy) { | |
1848 | dev_err(&dev->dev, "error: Could not get TBI device\n"); | |
b31a1d8b AF |
1849 | return; |
1850 | } | |
d3c12873 | 1851 | |
0977f817 | 1852 | /* If the link is already up, we must already be ok, and don't need to |
bdb59f94 TP |
1853 | * configure and reset the TBI<->SerDes link. Maybe U-Boot configured |
1854 | * everything for us? Resetting it takes the link down and requires | |
1855 | * several seconds for it to come back. | |
1856 | */ | |
38737e49 | 1857 | if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { |
e5a03bfd | 1858 | put_device(&tbiphy->mdio.dev); |
b31a1d8b | 1859 | return; |
38737e49 | 1860 | } |
d3c12873 | 1861 | |
d0313587 | 1862 | /* Single clk mode, mii mode off (for serdes communication) */ |
fe192a49 | 1863 | phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); |
d3c12873 | 1864 | |
fe192a49 | 1865 | phy_write(tbiphy, MII_ADVERTISE, |
bc4598bc JC |
1866 | ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | |
1867 | ADVERTISE_1000XPSE_ASYM); | |
d3c12873 | 1868 | |
bc4598bc JC |
1869 | phy_write(tbiphy, MII_BMCR, |
1870 | BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | | |
1871 | BMCR_SPEED1000); | |
04d53b20 | 1872 | |
e5a03bfd | 1873 | put_device(&tbiphy->mdio.dev); |
d3c12873 KJ |
1874 | } |
1875 | ||
511d934f AV |
1876 | static int __gfar_is_rx_idle(struct gfar_private *priv) |
1877 | { | |
1878 | u32 res; | |
1879 | ||
0977f817 | 1880 | /* Normally TSEC should not hang on GRS commands, so we should |
511d934f AV |
1881 | * actually wait for the IEVENT_GRSC flag. |
1882 | */ | |
ad3660c2 | 1883 | if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) |
511d934f AV |
1884 | return 0; |
1885 | ||
0977f817 | 1886 | /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are |
511d934f AV |
1887 | * the same as bits 23-30, the eTSEC Rx is assumed to be idle |
1888 | * and the Rx can be safely reset. | |
1889 | */ | |
1890 | res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); | |
1891 | res &= 0x7f807f80; | |
1892 | if ((res & 0xffff) == (res >> 16)) | |
1893 | return 1; | |
1894 | ||
1895 | return 0; | |
1896 | } | |
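/* A minimal sketch (not driver code) of the idle test above: after the
 * 0x7f807f80 mask only bits 7-14 and 23-30 survive, so the Rx side is
 * considered idle exactly when the two masked half-words match, e.g. a
 * raw value of 0x55aa55aa masks to 0x55805580 and reads as idle.
 */
static inline int gfar_rx_idle_example(u32 res)
{
	res &= 0x7f807f80;			/* keep bits 7-14 and 23-30 */
	return (res & 0xffff) == (res >> 16);	/* halves equal -> idle */
}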
0bbaf069 KG |
1897 | |
1898 | /* Halt the receive and transmit queues */ | |
c10650b6 | 1899 | static void gfar_halt_nodisable(struct gfar_private *priv) |
1da177e4 | 1900 | { |
efeddce7 | 1901 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1da177e4 | 1902 | u32 tempval; |
a4feee89 CM |
1903 | unsigned int timeout; |
1904 | int stopped; | |
1da177e4 | 1905 | |
efeddce7 | 1906 | gfar_ints_disable(priv); |
1da177e4 | 1907 | |
a4feee89 CM |
1908 | if (gfar_is_dma_stopped(priv)) |
1909 | return; | |
1910 | ||
1da177e4 | 1911 | /* Stop the DMA, and wait for it to stop */ |
f4983704 | 1912 | tempval = gfar_read(®s->dmactrl); |
a4feee89 CM |
1913 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); |
1914 | gfar_write(®s->dmactrl, tempval); | |
1915 | ||
1916 | retry: | |
1917 | timeout = 1000; | |
1918 | while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) { | |
1919 | cpu_relax(); | |
1920 | timeout--; | |
1da177e4 | 1921 | } |
a4feee89 CM |
1922 | |
1923 | if (!timeout) | |
1924 | stopped = gfar_is_dma_stopped(priv); | |
1925 | ||
1926 | if (!stopped && !gfar_is_rx_dma_stopped(priv) && | |
1927 | !__gfar_is_rx_idle(priv)) | |
1928 | goto retry; | |
d87eb127 | 1929 | } |
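/* The graceful-stop sequence above: set DMACTRL[GRS|GTS], poll the stop
 * condition for up to 1000 iterations, and retry only while the A002
 * workaround reports the Rx side as not yet idle.
 */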
d87eb127 SW |
1930 | |
1931 | /* Halt the receive and transmit queues */ | |
c10650b6 | 1932 | void gfar_halt(struct gfar_private *priv) |
d87eb127 | 1933 | { |
46ceb60c | 1934 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 | 1935 | u32 tempval; |
1da177e4 | 1936 | |
c10650b6 CM |
1937 | /* Disable the Rx/Tx hw queues */ |
1938 | gfar_write(®s->rqueue, 0); | |
1939 | gfar_write(®s->tqueue, 0); | |
2a54adc3 | 1940 | |
c10650b6 CM |
1941 | mdelay(10); |
1942 | ||
1943 | gfar_halt_nodisable(priv); | |
1944 | ||
1945 | /* Disable Rx/Tx DMA */ | |
1da177e4 LT |
1946 | tempval = gfar_read(®s->maccfg1); |
1947 | tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); | |
1948 | gfar_write(®s->maccfg1, tempval); | |
0bbaf069 KG |
1949 | } |
1950 | ||
1951 | void stop_gfar(struct net_device *dev) | |
1952 | { | |
1953 | struct gfar_private *priv = netdev_priv(dev); | |
0bbaf069 | 1954 | |
0851133b | 1955 | netif_tx_stop_all_queues(dev); |
bb40dcbb | 1956 | |
4e857c58 | 1957 | smp_mb__before_atomic(); |
0851133b | 1958 | set_bit(GFAR_DOWN, &priv->state); |
4e857c58 | 1959 | smp_mb__after_atomic(); |
a12f801d | 1960 | |
0851133b | 1961 | disable_napi(priv); |
0bbaf069 | 1962 | |
0851133b | 1963 | /* disable ints and gracefully shut down Rx/Tx DMA */ |
c10650b6 | 1964 | gfar_halt(priv); |
1da177e4 | 1965 | |
4c4a6b0e | 1966 | phy_stop(dev->phydev); |
1da177e4 | 1967 | |
1da177e4 | 1968 | free_skb_resources(priv); |
1da177e4 LT |
1969 | } |
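/* Note on the barriers above: set_bit() is not ordered by itself, so the
 * smp_mb__{before,after}_atomic() pair makes GFAR_DOWN globally visible
 * before NAPI is disabled and the DMA is halted.
 */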
1970 | ||
fba4ed03 | 1971 | static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) |
1da177e4 | 1972 | { |
1da177e4 | 1973 | struct txbd8 *txbdp; |
fba4ed03 | 1974 | struct gfar_private *priv = netdev_priv(tx_queue->dev); |
4669bc90 | 1975 | int i, j; |
1da177e4 | 1976 | |
a12f801d | 1977 | txbdp = tx_queue->tx_bd_base; |
1da177e4 | 1978 | |
a12f801d SG |
1979 | for (i = 0; i < tx_queue->tx_ring_size; i++) { |
1980 | if (!tx_queue->tx_skbuff[i]) | |
4669bc90 | 1981 | continue; |
1da177e4 | 1982 | |
a7312d58 CM |
1983 | dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr), |
1984 | be16_to_cpu(txbdp->length), DMA_TO_DEVICE); | |
4669bc90 | 1985 | txbdp->lstatus = 0; |
fba4ed03 | 1986 | for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; |
bc4598bc | 1987 | j++) { |
4669bc90 | 1988 | txbdp++; |
a7312d58 CM |
1989 | dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr), |
1990 | be16_to_cpu(txbdp->length), | |
1991 | DMA_TO_DEVICE); | |
1da177e4 | 1992 | } |
ad5da7ab | 1993 | txbdp++; |
a12f801d SG |
1994 | dev_kfree_skb_any(tx_queue->tx_skbuff[i]); |
1995 | tx_queue->tx_skbuff[i] = NULL; | |
1da177e4 | 1996 | } |
a12f801d | 1997 | kfree(tx_queue->tx_skbuff); |
1eb8f7a7 | 1998 | tx_queue->tx_skbuff = NULL; |
fba4ed03 | 1999 | } |
1da177e4 | 2000 | |
fba4ed03 SG |
2001 | static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) |
2002 | { | |
fba4ed03 | 2003 | int i; |
1da177e4 | 2004 | |
75354148 CM |
2005 | struct rxbd8 *rxbdp = rx_queue->rx_bd_base; |
2006 | ||
2007 | if (rx_queue->skb) | |
2008 | dev_kfree_skb(rx_queue->skb); | |
1da177e4 | 2009 | |
a12f801d | 2010 | for (i = 0; i < rx_queue->rx_ring_size; i++) { |
75354148 CM |
2011 | struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; |
2012 | ||
e69edd21 AV |
2013 | rxbdp->lstatus = 0; |
2014 | rxbdp->bufPtr = 0; | |
2015 | rxbdp++; | |
75354148 CM |
2016 | |
2017 | if (!rxb->page) | |
2018 | continue; | |
2019 | ||
4af0e5bb AS |
2020 | dma_unmap_page(rx_queue->dev, rxb->dma, |
2021 | PAGE_SIZE, DMA_FROM_DEVICE); | |
75354148 CM |
2022 | __free_page(rxb->page); |
2023 | ||
2024 | rxb->page = NULL; | |
1da177e4 | 2025 | } |
75354148 CM |
2026 | |
2027 | kfree(rx_queue->rx_buff); | |
2028 | rx_queue->rx_buff = NULL; | |
fba4ed03 | 2029 | } |
e69edd21 | 2030 | |
fba4ed03 | 2031 | /* If there are any tx skbs or rx skbs still around, free them. |
0977f817 JC |
2032 | * Then free tx_skbuff and rx_skbuff |
2033 | */ | |
fba4ed03 SG |
2034 | static void free_skb_resources(struct gfar_private *priv) |
2035 | { | |
2036 | struct gfar_priv_tx_q *tx_queue = NULL; | |
2037 | struct gfar_priv_rx_q *rx_queue = NULL; | |
2038 | int i; | |
2039 | ||
2040 | /* Go through all the buffer descriptors and free their data buffers */ | |
2041 | for (i = 0; i < priv->num_tx_queues; i++) { | |
d8a0f1b0 | 2042 | struct netdev_queue *txq; |
bc4598bc | 2043 | |
fba4ed03 | 2044 | tx_queue = priv->tx_queue[i]; |
d8a0f1b0 | 2045 | txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); |
bc4598bc | 2046 | if (tx_queue->tx_skbuff) |
fba4ed03 | 2047 | free_skb_tx_queue(tx_queue); |
d8a0f1b0 | 2048 | netdev_tx_reset_queue(txq); |
fba4ed03 SG |
2049 | } |
2050 | ||
2051 | for (i = 0; i < priv->num_rx_queues; i++) { | |
2052 | rx_queue = priv->rx_queue[i]; | |
75354148 | 2053 | if (rx_queue->rx_buff) |
fba4ed03 SG |
2054 | free_skb_rx_queue(rx_queue); |
2055 | } | |
2056 | ||
369ec162 | 2057 | dma_free_coherent(priv->dev, |
bc4598bc JC |
2058 | sizeof(struct txbd8) * priv->total_tx_ring_size + |
2059 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | |
2060 | priv->tx_queue[0]->tx_bd_base, | |
2061 | priv->tx_queue[0]->tx_bd_dma_base); | |
1da177e4 LT |
2062 | } |
2063 | ||
c10650b6 | 2064 | void gfar_start(struct gfar_private *priv) |
0bbaf069 | 2065 | { |
46ceb60c | 2066 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
0bbaf069 | 2067 | u32 tempval; |
46ceb60c | 2068 | int i = 0; |
0bbaf069 | 2069 | |
c10650b6 CM |
2070 | /* Enable Rx/Tx hw queues */ |
2071 | gfar_write(®s->rqueue, priv->rqueue); | |
2072 | gfar_write(®s->tqueue, priv->tqueue); | |
0bbaf069 KG |
2073 | |
2074 | /* Initialize DMACTRL to have WWR and WOP */ | |
f4983704 | 2075 | tempval = gfar_read(®s->dmactrl); |
0bbaf069 | 2076 | tempval |= DMACTRL_INIT_SETTINGS; |
f4983704 | 2077 | gfar_write(®s->dmactrl, tempval); |
0bbaf069 | 2078 | |
0bbaf069 | 2079 | /* Make sure we aren't stopped */ |
f4983704 | 2080 | tempval = gfar_read(®s->dmactrl); |
0bbaf069 | 2081 | tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); |
f4983704 | 2082 | gfar_write(®s->dmactrl, tempval); |
0bbaf069 | 2083 | |
46ceb60c SG |
2084 | for (i = 0; i < priv->num_grps; i++) { |
2085 | regs = priv->gfargrp[i].regs; | |
2086 | /* Clear THLT/RHLT, so that the DMA starts polling now */ | |
2087 | gfar_write(®s->tstat, priv->gfargrp[i].tstat); | |
2088 | gfar_write(®s->rstat, priv->gfargrp[i].rstat); | |
46ceb60c | 2089 | } |
12dea57b | 2090 | |
c10650b6 CM |
2091 | /* Enable Rx/Tx DMA */ |
2092 | tempval = gfar_read(®s->maccfg1); | |
2093 | tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); | |
2094 | gfar_write(®s->maccfg1, tempval); | |
2095 | ||
efeddce7 CM |
2096 | gfar_ints_enable(priv); |
2097 | ||
860e9538 | 2098 | netif_trans_update(priv->ndev); /* prevent tx timeout */ |
0bbaf069 KG |
2099 | } |
2100 | ||
80ec396c CM |
2101 | static void free_grp_irqs(struct gfar_priv_grp *grp) |
2102 | { | |
2103 | free_irq(gfar_irq(grp, TX)->irq, grp); | |
2104 | free_irq(gfar_irq(grp, RX)->irq, grp); | |
2105 | free_irq(gfar_irq(grp, ER)->irq, grp); | |
2106 | } | |
2107 | ||
46ceb60c SG |
2108 | static int register_grp_irqs(struct gfar_priv_grp *grp) |
2109 | { | |
2110 | struct gfar_private *priv = grp->priv; | |
2111 | struct net_device *dev = priv->ndev; | |
2112 | int err; | |
1da177e4 | 2113 | |
1da177e4 | 2114 | /* If the device has multiple interrupts, register for |
0977f817 JC |
2115 | * them. Otherwise, register only the one. |
2116 | */ | |
b31a1d8b | 2117 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
0bbaf069 | 2118 | /* Install our interrupt handlers for Error, |
0977f817 JC |
2119 | * Transmit, and Receive |
2120 | */ | |
d5b8d640 | 2121 | err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, |
ee873fda CM |
2122 | gfar_irq(grp, ER)->name, grp); |
2123 | if (err < 0) { | |
59deab26 | 2124 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
ee873fda | 2125 | gfar_irq(grp, ER)->irq); |
46ceb60c | 2126 | |
2145f1af | 2127 | goto err_irq_fail; |
1da177e4 | 2128 | } |
d5b8d640 SH |
2129 | enable_irq_wake(gfar_irq(grp, ER)->irq); |
2130 | ||
ee873fda CM |
2131 | err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, |
2132 | gfar_irq(grp, TX)->name, grp); | |
2133 | if (err < 0) { | |
59deab26 | 2134 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
ee873fda | 2135 | gfar_irq(grp, TX)->irq); |
1da177e4 LT |
2136 | goto tx_irq_fail; |
2137 | } | |
ee873fda CM |
2138 | err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, |
2139 | gfar_irq(grp, RX)->name, grp); | |
2140 | if (err < 0) { | |
59deab26 | 2141 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
ee873fda | 2142 | gfar_irq(grp, RX)->irq); |
1da177e4 LT |
2143 | goto rx_irq_fail; |
2144 | } | |
3e905b80 CM |
2145 | enable_irq_wake(gfar_irq(grp, RX)->irq); |
2146 | ||
1da177e4 | 2147 | } else { |
d5b8d640 | 2148 | err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, |
ee873fda CM |
2149 | gfar_irq(grp, TX)->name, grp); |
2150 | if (err < 0) { | |
59deab26 | 2151 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
ee873fda | 2152 | gfar_irq(grp, TX)->irq); |
1da177e4 LT |
2153 | goto err_irq_fail; |
2154 | } | |
d5b8d640 | 2155 | enable_irq_wake(gfar_irq(grp, TX)->irq); |
1da177e4 LT |
2156 | } |
2157 | ||
46ceb60c SG |
2158 | return 0; |
2159 | ||
2160 | rx_irq_fail: | |
ee873fda | 2161 | free_irq(gfar_irq(grp, TX)->irq, grp); |
46ceb60c | 2162 | tx_irq_fail: |
ee873fda | 2163 | free_irq(gfar_irq(grp, ER)->irq, grp); |
46ceb60c SG |
2164 | err_irq_fail: |
2165 | return err; | |
2166 | ||
2167 | } | |
2168 | ||
80ec396c CM |
2169 | static void gfar_free_irq(struct gfar_private *priv) |
2170 | { | |
2171 | int i; | |
2172 | ||
2173 | /* Free the IRQs */ | |
2174 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | |
2175 | for (i = 0; i < priv->num_grps; i++) | |
2176 | free_grp_irqs(&priv->gfargrp[i]); | |
2177 | } else { | |
2178 | for (i = 0; i < priv->num_grps; i++) | |
2179 | free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, | |
2180 | &priv->gfargrp[i]); | |
2181 | } | |
2182 | } | |
2183 | ||
2184 | static int gfar_request_irq(struct gfar_private *priv) | |
2185 | { | |
2186 | int err, i, j; | |
2187 | ||
2188 | for (i = 0; i < priv->num_grps; i++) { | |
2189 | err = register_grp_irqs(&priv->gfargrp[i]); | |
2190 | if (err) { | |
2191 | for (j = 0; j < i; j++) | |
2192 | free_grp_irqs(&priv->gfargrp[j]); | |
2193 | return err; | |
2194 | } | |
2195 | } | |
2196 | ||
2197 | return 0; | |
2198 | } | |
2199 | ||
46ceb60c SG |
2200 | /* Bring the controller up and running */ |
2201 | int startup_gfar(struct net_device *ndev) | |
2202 | { | |
2203 | struct gfar_private *priv = netdev_priv(ndev); | |
80ec396c | 2204 | int err; |
46ceb60c | 2205 | |
a328ac92 | 2206 | gfar_mac_reset(priv); |
46ceb60c | 2207 | |
46ceb60c SG |
2208 | err = gfar_alloc_skb_resources(ndev); |
2209 | if (err) | |
2210 | return err; | |
2211 | ||
a328ac92 | 2212 | gfar_init_tx_rx_base(priv); |
46ceb60c | 2213 | |
4e857c58 | 2214 | smp_mb__before_atomic(); |
0851133b | 2215 | clear_bit(GFAR_DOWN, &priv->state); |
4e857c58 | 2216 | smp_mb__after_atomic(); |
0851133b CM |
2217 | |
2218 | /* Start Rx/Tx DMA and enable the interrupts */ | |
c10650b6 | 2219 | gfar_start(priv); |
1da177e4 | 2220 | |
2a4eebf0 CM |
2221 | /* force link state update after mac reset */ |
2222 | priv->oldlink = 0; | |
2223 | priv->oldspeed = 0; | |
2224 | priv->oldduplex = -1; | |
2225 | ||
4c4a6b0e | 2226 | phy_start(ndev->phydev); |
826aa4a0 | 2227 | |
0851133b CM |
2228 | enable_napi(priv); |
2229 | ||
2230 | netif_tx_wake_all_queues(ndev); | |
2231 | ||
1da177e4 | 2232 | return 0; |
1da177e4 LT |
2233 | } |
2234 | ||
0977f817 JC |
2235 | /* Called when something needs to use the ethernet device |
2236 | * Returns 0 for success. | |
2237 | */ | |
1da177e4 LT |
2238 | static int gfar_enet_open(struct net_device *dev) |
2239 | { | |
94e8cc35 | 2240 | struct gfar_private *priv = netdev_priv(dev); |
1da177e4 LT |
2241 | int err; |
2242 | ||
1da177e4 | 2243 | err = init_phy(dev); |
0851133b | 2244 | if (err) |
1da177e4 LT |
2245 | return err; |
2246 | ||
80ec396c CM |
2247 | err = gfar_request_irq(priv); |
2248 | if (err) | |
2249 | return err; | |
2250 | ||
1da177e4 | 2251 | err = startup_gfar(dev); |
0851133b | 2252 | if (err) |
db0e8e3f | 2253 | return err; |
1da177e4 LT |
2254 | |
2255 | return err; | |
2256 | } | |
2257 | ||
54dc79fe | 2258 | static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) |
0bbaf069 | 2259 | { |
d58ff351 | 2260 | struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN); |
6c31d55f KG |
2261 | |
2262 | memset(fcb, 0, GMAC_FCB_LEN); | |
0bbaf069 | 2263 | |
0bbaf069 KG |
2264 | return fcb; |
2265 | } | |
2266 | ||
9c4886e5 | 2267 | static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, |
bc4598bc | 2268 | int fcb_length) |
0bbaf069 | 2269 | { |
0bbaf069 KG |
2270 | /* If we're here, it's an IP packet with a TCP or UDP |
2271 | * payload. We set it to checksum, using a pseudo-header |
2272 | * we provide. |
2273 | */ | |
3a2e16c8 | 2274 | u8 flags = TXFCB_DEFAULT; |
0bbaf069 | 2275 | |
0977f817 JC |
2276 | /* Tell the controller what the protocol is, |
2277 | * and provide the already calculated phcs. |
2278 | */ | |
eddc9ec5 | 2279 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
7f7f5316 | 2280 | flags |= TXFCB_UDP; |
26eb9374 | 2281 | fcb->phcs = (__force __be16)(udp_hdr(skb)->check); |
7f7f5316 | 2282 | } else |
26eb9374 | 2283 | fcb->phcs = (__force __be16)(tcp_hdr(skb)->check); |
0bbaf069 KG |
2284 | |
2285 | /* l3os is the distance between the start of the | |
2286 | * frame (skb->data) and the start of the IP hdr. | |
2287 | * l4os is the distance between the start of the | |
0977f817 JC |
2288 | * l3 hdr and the l4 hdr |
2289 | */ | |
26eb9374 | 2290 | fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length); |
cfe1fc77 | 2291 | fcb->l4os = skb_network_header_len(skb); |
0bbaf069 | 2292 | |
7f7f5316 | 2293 | fcb->flags = flags; |
0bbaf069 KG |
2294 | } |
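/* A worked example for the offsets above, assuming an untagged IPv4/TCP
 * frame with no TxPAL: gfar_add_fcb() pushes GMAC_FCB_LEN (8) bytes, so
 * skb_network_offset() is 8 + 14 = 22 and l3os = 22 - 8 = 14 (the
 * Ethernet header), while l4os is the IP header length, e.g. 20 for an
 * IPv4 header without options.
 */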
2295 | ||
278af574 | 2296 | static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) |
0bbaf069 | 2297 | { |
7f7f5316 | 2298 | fcb->flags |= TXFCB_VLN; |
26eb9374 | 2299 | fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb)); |
0bbaf069 KG |
2300 | } |
2301 | ||
4669bc90 | 2302 | static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, |
bc4598bc | 2303 | struct txbd8 *base, int ring_size) |
4669bc90 DH |
2304 | { |
2305 | struct txbd8 *new_bd = bdp + stride; | |
2306 | ||
2307 | return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; | |
2308 | } | |
2309 | ||
2310 | static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, | |
bc4598bc | 2311 | int ring_size) |
4669bc90 DH |
2312 | { |
2313 | return skip_txbd(bdp, 1, base, ring_size); | |
2314 | } | |
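/* Ring-wrap example for the helpers above: with ring_size = 8,
 * skip_txbd(base + 6, 3, base, 8) lands on base + 1, and
 * next_txbd(base + 7, base, 8) wraps back to base.
 */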
2315 | ||
02d88fb4 CM |
2316 | /* eTSEC12: csum generation not supported for some fcb offsets */ |
2317 | static inline bool gfar_csum_errata_12(struct gfar_private *priv, | |
2318 | unsigned long fcb_addr) | |
2319 | { | |
2320 | return (gfar_has_errata(priv, GFAR_ERRATA_12) && | |
2321 | (fcb_addr % 0x20) > 0x18); | |
2322 | } | |
2323 | ||
2324 | /* eTSEC76: csum generation for frames larger than 2500 may | |
2325 | * cause excess delays before start of transmission | |
2326 | */ | |
2327 | static inline bool gfar_csum_errata_76(struct gfar_private *priv, | |
2328 | unsigned int len) | |
2329 | { | |
2330 | return (gfar_has_errata(priv, GFAR_ERRATA_76) && | |
2331 | (len > 2500)); | |
2332 | } | |
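/* Worked example for the eTSEC12 test above: an FCB at ...0x1c gives
 * (0x1c % 0x20) = 0x1c > 0x18, so hardware csum generation must be
 * avoided; an FCB at ...0x10 passes. The eTSEC76 test simply bounds the
 * frame length at 2500 bytes.
 */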
2333 | ||
0977f817 JC |
2334 | /* This is called by the kernel when a frame is ready for transmission. |
2335 | * It is pointed to by the dev->hard_start_xmit function pointer | |
2336 | */ | |
1da177e4 LT |
2337 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) |
2338 | { | |
2339 | struct gfar_private *priv = netdev_priv(dev); | |
a12f801d | 2340 | struct gfar_priv_tx_q *tx_queue = NULL; |
fba4ed03 | 2341 | struct netdev_queue *txq; |
f4983704 | 2342 | struct gfar __iomem *regs = NULL; |
0bbaf069 | 2343 | struct txfcb *fcb = NULL; |
f0ee7acf | 2344 | struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; |
5a5efed4 | 2345 | u32 lstatus; |
42f397ad | 2346 | skb_frag_t *frag; |
0d0cffdc CM |
2347 | int i, rq = 0; |
2348 | int do_tstamp, do_csum, do_vlan; | |
4669bc90 | 2349 | u32 bufaddr; |
50ad076b | 2350 | unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; |
fba4ed03 SG |
2351 | |
2352 | rq = skb->queue_mapping; | |
2353 | tx_queue = priv->tx_queue[rq]; | |
2354 | txq = netdev_get_tx_queue(dev, rq); | |
a12f801d | 2355 | base = tx_queue->tx_bd_base; |
46ceb60c | 2356 | regs = tx_queue->grp->regs; |
f0ee7acf | 2357 | |
0d0cffdc | 2358 | do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); |
df8a39de | 2359 | do_vlan = skb_vlan_tag_present(skb); |
0d0cffdc CM |
2360 | do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
2361 | priv->hwts_tx_en; | |
2362 | ||
2363 | if (do_csum || do_vlan) | |
2364 | fcb_len = GMAC_FCB_LEN; | |
2365 | ||
f0ee7acf | 2366 | /* check if time stamp should be generated */ |
0d0cffdc CM |
2367 | if (unlikely(do_tstamp)) |
2368 | fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; | |
4669bc90 | 2369 | |
5b28beaf | 2370 | /* make space for additional header when fcb is needed */ |
0d0cffdc | 2371 | if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { |
54dc79fe SH |
2372 | struct sk_buff *skb_new; |
2373 | ||
0d0cffdc | 2374 | skb_new = skb_realloc_headroom(skb, fcb_len); |
54dc79fe SH |
2375 | if (!skb_new) { |
2376 | dev->stats.tx_errors++; | |
c9974ad4 | 2377 | dev_kfree_skb_any(skb); |
54dc79fe SH |
2378 | return NETDEV_TX_OK; |
2379 | } | |
db83d136 | 2380 | |
313b037c ED |
2381 | if (skb->sk) |
2382 | skb_set_owner_w(skb_new, skb->sk); | |
c9974ad4 | 2383 | dev_consume_skb_any(skb); |
54dc79fe SH |
2384 | skb = skb_new; |
2385 | } | |
2386 | ||
4669bc90 DH |
2387 | /* total number of fragments in the SKB */ |
2388 | nr_frags = skb_shinfo(skb)->nr_frags; | |
2389 | ||
f0ee7acf MR |
2390 | /* calculate the required number of TxBDs for this skb */ |
2391 | if (unlikely(do_tstamp)) | |
2392 | nr_txbds = nr_frags + 2; | |
2393 | else | |
2394 | nr_txbds = nr_frags + 1; | |
2395 | ||
4669bc90 | 2396 | /* check if there is space to queue this packet */ |
f0ee7acf | 2397 | if (nr_txbds > tx_queue->num_txbdfree) { |
4669bc90 | 2398 | /* no space, stop the queue */ |
fba4ed03 | 2399 | netif_tx_stop_queue(txq); |
4669bc90 | 2400 | dev->stats.tx_fifo_errors++; |
4669bc90 DH |
2401 | return NETDEV_TX_BUSY; |
2402 | } | |
1da177e4 LT |
2403 | |
2404 | /* Update transmit stats */ | |
50ad076b CM |
2405 | bytes_sent = skb->len; |
2406 | tx_queue->stats.tx_bytes += bytes_sent; | |
2407 | /* keep Tx bytes on wire for BQL accounting */ | |
2408 | GFAR_CB(skb)->bytes_sent = bytes_sent; | |
1ac9ad13 | 2409 | tx_queue->stats.tx_packets++; |
1da177e4 | 2410 | |
a12f801d | 2411 | txbdp = txbdp_start = tx_queue->cur_tx; |
a7312d58 | 2412 | lstatus = be32_to_cpu(txbdp->lstatus); |
f0ee7acf | 2413 | |
9c4886e5 MR |
2414 | /* Add TxPAL between FCB and frame if required */ |
2415 | if (unlikely(do_tstamp)) { | |
2416 | skb_push(skb, GMAC_TXPAL_LEN); | |
2417 | memset(skb->data, 0, GMAC_TXPAL_LEN); | |
2418 | } | |
2419 | ||
0d0cffdc CM |
2420 | /* Add TxFCB if required */ |
2421 | if (fcb_len) { | |
54dc79fe | 2422 | fcb = gfar_add_fcb(skb); |
02d88fb4 | 2423 | lstatus |= BD_LFLAG(TXBD_TOE); |
0d0cffdc CM |
2424 | } |
2425 | ||
2426 | /* Set up checksumming */ | |
2427 | if (do_csum) { | |
2428 | gfar_tx_checksum(skb, fcb, fcb_len); | |
02d88fb4 CM |
2429 | |
2430 | if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || | |
2431 | unlikely(gfar_csum_errata_76(priv, skb->len))) { | |
4363c2fd AD |
2432 | __skb_pull(skb, GMAC_FCB_LEN); |
2433 | skb_checksum_help(skb); | |
0d0cffdc CM |
2434 | if (do_vlan || do_tstamp) { |
2435 | /* put back a new fcb for vlan/tstamp TOE */ | |
2436 | fcb = gfar_add_fcb(skb); | |
2437 | } else { | |
2438 | /* Tx TOE not used */ | |
2439 | lstatus &= ~(BD_LFLAG(TXBD_TOE)); | |
2440 | fcb = NULL; | |
2441 | } | |
4363c2fd | 2442 | } |
0bbaf069 KG |
2443 | } |
2444 | ||
0d0cffdc | 2445 | if (do_vlan) |
54dc79fe | 2446 | gfar_tx_vlan(skb, fcb); |
0bbaf069 | 2447 | |
0a4b5a24 KH |
2448 | bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), |
2449 | DMA_TO_DEVICE); | |
2450 | if (unlikely(dma_mapping_error(priv->dev, bufaddr))) | |
2451 | goto dma_map_err; | |
2452 | ||
a7312d58 | 2453 | txbdp_start->bufPtr = cpu_to_be32(bufaddr); |
1da177e4 | 2454 | |
f0ee7acf MR |
2455 | /* Time stamp insertion requires one additional TxBD */ |
2456 | if (unlikely(do_tstamp)) | |
2457 | txbdp_tstamp = txbdp = next_txbd(txbdp, base, | |
bc4598bc | 2458 | tx_queue->tx_ring_size); |
1da177e4 | 2459 | |
48963b44 | 2460 | if (likely(!nr_frags)) { |
9c8b0778 YL |
2461 | if (likely(!do_tstamp)) |
2462 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | |
4669bc90 | 2463 | } else { |
e19d0839 CM |
2464 | u32 lstatus_start = lstatus; |
2465 | ||
4669bc90 | 2466 | /* Place the fragment addresses and lengths into the TxBDs */ |
42f397ad CM |
2467 | frag = &skb_shinfo(skb)->frags[0]; |
2468 | for (i = 0; i < nr_frags; i++, frag++) { | |
2469 | unsigned int size; | |
2470 | ||
4669bc90 | 2471 | /* Point at the next BD, wrapping as needed */ |
a12f801d | 2472 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
4669bc90 | 2473 | |
42f397ad | 2474 | size = skb_frag_size(frag); |
4669bc90 | 2475 | |
42f397ad | 2476 | lstatus = be32_to_cpu(txbdp->lstatus) | size | |
bc4598bc | 2477 | BD_LFLAG(TXBD_READY); |
4669bc90 DH |
2478 | |
2479 | /* Handle the last BD specially */ | |
2480 | if (i == nr_frags - 1) | |
2481 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | |
1da177e4 | 2482 | |
42f397ad CM |
2483 | bufaddr = skb_frag_dma_map(priv->dev, frag, 0, |
2484 | size, DMA_TO_DEVICE); | |
0a4b5a24 KH |
2485 | if (unlikely(dma_mapping_error(priv->dev, bufaddr))) |
2486 | goto dma_map_err; | |
4669bc90 DH |
2487 | |
2488 | /* set the TxBD length and buffer pointer */ | |
a7312d58 CM |
2489 | txbdp->bufPtr = cpu_to_be32(bufaddr); |
2490 | txbdp->lstatus = cpu_to_be32(lstatus); | |
4669bc90 DH |
2491 | } |
2492 | ||
e19d0839 | 2493 | lstatus = lstatus_start; |
4669bc90 | 2494 | } |
1da177e4 | 2495 | |
0977f817 | 2496 | /* If time stamping is requested, one additional TxBD must be set up. The |
f0ee7acf MR |
2497 | * first TxBD points to the FCB and must have a data length of |
2498 | * GMAC_FCB_LEN. The second TxBD points to the actual frame data with | |
2499 | * the full frame length. | |
2500 | */ | |
2501 | if (unlikely(do_tstamp)) { | |
a7312d58 CM |
2502 | u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); |
2503 | ||
2504 | bufaddr = be32_to_cpu(txbdp_start->bufPtr); | |
2505 | bufaddr += fcb_len; | |
48963b44 | 2506 | |
a7312d58 CM |
2507 | lstatus_ts |= BD_LFLAG(TXBD_READY) | |
2508 | (skb_headlen(skb) - fcb_len); | |
48963b44 CM |
2509 | if (!nr_frags) |
2510 | lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | |
a7312d58 CM |
2511 | |
2512 | txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr); | |
2513 | txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); | |
f0ee7acf | 2514 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; |
e19d0839 CM |
2515 | |
2516 | /* Setup tx hardware time stamping */ | |
2517 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | |
2518 | fcb->ptp = 1; | |
f0ee7acf MR |
2519 | } else { |
2520 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); | |
2521 | } | |
1da177e4 | 2522 | |
50ad076b | 2523 | netdev_tx_sent_queue(txq, bytes_sent); |
d8a0f1b0 | 2524 | |
d55398ba | 2525 | gfar_wmb(); |
7f7f5316 | 2526 | |
a7312d58 | 2527 | txbdp_start->lstatus = cpu_to_be32(lstatus); |
4669bc90 | 2528 | |
d55398ba | 2529 | gfar_wmb(); /* force lstatus write before tx_skbuff */ |
0eddba52 AV |
2530 | |
2531 | tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; | |
2532 | ||
4669bc90 | 2533 | /* Update the current skb pointer to the next entry we will use |
0977f817 JC |
2534 | * (wrapping if necessary) |
2535 | */ | |
a12f801d | 2536 | tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & |
bc4598bc | 2537 | TX_RING_MOD_MASK(tx_queue->tx_ring_size); |
4669bc90 | 2538 | |
a12f801d | 2539 | tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
4669bc90 | 2540 | |
bc602280 CM |
2541 | /* We can work in parallel with gfar_clean_tx_ring(), except |
2542 | * when modifying num_txbdfree. Note that we didn't grab the lock | |
2543 | * when we were reading num_txbdfree and checking for available |
2544 | * space; that's because outside of this function it can only grow. |
2545 | */ | |
2546 | spin_lock_bh(&tx_queue->txlock); | |
4669bc90 | 2547 | /* reduce TxBD free count */ |
f0ee7acf | 2548 | tx_queue->num_txbdfree -= (nr_txbds); |
bc602280 | 2549 | spin_unlock_bh(&tx_queue->txlock); |
1da177e4 LT |
2550 | |
2551 | /* If the next BD still needs to be cleaned up, then the bds | |
0977f817 JC |
2552 | * are full. We need to tell the kernel to stop sending us stuff. |
2553 | */ | |
a12f801d | 2554 | if (!tx_queue->num_txbdfree) { |
fba4ed03 | 2555 | netif_tx_stop_queue(txq); |
1da177e4 | 2556 | |
09f75cd7 | 2557 | dev->stats.tx_fifo_errors++; |
1da177e4 LT |
2558 | } |
2559 | ||
1da177e4 | 2560 | /* Tell the DMA to go go go */ |
fba4ed03 | 2561 | gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); |
1da177e4 | 2562 | |
54dc79fe | 2563 | return NETDEV_TX_OK; |
0a4b5a24 KH |
2564 | |
2565 | dma_map_err: | |
2566 | txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size); | |
2567 | if (do_tstamp) | |
2568 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); | |
2569 | for (i = 0; i < nr_frags; i++) { | |
a7312d58 | 2570 | lstatus = be32_to_cpu(txbdp->lstatus); |
0a4b5a24 KH |
2571 | if (!(lstatus & BD_LFLAG(TXBD_READY))) |
2572 | break; | |
2573 | ||
a7312d58 CM |
2574 | lstatus &= ~BD_LFLAG(TXBD_READY); |
2575 | txbdp->lstatus = cpu_to_be32(lstatus); | |
2576 | bufaddr = be32_to_cpu(txbdp->bufPtr); | |
2577 | dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length), | |
0a4b5a24 KH |
2578 | DMA_TO_DEVICE); |
2579 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); | |
2580 | } | |
2581 | gfar_wmb(); | |
2582 | dev_kfree_skb_any(skb); | |
2583 | return NETDEV_TX_OK; | |
1da177e4 LT |
2584 | } |
2585 | ||
2586 | /* Stops the kernel queue, and halts the controller */ | |
2587 | static int gfar_close(struct net_device *dev) | |
2588 | { | |
2589 | struct gfar_private *priv = netdev_priv(dev); | |
bea3348e | 2590 | |
ab939905 | 2591 | cancel_work_sync(&priv->reset_task); |
1da177e4 LT |
2592 | stop_gfar(dev); |
2593 | ||
bb40dcbb | 2594 | /* Disconnect from the PHY */ |
4c4a6b0e | 2595 | phy_disconnect(dev->phydev); |
1da177e4 | 2596 | |
80ec396c CM |
2597 | gfar_free_irq(priv); |
2598 | ||
1da177e4 LT |
2599 | return 0; |
2600 | } | |
2601 | ||
1da177e4 | 2602 | /* Changes the mac address if the controller is not running. */ |
f162b9d5 | 2603 | static int gfar_set_mac_address(struct net_device *dev) |
1da177e4 | 2604 | { |
7f7f5316 | 2605 | gfar_set_mac_for_addr(dev, 0, dev->dev_addr); |
1da177e4 LT |
2606 | |
2607 | return 0; | |
2608 | } | |
2609 | ||
1da177e4 LT |
2610 | static int gfar_change_mtu(struct net_device *dev, int new_mtu) |
2611 | { | |
1da177e4 | 2612 | struct gfar_private *priv = netdev_priv(dev); |
1da177e4 | 2613 | |
0851133b CM |
2614 | while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) |
2615 | cpu_relax(); | |
2616 | ||
88302648 | 2617 | if (dev->flags & IFF_UP) |
1da177e4 LT |
2618 | stop_gfar(dev); |
2619 | ||
1da177e4 LT |
2620 | dev->mtu = new_mtu; |
2621 | ||
88302648 | 2622 | if (dev->flags & IFF_UP) |
1da177e4 LT |
2623 | startup_gfar(dev); |
2624 | ||
0851133b CM |
2625 | clear_bit_unlock(GFAR_RESETTING, &priv->state); |
2626 | ||
1da177e4 LT |
2627 | return 0; |
2628 | } | |
2629 | ||
0851133b CM |
2630 | void reset_gfar(struct net_device *ndev) |
2631 | { | |
2632 | struct gfar_private *priv = netdev_priv(ndev); | |
2633 | ||
2634 | while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) | |
2635 | cpu_relax(); | |
2636 | ||
2637 | stop_gfar(ndev); | |
2638 | startup_gfar(ndev); | |
2639 | ||
2640 | clear_bit_unlock(GFAR_RESETTING, &priv->state); | |
2641 | } | |
2642 | ||
ab939905 | 2643 | /* gfar_reset_task gets scheduled when a packet has not been |
1da177e4 LT |
2644 | * transmitted after a set amount of time. |
2645 | * For now, assume that clearing out all the structures and |
ab939905 SS |
2646 | * starting over will fix the problem. |
2647 | */ | |
2648 | static void gfar_reset_task(struct work_struct *work) | |
1da177e4 | 2649 | { |
ab939905 | 2650 | struct gfar_private *priv = container_of(work, struct gfar_private, |
bc4598bc | 2651 | reset_task); |
0851133b | 2652 | reset_gfar(priv->ndev); |
1da177e4 LT |
2653 | } |
2654 | ||
ab939905 SS |
2655 | static void gfar_timeout(struct net_device *dev) |
2656 | { | |
2657 | struct gfar_private *priv = netdev_priv(dev); | |
2658 | ||
2659 | dev->stats.tx_errors++; | |
2660 | schedule_work(&priv->reset_task); | |
2661 | } | |
2662 | ||
1da177e4 | 2663 | /* Interrupt Handler for Transmit complete */ |
c233cf40 | 2664 | static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) |
1da177e4 | 2665 | { |
a12f801d | 2666 | struct net_device *dev = tx_queue->dev; |
d8a0f1b0 | 2667 | struct netdev_queue *txq; |
d080cd63 | 2668 | struct gfar_private *priv = netdev_priv(dev); |
f0ee7acf | 2669 | struct txbd8 *bdp, *next = NULL; |
4669bc90 | 2670 | struct txbd8 *lbdp = NULL; |
a12f801d | 2671 | struct txbd8 *base = tx_queue->tx_bd_base; |
4669bc90 DH |
2672 | struct sk_buff *skb; |
2673 | int skb_dirtytx; | |
a12f801d | 2674 | int tx_ring_size = tx_queue->tx_ring_size; |
f0ee7acf | 2675 | int frags = 0, nr_txbds = 0; |
4669bc90 | 2676 | int i; |
d080cd63 | 2677 | int howmany = 0; |
d8a0f1b0 PG |
2678 | int tqi = tx_queue->qindex; |
2679 | unsigned int bytes_sent = 0; | |
4669bc90 | 2680 | u32 lstatus; |
f0ee7acf | 2681 | size_t buflen; |
1da177e4 | 2682 | |
d8a0f1b0 | 2683 | txq = netdev_get_tx_queue(dev, tqi); |
a12f801d SG |
2684 | bdp = tx_queue->dirty_tx; |
2685 | skb_dirtytx = tx_queue->skb_dirtytx; | |
1da177e4 | 2686 | |
a12f801d | 2687 | while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { |
a3bc1f11 | 2688 | |
4669bc90 | 2689 | frags = skb_shinfo(skb)->nr_frags; |
f0ee7acf | 2690 | |
0977f817 | 2691 | /* When time stamping, one additional TxBD must be freed. |
f0ee7acf MR |
2692 | * Also, we need to dma_unmap_single() the TxPAL. |
2693 | */ | |
2244d07b | 2694 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) |
f0ee7acf MR |
2695 | nr_txbds = frags + 2; |
2696 | else | |
2697 | nr_txbds = frags + 1; | |
2698 | ||
2699 | lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); | |
1da177e4 | 2700 | |
a7312d58 | 2701 | lstatus = be32_to_cpu(lbdp->lstatus); |
1da177e4 | 2702 | |
4669bc90 DH |
2703 | /* Only clean completed frames */ |
2704 | if ((lstatus & BD_LFLAG(TXBD_READY)) && | |
bc4598bc | 2705 | (lstatus & BD_LENGTH_MASK)) |
4669bc90 DH |
2706 | break; |
2707 | ||
2244d07b | 2708 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { |
f0ee7acf | 2709 | next = next_txbd(bdp, base, tx_ring_size); |
a7312d58 CM |
2710 | buflen = be16_to_cpu(next->length) + |
2711 | GMAC_FCB_LEN + GMAC_TXPAL_LEN; | |
f0ee7acf | 2712 | } else |
a7312d58 | 2713 | buflen = be16_to_cpu(bdp->length); |
f0ee7acf | 2714 | |
a7312d58 | 2715 | dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), |
bc4598bc | 2716 | buflen, DMA_TO_DEVICE); |
f0ee7acf | 2717 | |
2244d07b | 2718 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { |
f0ee7acf | 2719 | struct skb_shared_hwtstamps shhwtstamps; |
b4b67f26 SW |
2720 | u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & |
2721 | ~0x7UL); | |
bc4598bc | 2722 | |
f0ee7acf | 2723 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
f54af12f | 2724 | shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); |
9c4886e5 | 2725 | skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); |
f0ee7acf | 2726 | skb_tstamp_tx(skb, &shhwtstamps); |
a7312d58 | 2727 | gfar_clear_txbd_status(bdp); |
f0ee7acf MR |
2728 | bdp = next; |
2729 | } | |
81183059 | 2730 | |
a7312d58 | 2731 | gfar_clear_txbd_status(bdp); |
4669bc90 | 2732 | bdp = next_txbd(bdp, base, tx_ring_size); |
d080cd63 | 2733 | |
4669bc90 | 2734 | for (i = 0; i < frags; i++) { |
a7312d58 CM |
2735 | dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr), |
2736 | be16_to_cpu(bdp->length), | |
2737 | DMA_TO_DEVICE); | |
2738 | gfar_clear_txbd_status(bdp); | |
4669bc90 DH |
2739 | bdp = next_txbd(bdp, base, tx_ring_size); |
2740 | } | |
1da177e4 | 2741 | |
50ad076b | 2742 | bytes_sent += GFAR_CB(skb)->bytes_sent; |
d8a0f1b0 | 2743 | |
acb600de | 2744 | dev_kfree_skb_any(skb); |
0fd56bb5 | 2745 | |
a12f801d | 2746 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; |
d080cd63 | 2747 | |
4669bc90 | 2748 | skb_dirtytx = (skb_dirtytx + 1) & |
bc4598bc | 2749 | TX_RING_MOD_MASK(tx_ring_size); |
4669bc90 DH |
2750 | |
2751 | howmany++; | |
bc602280 | 2752 | spin_lock(&tx_queue->txlock); |
f0ee7acf | 2753 | tx_queue->num_txbdfree += nr_txbds; |
bc602280 | 2754 | spin_unlock(&tx_queue->txlock); |
4669bc90 | 2755 | } |
1da177e4 | 2756 | |
4669bc90 | 2757 | /* If we freed a buffer, we can restart transmission, if necessary */ |
0851133b CM |
2758 | if (tx_queue->num_txbdfree && |
2759 | netif_tx_queue_stopped(txq) && | |
2760 | !(test_bit(GFAR_DOWN, &priv->state))) | |
2761 | netif_wake_subqueue(priv->ndev, tqi); | |
1da177e4 | 2762 | |
4669bc90 | 2763 | /* Update dirty indicators */ |
a12f801d SG |
2764 | tx_queue->skb_dirtytx = skb_dirtytx; |
2765 | tx_queue->dirty_tx = bdp; | |
1da177e4 | 2766 | |
d8a0f1b0 | 2767 | netdev_tx_completed_queue(txq, howmany, bytes_sent); |
d080cd63 DH |
2768 | } |
2769 | ||
75354148 | 2770 | static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb) |
1da177e4 | 2771 | { |
75354148 | 2772 | struct page *page; |
76f31e8b | 2773 | dma_addr_t addr; |
1da177e4 | 2774 | |
75354148 CM |
2775 | page = dev_alloc_page(); |
2776 | if (unlikely(!page)) | |
2777 | return false; | |
1da177e4 | 2778 | |
75354148 CM |
2779 | addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); |
2780 | if (unlikely(dma_mapping_error(rxq->dev, addr))) { | |
2781 | __free_page(page); | |
7f7f5316 | 2782 | |
75354148 | 2783 | return false; |
0a4b5a24 KH |
2784 | } |
2785 | ||
75354148 CM |
2786 | rxb->dma = addr; |
2787 | rxb->page = page; | |
2788 | rxb->page_offset = 0; | |
2789 | ||
2790 | return true; | |
1da177e4 LT |
2791 | } |
2792 | ||
76f31e8b CM |
2793 | static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue) |
2794 | { | |
f23223f1 | 2795 | struct gfar_private *priv = netdev_priv(rx_queue->ndev); |
76f31e8b CM |
2796 | struct gfar_extra_stats *estats = &priv->extra_stats; |
2797 | ||
f23223f1 | 2798 | netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); |
76f31e8b CM |
2799 | atomic64_inc(&estats->rx_alloc_err); |
2800 | } | |
2801 | ||
2802 | static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, | |
2803 | int alloc_cnt) | |
2804 | { | |
75354148 CM |
2805 | struct rxbd8 *bdp; |
2806 | struct gfar_rx_buff *rxb; | |
76f31e8b CM |
2807 | int i; |
2808 | ||
2809 | i = rx_queue->next_to_use; | |
76f31e8b | 2810 | bdp = &rx_queue->rx_bd_base[i]; |
75354148 | 2811 | rxb = &rx_queue->rx_buff[i]; |
76f31e8b CM |
2812 | |
2813 | while (alloc_cnt--) { | |
75354148 CM |
2814 | /* try to reuse the page */ |
2815 | if (unlikely(!rxb->page)) { | |
2816 | if (unlikely(!gfar_new_page(rx_queue, rxb))) { | |
76f31e8b CM |
2817 | gfar_rx_alloc_err(rx_queue); |
2818 | break; | |
2819 | } | |
76f31e8b CM |
2820 | } |
2821 | ||
76f31e8b | 2822 | /* Setup the new RxBD */ |
75354148 CM |
2823 | gfar_init_rxbdp(rx_queue, bdp, |
2824 | rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT); | |
76f31e8b CM |
2825 | |
2826 | /* Update to the next pointer */ | |
75354148 CM |
2827 | bdp++; |
2828 | rxb++; | |
76f31e8b | 2829 | |
75354148 | 2830 | if (unlikely(++i == rx_queue->rx_ring_size)) { |
76f31e8b | 2831 | i = 0; |
75354148 CM |
2832 | bdp = rx_queue->rx_bd_base; |
2833 | rxb = rx_queue->rx_buff; | |
2834 | } | |
76f31e8b CM |
2835 | } |
2836 | ||
2837 | rx_queue->next_to_use = i; | |
75354148 | 2838 | rx_queue->next_to_alloc = i; |
76f31e8b CM |
2839 | } |
2840 | ||
f23223f1 | 2841 | static void count_errors(u32 lstatus, struct net_device *ndev) |
1da177e4 | 2842 | { |
f23223f1 CM |
2843 | struct gfar_private *priv = netdev_priv(ndev); |
2844 | struct net_device_stats *stats = &ndev->stats; | |
1da177e4 LT |
2845 | struct gfar_extra_stats *estats = &priv->extra_stats; |
2846 | ||
0977f817 | 2847 | /* If the packet was truncated, none of the other errors matter */ |
f966082e | 2848 | if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) { |
1da177e4 LT |
2849 | stats->rx_length_errors++; |
2850 | ||
212079df | 2851 | atomic64_inc(&estats->rx_trunc); |
1da177e4 LT |
2852 | |
2853 | return; | |
2854 | } | |
2855 | /* Count the errors, if there were any */ | |
f966082e | 2856 | if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) { |
1da177e4 LT |
2857 | stats->rx_length_errors++; |
2858 | ||
f966082e | 2859 | if (lstatus & BD_LFLAG(RXBD_LARGE)) |
212079df | 2860 | atomic64_inc(&estats->rx_large); |
1da177e4 | 2861 | else |
212079df | 2862 | atomic64_inc(&estats->rx_short); |
1da177e4 | 2863 | } |
f966082e | 2864 | if (lstatus & BD_LFLAG(RXBD_NONOCTET)) { |
1da177e4 | 2865 | stats->rx_frame_errors++; |
212079df | 2866 | atomic64_inc(&estats->rx_nonoctet); |
1da177e4 | 2867 | } |
f966082e | 2868 | if (lstatus & BD_LFLAG(RXBD_CRCERR)) { |
212079df | 2869 | atomic64_inc(&estats->rx_crcerr); |
1da177e4 LT |
2870 | stats->rx_crc_errors++; |
2871 | } | |
f966082e | 2872 | if (lstatus & BD_LFLAG(RXBD_OVERRUN)) { |
212079df | 2873 | atomic64_inc(&estats->rx_overrun); |
f966082e | 2874 | stats->rx_over_errors++; |
1da177e4 LT |
2875 | } |
2876 | } | |
2877 | ||
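/* Rx interrupt handler: filer general-purpose interrupts (FGPI) are
 * simply acknowledged; for frame receive events, Rx interrupts are
 * masked and the Rx NAPI context is scheduled to do the ring cleanup.
 */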
f4983704 | 2878 | irqreturn_t gfar_receive(int irq, void *grp_id) |
1da177e4 | 2879 | { |
aeb12c5e CM |
2880 | struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; |
2881 | unsigned long flags; | |
3e905b80 CM |
2882 | u32 imask, ievent; |
2883 | ||
2884 | ievent = gfar_read(&grp->regs->ievent); | |
2885 | ||
2886 | if (unlikely(ievent & IEVENT_FGPI)) { | |
2887 | gfar_write(&grp->regs->ievent, IEVENT_FGPI); | |
2888 | return IRQ_HANDLED; | |
2889 | } | |
aeb12c5e CM |
2890 | |
2891 | if (likely(napi_schedule_prep(&grp->napi_rx))) { | |
2892 | spin_lock_irqsave(&grp->grplock, flags); | |
2893 | imask = gfar_read(&grp->regs->imask); | |
2894 | imask &= IMASK_RX_DISABLED; | |
2895 | gfar_write(&grp->regs->imask, imask); | |
2896 | spin_unlock_irqrestore(&grp->grplock, flags); | |
2897 | __napi_schedule(&grp->napi_rx); | |
2898 | } else { | |
2899 | /* Clear IEVENT, so interrupts aren't called again | |
2900 | * because of the packets that have already arrived. | |
2901 | */ | |
2902 | gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); | |
2903 | } | |
2904 | ||
2905 | return IRQ_HANDLED; | |
2906 | } | |
2907 | ||
2908 | /* Interrupt Handler for Transmit complete */ | |
2909 | static irqreturn_t gfar_transmit(int irq, void *grp_id) | |
2910 | { | |
2911 | struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; | |
2912 | unsigned long flags; | |
2913 | u32 imask; | |
2914 | ||
2915 | if (likely(napi_schedule_prep(&grp->napi_tx))) { | |
2916 | spin_lock_irqsave(&grp->grplock, flags); | |
2917 | imask = gfar_read(&grp->regs->imask); | |
2918 | imask &= IMASK_TX_DISABLED; | |
2919 | gfar_write(&grp->regs->imask, imask); | |
2920 | spin_unlock_irqrestore(&grp->grplock, flags); | |
2921 | __napi_schedule(&grp->napi_tx); | |
2922 | } else { | |
2923 | /* Clear IEVENT, so interrupts aren't called again | |
2924 | * because of the packets that have already arrived. | |
2925 | */ | |
2926 | gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); | |
2927 | } | |
2928 | ||
1da177e4 LT |
2929 | return IRQ_HANDLED; |
2930 | } | |
2931 | ||
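/* Attach the received buffer to @skb: the first buffer of a frame
 * becomes the linear part, later buffers are appended as page
 * fragments. Returns true if the half page just consumed may be
 * recycled, i.e. the page has a single reference and is not a
 * pfmemalloc emergency page.
 */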
75354148 CM |
2932 | static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, |
2933 | struct sk_buff *skb, bool first) | |
2934 | { | |
2935 | unsigned int size = lstatus & BD_LENGTH_MASK; | |
2936 | struct page *page = rxb->page; | |
6c389fc9 | 2937 | bool last = !!(lstatus & BD_LFLAG(RXBD_LAST)); |
75354148 CM |
2938 | |
2939 | /* Remove the FCS from the packet length */ | |
6c389fc9 | 2940 | if (last) |
75354148 CM |
2941 | size -= ETH_FCS_LEN; |
2942 | ||
6c389fc9 | 2943 | if (likely(first)) { |
75354148 | 2944 | skb_put(skb, size); |
6c389fc9 ZK |
2945 | } else { |
 2946 | /* the last fragment's length contains the full frame length */ |
2947 | if (last) | |
2948 | size -= skb->len; | |
2949 | ||
2950 | /* in case the last fragment consisted only of the FCS */ | |
2951 | if (size > 0) | |
2952 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, | |
2953 | rxb->page_offset + RXBUF_ALIGNMENT, | |
2954 | size, GFAR_RXB_TRUESIZE); | |
2955 | } | |
75354148 CM |
2956 | |
 2957 | /* check whether the page can be recycled */ |
69fed99b | 2958 | if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page))) |
75354148 CM |
2959 | return false; |
2960 | ||
2961 | /* change offset to the other half */ | |
2962 | rxb->page_offset ^= GFAR_RXB_TRUESIZE; | |
2963 | ||
fe896d18 | 2964 | page_ref_inc(page); |
75354148 CM |
2965 | |
2966 | return true; | |
2967 | } | |
2968 | ||
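/* Recycle a half-used Rx page: copy the buffer info into the slot at
 * next_to_alloc and hand the spare half back to the device by syncing
 * it for DMA.
 */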
2969 | static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq, | |
2970 | struct gfar_rx_buff *old_rxb) | |
2971 | { | |
2972 | struct gfar_rx_buff *new_rxb; | |
2973 | u16 nta = rxq->next_to_alloc; | |
2974 | ||
2975 | new_rxb = &rxq->rx_buff[nta]; | |
2976 | ||
2977 | /* find next buf that can reuse a page */ | |
2978 | nta++; | |
2979 | rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0; | |
2980 | ||
2981 | /* copy page reference */ | |
2982 | *new_rxb = *old_rxb; | |
2983 | ||
2984 | /* sync for use by the device */ | |
2985 | dma_sync_single_range_for_device(rxq->dev, old_rxb->dma, | |
2986 | old_rxb->page_offset, | |
2987 | GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); | |
2988 | } | |
2989 | ||
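/* Consume the next-to-clean Rx buffer: build a new skb around it when
 * @skb is NULL (start of frame), otherwise append it to the existing
 * skb. The underlying page is recycled back into the ring when
 * possible, or unmapped if it can't be reused.
 */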
2990 | static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue, | |
2991 | u32 lstatus, struct sk_buff *skb) | |
2992 | { | |
2993 | struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean]; | |
2994 | struct page *page = rxb->page; | |
2995 | bool first = false; | |
2996 | ||
2997 | if (likely(!skb)) { | |
2998 | void *buff_addr = page_address(page) + rxb->page_offset; | |
2999 | ||
3000 | skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE); | |
3001 | if (unlikely(!skb)) { | |
3002 | gfar_rx_alloc_err(rx_queue); | |
3003 | return NULL; | |
3004 | } | |
3005 | skb_reserve(skb, RXBUF_ALIGNMENT); | |
3006 | first = true; | |
3007 | } | |
3008 | ||
3009 | dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, | |
3010 | GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); | |
3011 | ||
3012 | if (gfar_add_rx_frag(rxb, lstatus, skb, first)) { | |
3013 | /* reuse the free half of the page */ | |
3014 | gfar_reuse_rx_page(rx_queue, rxb); | |
3015 | } else { | |
3016 | /* page cannot be reused, unmap it */ | |
3017 | dma_unmap_page(rx_queue->dev, rxb->dma, | |
3018 | PAGE_SIZE, DMA_FROM_DEVICE); | |
3019 | } | |
3020 | ||
3021 | /* clear rxb content */ | |
3022 | rxb->page = NULL; | |
3023 | ||
3024 | return skb; | |
3025 | } | |
3026 | ||
0bbaf069 KG |
3027 | static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) |
3028 | { | |
3029 | /* If valid headers were found, and valid sums | |
3030 | * were verified, then we tell the kernel that no | |
0977f817 JC |
 3031 | * checksumming is necessary; otherwise leave it for the stack (CHECKSUM_NONE). |
 3032 | */ |
26eb9374 CM |
3033 | if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) == |
3034 | (RXFCB_CIP | RXFCB_CTU)) | |
0bbaf069 KG |
3035 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
3036 | else | |
bc8acf2c | 3037 | skb_checksum_none_assert(skb); |
0bbaf069 KG |
3038 | } |
3039 | ||
0977f817 | 3040 | /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ |
f23223f1 | 3041 | static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) |
1da177e4 | 3042 | { |
f23223f1 | 3043 | struct gfar_private *priv = netdev_priv(ndev); |
0bbaf069 | 3044 | struct rxfcb *fcb = NULL; |
1da177e4 | 3045 | |
2c2db48a DH |
 3046 | /* the FCB, if it exists, is at the beginning of the frame */ |
3047 | fcb = (struct rxfcb *)skb->data; | |
0bbaf069 | 3048 | |
0977f817 JC |
3049 | /* Remove the FCB from the skb |
3050 | * Remove the padded bytes, if there are any | |
3051 | */ | |
f23223f1 | 3052 | if (priv->uses_rxfcb) |
76f31e8b | 3053 | skb_pull(skb, GMAC_FCB_LEN); |
0bbaf069 | 3054 | |
cc772ab7 MR |
3055 | /* Get receive timestamp from the skb */ |
3056 | if (priv->hwts_rx_en) { | |
3057 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); | |
3058 | u64 *ns = (u64 *) skb->data; | |
bc4598bc | 3059 | |
cc772ab7 | 3060 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); |
f54af12f | 3061 | shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); |
cc772ab7 MR |
3062 | } |
3063 | ||
3064 | if (priv->padding) | |
3065 | skb_pull(skb, priv->padding); | |
3066 | ||
f23223f1 | 3067 | if (ndev->features & NETIF_F_RXCSUM) |
2c2db48a | 3068 | gfar_rx_checksum(skb, fcb); |
0bbaf069 | 3069 | |
2c2db48a | 3070 | /* Tell the skb what kind of packet this is */ |
f23223f1 | 3071 | skb->protocol = eth_type_trans(skb, ndev); |
1da177e4 | 3072 | |
f646968f | 3073 | /* There's a need to check for NETIF_F_HW_VLAN_CTAG_RX here. |
32f7fd44 JP |
3074 | * Even if vlan rx accel is disabled, on some chips |
 3075 | * RXFCB_VLN is pseudo-randomly set. |
3076 | */ | |
f23223f1 | 3077 | if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX && |
26eb9374 CM |
3078 | be16_to_cpu(fcb->flags) & RXFCB_VLN) |
3079 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), | |
3080 | be16_to_cpu(fcb->vlctl)); | |
1da177e4 LT |
3081 | } |
3082 | ||
3083 | /* gfar_clean_rx_ring() -- Processes each frame in the rx ring | |
2281a0f3 JC |
3084 | * until the budget/quota has been reached. Returns the number |
3085 | * of frames handled | |
1da177e4 | 3086 | */ |
a12f801d | 3087 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) |
1da177e4 | 3088 | { |
f23223f1 | 3089 | struct net_device *ndev = rx_queue->ndev; |
75354148 CM |
3090 | struct gfar_private *priv = netdev_priv(ndev); |
3091 | struct rxbd8 *bdp; | |
76f31e8b | 3092 | int i, howmany = 0; |
75354148 | 3093 | struct sk_buff *skb = rx_queue->skb; |
76f31e8b | 3094 | int cleaned_cnt = gfar_rxbd_unused(rx_queue); |
75354148 | 3095 | unsigned int total_bytes = 0, total_pkts = 0; |
1da177e4 LT |
3096 | |
3097 | /* Get the first full descriptor */ | |
76f31e8b | 3098 | i = rx_queue->next_to_clean; |
1da177e4 | 3099 | |
76f31e8b | 3100 | while (rx_work_limit--) { |
f966082e | 3101 | u32 lstatus; |
2c2db48a | 3102 | |
76f31e8b CM |
3103 | if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) { |
3104 | gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); | |
3105 | cleaned_cnt = 0; | |
3106 | } | |
bc4598bc | 3107 | |
76f31e8b | 3108 | bdp = &rx_queue->rx_bd_base[i]; |
f966082e CM |
3109 | lstatus = be32_to_cpu(bdp->lstatus); |
3110 | if (lstatus & BD_LFLAG(RXBD_EMPTY)) | |
76f31e8b | 3111 | break; |
815b97c6 | 3112 | |
76f31e8b CM |
3113 | /* order rx buffer descriptor reads */ |
3114 | rmb(); | |
815b97c6 | 3115 | |
76f31e8b | 3116 | /* fetch next to clean buffer from the ring */ |
75354148 CM |
3117 | skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb); |
3118 | if (unlikely(!skb)) | |
3119 | break; | |
1da177e4 | 3120 | |
75354148 CM |
3121 | cleaned_cnt++; |
3122 | howmany++; | |
81183059 | 3123 | |
75354148 CM |
3124 | if (unlikely(++i == rx_queue->rx_ring_size)) |
3125 | i = 0; | |
3126 | ||
3127 | rx_queue->next_to_clean = i; | |
3128 | ||
3129 | /* fetch next buffer if not the last in frame */ | |
3130 | if (!(lstatus & BD_LFLAG(RXBD_LAST))) | |
3131 | continue; | |
63b88b90 | 3132 | |
75354148 | 3133 | if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) { |
f23223f1 | 3134 | count_errors(lstatus, ndev); |
815b97c6 | 3135 | |
76f31e8b CM |
3136 | /* discard faulty buffer */ |
3137 | dev_kfree_skb(skb); | |
75354148 CM |
3138 | skb = NULL; |
3139 | rx_queue->stats.rx_dropped++; | |
3140 | continue; | |
3141 | } | |
76f31e8b | 3142 | |
75354148 CM |
3143 | /* Increment the number of packets */ |
3144 | total_pkts++; | |
3145 | total_bytes += skb->len; | |
2c2db48a | 3146 | |
75354148 | 3147 | skb_record_rx_queue(skb, rx_queue->qindex); |
1da177e4 | 3148 | |
75354148 | 3149 | gfar_process_frame(ndev, skb); |
1da177e4 | 3150 | |
75354148 CM |
3151 | /* Send the packet up the stack */ |
3152 | napi_gro_receive(&rx_queue->grp->napi_rx, skb); | |
3153 | ||
3154 | skb = NULL; | |
76f31e8b | 3155 | } |
1da177e4 | 3156 | |
75354148 CM |
3157 | /* Store incomplete frames for completion */ |
3158 | rx_queue->skb = skb; | |
3159 | ||
3160 | rx_queue->stats.rx_packets += total_pkts; | |
3161 | rx_queue->stats.rx_bytes += total_bytes; | |
45b679c9 | 3162 | |
76f31e8b CM |
3163 | if (cleaned_cnt) |
3164 | gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); | |
1da177e4 | 3165 | |
76f31e8b CM |
3166 | /* Update Last Free RxBD pointer for LFC */ |
3167 | if (unlikely(priv->tx_actual_en)) { | |
b4b67f26 SW |
3168 | u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); |
3169 | ||
3170 | gfar_write(rx_queue->rfbptr, bdp_dma); | |
1da177e4 LT |
3171 | } |
3172 | ||
1da177e4 LT |
3173 | return howmany; |
3174 | } | |
3175 | ||
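/* NAPI Rx poll for single-queue mode: clean the group's one Rx ring
 * and, if the budget wasn't exhausted, complete NAPI and unmask the
 * Rx interrupts.
 */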
aeb12c5e | 3176 | static int gfar_poll_rx_sq(struct napi_struct *napi, int budget) |
5eaedf31 CM |
3177 | { |
3178 | struct gfar_priv_grp *gfargrp = | |
aeb12c5e | 3179 | container_of(napi, struct gfar_priv_grp, napi_rx); |
5eaedf31 | 3180 | struct gfar __iomem *regs = gfargrp->regs; |
71ff9e3d | 3181 | struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue; |
5eaedf31 CM |
3182 | int work_done = 0; |
3183 | ||
3184 | /* Clear IEVENT, so interrupts aren't called again | |
3185 | * because of the packets that have already arrived | |
3186 | */ | |
aeb12c5e | 3187 | gfar_write(®s->ievent, IEVENT_RX_MASK); |
5eaedf31 CM |
3188 | |
3189 | work_done = gfar_clean_rx_ring(rx_queue, budget); | |
3190 | ||
3191 | if (work_done < budget) { | |
aeb12c5e | 3192 | u32 imask; |
6ad20165 | 3193 | napi_complete_done(napi, work_done); |
5eaedf31 CM |
3194 | /* Clear the halt bit in RSTAT */ |
3195 | gfar_write(®s->rstat, gfargrp->rstat); | |
3196 | ||
aeb12c5e CM |
3197 | spin_lock_irq(&gfargrp->grplock); |
3198 | imask = gfar_read(®s->imask); | |
3199 | imask |= IMASK_RX_DEFAULT; | |
3200 | gfar_write(®s->imask, imask); | |
3201 | spin_unlock_irq(&gfargrp->grplock); | |
5eaedf31 CM |
3202 | } |
3203 | ||
3204 | return work_done; | |
3205 | } | |
3206 | ||
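/* NAPI Tx poll for single-queue mode: Tx cleanup always runs to
 * completion, so NAPI is completed unconditionally and the Tx
 * interrupts are unmasked before returning a work count of zero.
 */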
aeb12c5e | 3207 | static int gfar_poll_tx_sq(struct napi_struct *napi, int budget) |
1da177e4 | 3208 | { |
bc4598bc | 3209 | struct gfar_priv_grp *gfargrp = |
aeb12c5e CM |
3210 | container_of(napi, struct gfar_priv_grp, napi_tx); |
3211 | struct gfar __iomem *regs = gfargrp->regs; | |
71ff9e3d | 3212 | struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; |
aeb12c5e CM |
3213 | u32 imask; |
3214 | ||
3215 | /* Clear IEVENT, so interrupts aren't called again | |
3216 | * because of the packets that have already arrived | |
3217 | */ | |
3218 | gfar_write(®s->ievent, IEVENT_TX_MASK); | |
3219 | ||
3220 | /* run Tx cleanup to completion */ | |
3221 | if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) | |
3222 | gfar_clean_tx_ring(tx_queue); | |
3223 | ||
3224 | napi_complete(napi); | |
3225 | ||
3226 | spin_lock_irq(&gfargrp->grplock); | |
3227 | imask = gfar_read(®s->imask); | |
3228 | imask |= IMASK_TX_DEFAULT; | |
3229 | gfar_write(®s->imask, imask); | |
3230 | spin_unlock_irq(&gfargrp->grplock); | |
3231 | ||
3232 | return 0; | |
3233 | } | |
3234 | ||
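/* NAPI Rx poll for multi-queue mode: split the budget evenly between
 * the queues marked active in RSTAT and unmask the Rx interrupts only
 * once every active queue has been fully drained.
 */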
3235 | static int gfar_poll_rx(struct napi_struct *napi, int budget) | |
3236 | { | |
3237 | struct gfar_priv_grp *gfargrp = | |
3238 | container_of(napi, struct gfar_priv_grp, napi_rx); | |
fba4ed03 | 3239 | struct gfar_private *priv = gfargrp->priv; |
46ceb60c | 3240 | struct gfar __iomem *regs = gfargrp->regs; |
fba4ed03 | 3241 | struct gfar_priv_rx_q *rx_queue = NULL; |
c233cf40 | 3242 | int work_done = 0, work_done_per_q = 0; |
39c0a0d5 | 3243 | int i, budget_per_q = 0; |
6be5ed3f CM |
3244 | unsigned long rstat_rxf; |
3245 | int num_act_queues; | |
fba4ed03 | 3246 | |
8c7396ae | 3247 | /* Clear IEVENT, so interrupts aren't called again |
0977f817 JC |
3248 | * because of the packets that have already arrived |
3249 | */ | |
aeb12c5e | 3250 | gfar_write(®s->ievent, IEVENT_RX_MASK); |
8c7396ae | 3251 | |
6be5ed3f CM |
3252 | rstat_rxf = gfar_read(®s->rstat) & RSTAT_RXF_MASK; |
3253 | ||
3254 | num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS); | |
3255 | if (num_act_queues) | |
 3256 | budget_per_q = budget / num_act_queues; |
3257 | ||
3ba405db CM |
3258 | for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { |
3259 | /* skip queue if not active */ | |
3260 | if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) | |
3261 | continue; | |
1da177e4 | 3262 | |
3ba405db CM |
3263 | rx_queue = priv->rx_queue[i]; |
3264 | work_done_per_q = | |
3265 | gfar_clean_rx_ring(rx_queue, budget_per_q); | |
3266 | work_done += work_done_per_q; | |
3267 | ||
3268 | /* finished processing this queue */ | |
3269 | if (work_done_per_q < budget_per_q) { | |
3270 | /* clear active queue hw indication */ | |
3271 | gfar_write(®s->rstat, | |
3272 | RSTAT_CLEAR_RXF0 >> i); | |
3273 | num_act_queues--; | |
3274 | ||
3275 | if (!num_act_queues) | |
3276 | break; | |
3277 | } | |
3278 | } | |
42199884 | 3279 | |
aeb12c5e CM |
3280 | if (!num_act_queues) { |
3281 | u32 imask; | |
6ad20165 | 3282 | napi_complete_done(napi, work_done); |
1da177e4 | 3283 | |
3ba405db CM |
3284 | /* Clear the halt bit in RSTAT */ |
3285 | gfar_write(®s->rstat, gfargrp->rstat); | |
1da177e4 | 3286 | |
aeb12c5e CM |
3287 | spin_lock_irq(&gfargrp->grplock); |
3288 | imask = gfar_read(®s->imask); | |
3289 | imask |= IMASK_RX_DEFAULT; | |
3290 | gfar_write(®s->imask, imask); | |
3291 | spin_unlock_irq(&gfargrp->grplock); | |
1da177e4 LT |
3292 | } |
3293 | ||
c233cf40 | 3294 | return work_done; |
1da177e4 | 3295 | } |
1da177e4 | 3296 | |
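/* NAPI Tx poll for multi-queue mode: run Tx cleanup to completion on
 * each queue in the group, and unmask the Tx interrupts once no queue
 * had outstanding work.
 */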
aeb12c5e CM |
3297 | static int gfar_poll_tx(struct napi_struct *napi, int budget) |
3298 | { | |
3299 | struct gfar_priv_grp *gfargrp = | |
3300 | container_of(napi, struct gfar_priv_grp, napi_tx); | |
3301 | struct gfar_private *priv = gfargrp->priv; | |
3302 | struct gfar __iomem *regs = gfargrp->regs; | |
3303 | struct gfar_priv_tx_q *tx_queue = NULL; | |
3304 | int has_tx_work = 0; | |
3305 | int i; | |
3306 | ||
3307 | /* Clear IEVENT, so interrupts aren't called again | |
3308 | * because of the packets that have already arrived | |
3309 | */ | |
3310 | gfar_write(®s->ievent, IEVENT_TX_MASK); | |
3311 | ||
3312 | for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { | |
3313 | tx_queue = priv->tx_queue[i]; | |
3314 | /* run Tx cleanup to completion */ | |
3315 | if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { | |
3316 | gfar_clean_tx_ring(tx_queue); | |
3317 | has_tx_work = 1; | |
3318 | } | |
3319 | } | |
3320 | ||
3321 | if (!has_tx_work) { | |
3322 | u32 imask; | |
3323 | napi_complete(napi); | |
3324 | ||
3325 | spin_lock_irq(&gfargrp->grplock); | |
3326 | imask = gfar_read(®s->imask); | |
3327 | imask |= IMASK_TX_DEFAULT; | |
3328 | gfar_write(®s->imask, imask); | |
3329 | spin_unlock_irq(&gfargrp->grplock); | |
3330 | } | |
3331 | ||
3332 | return 0; | |
3333 | } | |
3334 | ||
3335 | ||
f2d71c2d | 3336 | #ifdef CONFIG_NET_POLL_CONTROLLER |
0977f817 | 3337 | /* Polling 'interrupt' - used by things like netconsole to send skbs |
f2d71c2d VW |
3338 | * without having to re-enable interrupts. It's not called while |
3339 | * the interrupt routine is executing. | |
3340 | */ | |
3341 | static void gfar_netpoll(struct net_device *dev) | |
3342 | { | |
3343 | struct gfar_private *priv = netdev_priv(dev); | |
3a2e16c8 | 3344 | int i; |
f2d71c2d VW |
3345 | |
3346 | /* If the device has multiple interrupts, run tx/rx */ | |
b31a1d8b | 3347 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
46ceb60c | 3348 | for (i = 0; i < priv->num_grps; i++) { |
62ed839d PG |
3349 | struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
3350 | ||
3351 | disable_irq(gfar_irq(grp, TX)->irq); | |
3352 | disable_irq(gfar_irq(grp, RX)->irq); | |
3353 | disable_irq(gfar_irq(grp, ER)->irq); | |
3354 | gfar_interrupt(gfar_irq(grp, TX)->irq, grp); | |
3355 | enable_irq(gfar_irq(grp, ER)->irq); | |
3356 | enable_irq(gfar_irq(grp, RX)->irq); | |
3357 | enable_irq(gfar_irq(grp, TX)->irq); | |
46ceb60c | 3358 | } |
f2d71c2d | 3359 | } else { |
46ceb60c | 3360 | for (i = 0; i < priv->num_grps; i++) { |
62ed839d PG |
3361 | struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
3362 | ||
3363 | disable_irq(gfar_irq(grp, TX)->irq); | |
3364 | gfar_interrupt(gfar_irq(grp, TX)->irq, grp); | |
3365 | enable_irq(gfar_irq(grp, TX)->irq); | |
43de004b | 3366 | } |
f2d71c2d VW |
3367 | } |
3368 | } | |
3369 | #endif | |
3370 | ||
1da177e4 | 3371 | /* The interrupt handler for devices with one interrupt */ |
f4983704 | 3372 | static irqreturn_t gfar_interrupt(int irq, void *grp_id) |
1da177e4 | 3373 | { |
f4983704 | 3374 | struct gfar_priv_grp *gfargrp = grp_id; |
1da177e4 LT |
3375 | |
3376 | /* Save ievent for future reference */ | |
f4983704 | 3377 | u32 events = gfar_read(&gfargrp->regs->ievent); |
1da177e4 | 3378 | |
1da177e4 | 3379 | /* Check for reception */ |
538cc7ee | 3380 | if (events & IEVENT_RX_MASK) |
f4983704 | 3381 | gfar_receive(irq, grp_id); |
1da177e4 LT |
3382 | |
3383 | /* Check for transmit completion */ | |
538cc7ee | 3384 | if (events & IEVENT_TX_MASK) |
f4983704 | 3385 | gfar_transmit(irq, grp_id); |
1da177e4 | 3386 | |
538cc7ee SS |
3387 | /* Check for errors */ |
3388 | if (events & IEVENT_ERR_MASK) | |
f4983704 | 3389 | gfar_error(irq, grp_id); |
1da177e4 LT |
3390 | |
3391 | return IRQ_HANDLED; | |
3392 | } | |
3393 | ||
1da177e4 LT |
3394 | /* Called every time the controller might need to be made |
3395 | * aware of new link state. The PHY code conveys this | |
bb40dcbb | 3396 | * information through variables in the phydev structure, and this |
1da177e4 LT |
3397 | * function converts those variables into the appropriate |
3398 | * register values, and can bring down the device if needed. | |
3399 | */ | |
3400 | static void adjust_link(struct net_device *dev) | |
3401 | { | |
3402 | struct gfar_private *priv = netdev_priv(dev); | |
4c4a6b0e | 3403 | struct phy_device *phydev = dev->phydev; |
bb40dcbb | 3404 | |
6ce29b0e | 3405 | if (unlikely(phydev->link != priv->oldlink || |
0ae93b2c GR |
3406 | (phydev->link && (phydev->duplex != priv->oldduplex || |
3407 | phydev->speed != priv->oldspeed)))) | |
6ce29b0e | 3408 | gfar_update_link_state(priv); |
bb40dcbb | 3409 | } |
1da177e4 LT |
3410 | |
3411 | /* Update the hash table based on the current list of multicast | |
3412 | * addresses we subscribe to. Also, change the promiscuity of | |
3413 | * the device based on the flags (this function is called | |
0977f817 JC |
 3414 | * whenever dev->flags is changed). |
3415 | */ | |
1da177e4 LT |
3416 | static void gfar_set_multi(struct net_device *dev) |
3417 | { | |
22bedad3 | 3418 | struct netdev_hw_addr *ha; |
1da177e4 | 3419 | struct gfar_private *priv = netdev_priv(dev); |
46ceb60c | 3420 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1da177e4 LT |
3421 | u32 tempval; |
3422 | ||
a12f801d | 3423 | if (dev->flags & IFF_PROMISC) { |
1da177e4 LT |
3424 | /* Set RCTRL to PROM */ |
3425 | tempval = gfar_read(®s->rctrl); | |
3426 | tempval |= RCTRL_PROM; | |
3427 | gfar_write(®s->rctrl, tempval); | |
3428 | } else { | |
3429 | /* Set RCTRL to not PROM */ | |
3430 | tempval = gfar_read(®s->rctrl); | |
3431 | tempval &= ~(RCTRL_PROM); | |
3432 | gfar_write(®s->rctrl, tempval); | |
3433 | } | |
6aa20a22 | 3434 | |
a12f801d | 3435 | if (dev->flags & IFF_ALLMULTI) { |
1da177e4 | 3436 | /* Set the hash to rx all multicast frames */ |
0bbaf069 KG |
3437 | gfar_write(®s->igaddr0, 0xffffffff); |
3438 | gfar_write(®s->igaddr1, 0xffffffff); | |
3439 | gfar_write(®s->igaddr2, 0xffffffff); | |
3440 | gfar_write(®s->igaddr3, 0xffffffff); | |
3441 | gfar_write(®s->igaddr4, 0xffffffff); | |
3442 | gfar_write(®s->igaddr5, 0xffffffff); | |
3443 | gfar_write(®s->igaddr6, 0xffffffff); | |
3444 | gfar_write(®s->igaddr7, 0xffffffff); | |
1da177e4 LT |
3445 | gfar_write(®s->gaddr0, 0xffffffff); |
3446 | gfar_write(®s->gaddr1, 0xffffffff); | |
3447 | gfar_write(®s->gaddr2, 0xffffffff); | |
3448 | gfar_write(®s->gaddr3, 0xffffffff); | |
3449 | gfar_write(®s->gaddr4, 0xffffffff); | |
3450 | gfar_write(®s->gaddr5, 0xffffffff); | |
3451 | gfar_write(®s->gaddr6, 0xffffffff); | |
3452 | gfar_write(®s->gaddr7, 0xffffffff); | |
3453 | } else { | |
7f7f5316 AF |
3454 | int em_num; |
3455 | int idx; | |
3456 | ||
1da177e4 | 3457 | /* zero out the hash */ |
0bbaf069 KG |
3458 | gfar_write(®s->igaddr0, 0x0); |
3459 | gfar_write(®s->igaddr1, 0x0); | |
3460 | gfar_write(®s->igaddr2, 0x0); | |
3461 | gfar_write(®s->igaddr3, 0x0); | |
3462 | gfar_write(®s->igaddr4, 0x0); | |
3463 | gfar_write(®s->igaddr5, 0x0); | |
3464 | gfar_write(®s->igaddr6, 0x0); | |
3465 | gfar_write(®s->igaddr7, 0x0); | |
1da177e4 LT |
3466 | gfar_write(®s->gaddr0, 0x0); |
3467 | gfar_write(®s->gaddr1, 0x0); | |
3468 | gfar_write(®s->gaddr2, 0x0); | |
3469 | gfar_write(®s->gaddr3, 0x0); | |
3470 | gfar_write(®s->gaddr4, 0x0); | |
3471 | gfar_write(®s->gaddr5, 0x0); | |
3472 | gfar_write(®s->gaddr6, 0x0); | |
3473 | gfar_write(®s->gaddr7, 0x0); | |
3474 | ||
7f7f5316 AF |
3475 | /* If we have extended hash tables, we need to |
3476 | * clear the exact match registers to prepare for | |
0977f817 JC |
3477 | * setting them |
3478 | */ | |
7f7f5316 AF |
3479 | if (priv->extended_hash) { |
3480 | em_num = GFAR_EM_NUM + 1; | |
3481 | gfar_clear_exact_match(dev); | |
3482 | idx = 1; | |
3483 | } else { | |
3484 | idx = 0; | |
3485 | em_num = 0; | |
3486 | } | |
3487 | ||
4cd24eaf | 3488 | if (netdev_mc_empty(dev)) |
1da177e4 LT |
3489 | return; |
3490 | ||
3491 | /* Parse the list, and set the appropriate bits */ | |
22bedad3 | 3492 | netdev_for_each_mc_addr(ha, dev) { |
7f7f5316 | 3493 | if (idx < em_num) { |
22bedad3 | 3494 | gfar_set_mac_for_addr(dev, idx, ha->addr); |
7f7f5316 AF |
3495 | idx++; |
3496 | } else | |
22bedad3 | 3497 | gfar_set_hash_for_addr(dev, ha->addr); |
1da177e4 LT |
3498 | } |
3499 | } | |
1da177e4 LT |
3500 | } |
3501 | ||
7f7f5316 AF |
3502 | |
3503 | /* Clears each of the exact match registers to zero, so they | |
0977f817 JC |
3504 | * don't interfere with normal reception |
3505 | */ | |
7f7f5316 AF |
3506 | static void gfar_clear_exact_match(struct net_device *dev) |
3507 | { | |
3508 | int idx; | |
6a3c910c | 3509 | static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; |
7f7f5316 | 3510 | |
bc4598bc | 3511 | for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) |
b6bc7650 | 3512 | gfar_set_mac_for_addr(dev, idx, zero_arr); |
7f7f5316 AF |
3513 | } |
3514 | ||
1da177e4 LT |
3515 | /* Set the appropriate hash bit for the given addr */ |
3516 | /* The algorithm works like so: | |
3517 | * 1) Take the Destination Address (ie the multicast address), and | |
3518 | * do a CRC on it (little endian), and reverse the bits of the | |
3519 | * result. | |
3520 | * 2) Use the 8 most significant bits as a hash into a 256-entry | |
3521 | * table. The table is controlled through 8 32-bit registers: | |
3522 | * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is | |
 3523 | * entry 255. This means that the 3 most significant bits of the |
 3524 | * hash select which gaddr register to use, and the 5 other bits |
3525 | * indicate which bit (assuming an IBM numbering scheme, which | |
3526 | * for PowerPC (tm) is usually the case) in the register holds | |
0977f817 JC |
3527 | * the entry. |
3528 | */ | |
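/* Worked example, assuming an 8-bit hash width and a hypothetical
 * reversed-CRC value whose top byte is 0xa6 (0b101_00110): whichreg =
 * 0b101 = 5 and whichbit = 0b00110 = 6, so bit (31 - 6) = 25 is set
 * in hash_regs[5].
 */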
1da177e4 LT |
3529 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) |
3530 | { | |
3531 | u32 tempval; | |
3532 | struct gfar_private *priv = netdev_priv(dev); | |
6a3c910c | 3533 | u32 result = ether_crc(ETH_ALEN, addr); |
0bbaf069 KG |
3534 | int width = priv->hash_width; |
3535 | u8 whichbit = (result >> (32 - width)) & 0x1f; | |
3536 | u8 whichreg = result >> (32 - width + 5); | |
1da177e4 LT |
 3537 | u32 value = (1 << (31 - whichbit)); |
3538 | ||
0bbaf069 | 3539 | tempval = gfar_read(priv->hash_regs[whichreg]); |
1da177e4 | 3540 | tempval |= value; |
0bbaf069 | 3541 | gfar_write(priv->hash_regs[whichreg], tempval); |
1da177e4 LT |
3542 | } |
3543 | ||
7f7f5316 AF |
3544 | |
3545 | /* There are multiple MAC Address register pairs on some controllers | |
3546 | * This function sets the numth pair to a given address | |
3547 | */ | |
b6bc7650 JP |
3548 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, |
3549 | const u8 *addr) | |
7f7f5316 AF |
3550 | { |
3551 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 3552 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
7f7f5316 | 3553 | u32 tempval; |
f4983704 | 3554 | u32 __iomem *macptr = ®s->macstnaddr1; |
7f7f5316 AF |
3555 | |
 3556 | macptr += num * 2; |
3557 | ||
83bfc3c4 CM |
3558 | /* For a station address of 0x12345678ABCD in transmission |
3559 | * order (BE), MACnADDR1 is set to 0xCDAB7856 and | |
3560 | * MACnADDR2 is set to 0x34120000. | |
0977f817 | 3561 | */ |
83bfc3c4 CM |
3562 | tempval = (addr[5] << 24) | (addr[4] << 16) | |
3563 | (addr[3] << 8) | addr[2]; | |
7f7f5316 | 3564 | |
83bfc3c4 | 3565 | gfar_write(macptr, tempval); |
7f7f5316 | 3566 | |
83bfc3c4 | 3567 | tempval = (addr[1] << 24) | (addr[0] << 16); |
7f7f5316 AF |
3568 | |
3569 | gfar_write(macptr+1, tempval); | |
3570 | } | |
3571 | ||
1da177e4 | 3572 | /* GFAR error interrupt handler */ |
f4983704 | 3573 | static irqreturn_t gfar_error(int irq, void *grp_id) |
1da177e4 | 3574 | { |
f4983704 SG |
3575 | struct gfar_priv_grp *gfargrp = grp_id; |
3576 | struct gfar __iomem *regs = gfargrp->regs; | |
 3577 | struct gfar_private *priv = gfargrp->priv; |
3578 | struct net_device *dev = priv->ndev; | |
1da177e4 LT |
3579 | |
3580 | /* Save ievent for future reference */ | |
f4983704 | 3581 | u32 events = gfar_read(®s->ievent); |
1da177e4 LT |
3582 | |
3583 | /* Clear IEVENT */ | |
f4983704 | 3584 | gfar_write(®s->ievent, events & IEVENT_ERR_MASK); |
d87eb127 SW |
3585 | |
3586 | /* Magic Packet is not an error. */ | |
b31a1d8b | 3587 | if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && |
d87eb127 SW |
3588 | (events & IEVENT_MAG)) |
3589 | events &= ~IEVENT_MAG; | |
1da177e4 LT |
3590 | |
 3591 | /* Log the remaining error events if error debugging is enabled */ |
0bbaf069 | 3592 | if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) |
bc4598bc JC |
3593 | netdev_dbg(dev, |
3594 | "error interrupt (ievent=0x%08x imask=0x%08x)\n", | |
59deab26 | 3595 | events, gfar_read(®s->imask)); |
1da177e4 LT |
3596 | |
3597 | /* Update the error counters */ | |
3598 | if (events & IEVENT_TXE) { | |
09f75cd7 | 3599 | dev->stats.tx_errors++; |
1da177e4 LT |
3600 | |
3601 | if (events & IEVENT_LC) | |
09f75cd7 | 3602 | dev->stats.tx_window_errors++; |
1da177e4 | 3603 | if (events & IEVENT_CRL) |
09f75cd7 | 3604 | dev->stats.tx_aborted_errors++; |
1da177e4 | 3605 | if (events & IEVENT_XFUN) { |
59deab26 JP |
3606 | netif_dbg(priv, tx_err, dev, |
3607 | "TX FIFO underrun, packet dropped\n"); | |
09f75cd7 | 3608 | dev->stats.tx_dropped++; |
212079df | 3609 | atomic64_inc(&priv->extra_stats.tx_underrun); |
1da177e4 | 3610 | |
bc602280 | 3611 | schedule_work(&priv->reset_task); |
1da177e4 | 3612 | } |
59deab26 | 3613 | netif_dbg(priv, tx_err, dev, "Transmit Error\n"); |
1da177e4 LT |
3614 | } |
3615 | if (events & IEVENT_BSY) { | |
1de65a5e | 3616 | dev->stats.rx_over_errors++; |
212079df | 3617 | atomic64_inc(&priv->extra_stats.rx_bsy); |
1da177e4 | 3618 | |
59deab26 JP |
3619 | netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", |
3620 | gfar_read(®s->rstat)); | |
1da177e4 LT |
3621 | } |
3622 | if (events & IEVENT_BABR) { | |
09f75cd7 | 3623 | dev->stats.rx_errors++; |
212079df | 3624 | atomic64_inc(&priv->extra_stats.rx_babr); |
1da177e4 | 3625 | |
59deab26 | 3626 | netif_dbg(priv, rx_err, dev, "babbling RX error\n"); |
1da177e4 LT |
3627 | } |
3628 | if (events & IEVENT_EBERR) { | |
212079df | 3629 | atomic64_inc(&priv->extra_stats.eberr); |
59deab26 | 3630 | netif_dbg(priv, rx_err, dev, "bus error\n"); |
1da177e4 | 3631 | } |
59deab26 JP |
3632 | if (events & IEVENT_RXC) |
3633 | netif_dbg(priv, rx_status, dev, "control frame\n"); | |
1da177e4 LT |
3634 | |
3635 | if (events & IEVENT_BABT) { | |
212079df | 3636 | atomic64_inc(&priv->extra_stats.tx_babt); |
59deab26 | 3637 | netif_dbg(priv, tx_err, dev, "babbling TX error\n"); |
1da177e4 LT |
3638 | } |
3639 | return IRQ_HANDLED; | |
3640 | } | |
3641 | ||
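/* Derive the MACCFG1 flow-control bits for the current link. With
 * pause autonegotiation disabled the manual tx/rx pause settings are
 * used; otherwise the local and link-partner pause advertisements are
 * resolved via mii_resolve_flowctrl_fdx(). Half duplex disables pause
 * entirely.
 */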
6ce29b0e CM |
3642 | static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) |
3643 | { | |
4c4a6b0e PR |
3644 | struct net_device *ndev = priv->ndev; |
3645 | struct phy_device *phydev = ndev->phydev; | |
6ce29b0e CM |
3646 | u32 val = 0; |
3647 | ||
3648 | if (!phydev->duplex) | |
3649 | return val; | |
3650 | ||
3651 | if (!priv->pause_aneg_en) { | |
3652 | if (priv->tx_pause_en) | |
3653 | val |= MACCFG1_TX_FLOW; | |
3654 | if (priv->rx_pause_en) | |
3655 | val |= MACCFG1_RX_FLOW; | |
3656 | } else { | |
3657 | u16 lcl_adv, rmt_adv; | |
3658 | u8 flowctrl; | |
3659 | /* get link partner capabilities */ | |
3660 | rmt_adv = 0; | |
3661 | if (phydev->pause) | |
3662 | rmt_adv = LPA_PAUSE_CAP; | |
3663 | if (phydev->asym_pause) | |
3664 | rmt_adv |= LPA_PAUSE_ASYM; | |
3665 | ||
43ef8d29 PMB |
3666 | lcl_adv = 0; |
3667 | if (phydev->advertising & ADVERTISED_Pause) | |
3668 | lcl_adv |= ADVERTISE_PAUSE_CAP; | |
3669 | if (phydev->advertising & ADVERTISED_Asym_Pause) | |
3670 | lcl_adv |= ADVERTISE_PAUSE_ASYM; | |
6ce29b0e CM |
3671 | |
3672 | flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); | |
3673 | if (flowctrl & FLOW_CTRL_TX) | |
3674 | val |= MACCFG1_TX_FLOW; | |
3675 | if (flowctrl & FLOW_CTRL_RX) | |
3676 | val |= MACCFG1_RX_FLOW; | |
3677 | } | |
3678 | ||
3679 | return val; | |
3680 | } | |
3681 | ||
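/* Reprogram MACCFG1/MACCFG2/ECNTRL to match the PHY's current link,
 * duplex, speed and pause state. When Tx flow control becomes active
 * the last-free RxBD pointers are primed so the controller can
 * throttle the sender; on link down the cached link parameters are
 * reset.
 */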
3682 | static noinline void gfar_update_link_state(struct gfar_private *priv) | |
3683 | { | |
3684 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | |
4c4a6b0e PR |
3685 | struct net_device *ndev = priv->ndev; |
3686 | struct phy_device *phydev = ndev->phydev; | |
45b679c9 MP |
3687 | struct gfar_priv_rx_q *rx_queue = NULL; |
3688 | int i; | |
6ce29b0e CM |
3689 | |
3690 | if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) | |
3691 | return; | |
3692 | ||
3693 | if (phydev->link) { | |
3694 | u32 tempval1 = gfar_read(®s->maccfg1); | |
3695 | u32 tempval = gfar_read(®s->maccfg2); | |
3696 | u32 ecntrl = gfar_read(®s->ecntrl); | |
5d621672 | 3697 | u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW); |
6ce29b0e CM |
3698 | |
3699 | if (phydev->duplex != priv->oldduplex) { | |
3700 | if (!(phydev->duplex)) | |
3701 | tempval &= ~(MACCFG2_FULL_DUPLEX); | |
3702 | else | |
3703 | tempval |= MACCFG2_FULL_DUPLEX; | |
3704 | ||
3705 | priv->oldduplex = phydev->duplex; | |
3706 | } | |
3707 | ||
3708 | if (phydev->speed != priv->oldspeed) { | |
3709 | switch (phydev->speed) { | |
3710 | case 1000: | |
3711 | tempval = | |
3712 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); | |
3713 | ||
3714 | ecntrl &= ~(ECNTRL_R100); | |
3715 | break; | |
3716 | case 100: | |
3717 | case 10: | |
3718 | tempval = | |
3719 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); | |
3720 | ||
3721 | /* Reduced mode distinguishes | |
3722 | * between 10 and 100 | |
3723 | */ | |
3724 | if (phydev->speed == SPEED_100) | |
3725 | ecntrl |= ECNTRL_R100; | |
3726 | else | |
3727 | ecntrl &= ~(ECNTRL_R100); | |
3728 | break; | |
3729 | default: | |
3730 | netif_warn(priv, link, priv->ndev, | |
3731 | "Ack! Speed (%d) is not 10/100/1000!\n", | |
3732 | phydev->speed); | |
3733 | break; | |
3734 | } | |
3735 | ||
3736 | priv->oldspeed = phydev->speed; | |
3737 | } | |
3738 | ||
3739 | tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); | |
3740 | tempval1 |= gfar_get_flowctrl_cfg(priv); | |
3741 | ||
45b679c9 MP |
3742 | /* Turn last free buffer recording on */ |
3743 | if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { | |
3744 | for (i = 0; i < priv->num_rx_queues; i++) { | |
b4b67f26 SW |
3745 | u32 bdp_dma; |
3746 | ||
45b679c9 | 3747 | rx_queue = priv->rx_queue[i]; |
b4b67f26 SW |
3748 | bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); |
3749 | gfar_write(rx_queue->rfbptr, bdp_dma); | |
45b679c9 MP |
3750 | } |
3751 | ||
3752 | priv->tx_actual_en = 1; | |
3753 | } | |
3754 | ||
3755 | if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval)) | |
3756 | priv->tx_actual_en = 0; | |
3757 | ||
6ce29b0e CM |
3758 | gfar_write(®s->maccfg1, tempval1); |
3759 | gfar_write(®s->maccfg2, tempval); | |
3760 | gfar_write(®s->ecntrl, ecntrl); | |
3761 | ||
3762 | if (!priv->oldlink) | |
3763 | priv->oldlink = 1; | |
3764 | ||
3765 | } else if (priv->oldlink) { | |
3766 | priv->oldlink = 0; | |
3767 | priv->oldspeed = 0; | |
3768 | priv->oldduplex = -1; | |
3769 | } | |
3770 | ||
3771 | if (netif_msg_link(priv)) | |
3772 | phy_print_status(phydev); | |
3773 | } | |
3774 | ||
94e5a2a8 | 3775 | static const struct of_device_id gfar_match[] = |
b31a1d8b AF |
3776 | { |
3777 | { | |
3778 | .type = "network", | |
3779 | .compatible = "gianfar", | |
3780 | }, | |
46ceb60c SG |
3781 | { |
3782 | .compatible = "fsl,etsec2", | |
3783 | }, | |
b31a1d8b AF |
3784 | {}, |
3785 | }; | |
e72701ac | 3786 | MODULE_DEVICE_TABLE(of, gfar_match); |
b31a1d8b | 3787 | |
1da177e4 | 3788 | /* Structure for a device driver */ |
74888760 | 3789 | static struct platform_driver gfar_driver = { |
4018294b GL |
3790 | .driver = { |
3791 | .name = "fsl-gianfar", | |
4018294b GL |
3792 | .pm = GFAR_PM_OPS, |
3793 | .of_match_table = gfar_match, | |
3794 | }, | |
1da177e4 LT |
3795 | .probe = gfar_probe, |
3796 | .remove = gfar_remove, | |
3797 | }; | |
3798 | ||
db62f684 | 3799 | module_platform_driver(gfar_driver); |