/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors.  The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed).  In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit.  This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets).  The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in.  The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted.  Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
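
/* Illustrative sketch only (not part of the upstream driver): the "Theory
 * of operation" above describes rings of buffer descriptors terminated by
 * a WRAP bit in the last descriptor, walked from the last known position
 * until an empty descriptor is found.  A minimal model of that traversal
 * could look like the following; the type and identifier names (ex_bd,
 * EX_BD_EMPTY, EX_BD_WRAP, ex_process) are made up for illustration and do
 * not exist in gianfar.h:
 *
 *	struct ex_bd { u16 flags; u32 bufptr; };
 *
 *	static struct ex_bd *ex_clean_ring(struct ex_bd *base,
 *					   struct ex_bd *cur)
 *	{
 *		while (!(cur->flags & EX_BD_EMPTY)) {
 *			ex_process(cur);		// hand buffer to the stack
 *			cur->flags |= EX_BD_EMPTY;	// return the BD to hardware
 *			if (cur->flags & EX_BD_WRAP)	// last BD: wrap to ring start
 *				cur = base;
 *			else
 *				cur++;
 *		}
 *		return cur;				// resume here next time
 *	}
 */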

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}

static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}

	}

	return 0;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff =
			kmalloc_array(rx_queue->rx_ring_size,
				      sizeof(*rx_queue->rx_skbuff),
				      GFP_KERNEL);
		if (!rx_queue->rx_skbuff)
			goto cleanup;

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
	int frame_size = priv->ndev->mtu + ETH_HLEN;

	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en)
		priv->uses_rxfcb = 1;

	if (priv->uses_rxfcb)
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		     INCREMENTAL_BUFFER_SIZE;

	priv->rx_buffer_size = frame_size;
}

static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 *rxq_mask, *txq_mask;
		rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
		txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		} else { /* GFAR_MQ_POLLING */
			grp->rx_bit_map = rxq_mask ?
			*rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = txq_mask ?
			*txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to revert the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}

static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;
	unsigned short mode, poll_mode;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	/* parse the num of HW tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = of_get_available_child_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			num_tx_qs = tx_queues ? *tx_queues : 1;
			num_rx_qs = rx_queues ? *rx_queues : 1;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

void gfar_mac_reset(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(3);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	udelay(3);

	/* Compute rx_buff_size based on config flags */
	gfar_rx_buff_size_config(priv);

	/* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length
	 */
	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
	    gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

	gfar_write(&regs->maccfg2, tempval);

	/* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	if (priv->extended_hash)
		gfar_clear_exact_match(priv->ndev);

	gfar_mac_rx_config(priv);

	gfar_mac_tx_config(priv);

	gfar_set_mac_address(priv->ndev);

	gfar_set_multi(priv->ndev);

	/* clear ievent and imask before configuring coalescing */
	gfar_ints_disable(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);
}

static void gfar_hw_init(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 attrs;

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(priv);

	gfar_mac_reset(priv);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing
	 * depending on driver parameters
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* FIFO configs */
	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);

	/* Program the interrupt steering regs, only for MG devices */
	if (priv->num_grps > 1)
		gfar_write_isrg(priv);
}

static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	int err = 0, i;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->dev = &ofdev->dev;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(ofdev, priv);

	gfar_detect_errata(priv);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++) {
		if (priv->poll_mode == GFAR_SQ_POLLING) {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
				       gfar_poll_tx_sq, 2);
		} else {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx, GFAR_DEV_WEIGHT);
			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
				       gfar_poll_tx, 2);
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	gfar_init_addr_hash_table(priv);

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		priv->padding = 8;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	set_bit(GFAR_DOWN, &priv->state);

	gfar_hw_init(priv);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	device_init_wakeup(&dev->dev,
			   priv->device_flags &
			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(gfar_irq(grp, TX)->name, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_gfar_dev(priv);
	return err;
}

2dc11581 | 1435 | static int gfar_remove(struct platform_device *ofdev) |
1da177e4 | 1436 | { |
8513fbd8 | 1437 | struct gfar_private *priv = platform_get_drvdata(ofdev); |
1da177e4 | 1438 | |
fe192a49 GL |
1439 | if (priv->phy_node) |
1440 | of_node_put(priv->phy_node); | |
1441 | if (priv->tbi_node) | |
1442 | of_node_put(priv->tbi_node); | |
1443 | ||
d9d8e041 | 1444 | unregister_netdev(priv->ndev); |
46ceb60c | 1445 | unmap_group_regs(priv); |
20862788 CM |
1446 | gfar_free_rx_queues(priv); |
1447 | gfar_free_tx_queues(priv); | |
ee873fda | 1448 | free_gfar_dev(priv); |
1da177e4 LT |
1449 | |
1450 | return 0; | |
1451 | } | |
1452 | ||
d87eb127 | 1453 | #ifdef CONFIG_PM |
be926fc4 AV |
1454 | |
1455 | static int gfar_suspend(struct device *dev) | |
d87eb127 | 1456 | { |
be926fc4 AV |
1457 | struct gfar_private *priv = dev_get_drvdata(dev); |
1458 | struct net_device *ndev = priv->ndev; | |
46ceb60c | 1459 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 SW |
1460 | unsigned long flags; |
1461 | u32 tempval; | |
1462 | ||
1463 | int magic_packet = priv->wol_en && | |
bc4598bc JC |
1464 | (priv->device_flags & |
1465 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | |
d87eb127 | 1466 | |
be926fc4 | 1467 | netif_device_detach(ndev); |
d87eb127 | 1468 | |
be926fc4 | 1469 | if (netif_running(ndev)) { |
fba4ed03 SG |
1470 | |
1471 | local_irq_save(flags); | |
1472 | lock_tx_qs(priv); | |
d87eb127 | 1473 | |
c10650b6 | 1474 | gfar_halt_nodisable(priv); |
d87eb127 SW |
1475 | |
1476 | /* Disable Tx, and Rx if wake-on-LAN is disabled. */ | |
f4983704 | 1477 | tempval = gfar_read(&regs->maccfg1); |
d87eb127 SW |
1478 | |
1479 | tempval &= ~MACCFG1_TX_EN; | |
1480 | ||
1481 | if (!magic_packet) | |
1482 | tempval &= ~MACCFG1_RX_EN; | |
1483 | ||
f4983704 | 1484 | gfar_write(&regs->maccfg1, tempval); |
d87eb127 | 1485 | |
fba4ed03 SG |
1486 | unlock_tx_qs(priv); |
1487 | local_irq_restore(flags); | |
d87eb127 | 1488 | |
46ceb60c | 1489 | disable_napi(priv); |
d87eb127 SW |
1490 | |
1491 | if (magic_packet) { | |
1492 | /* Enable interrupt on Magic Packet */ | |
f4983704 | 1493 | gfar_write(&regs->imask, IMASK_MAG); |
d87eb127 SW |
1494 | |
1495 | /* Enable Magic Packet mode */ | |
f4983704 | 1496 | tempval = gfar_read(&regs->maccfg2); |
d87eb127 | 1497 | tempval |= MACCFG2_MPEN; |
f4983704 | 1498 | gfar_write(&regs->maccfg2, tempval); |
d87eb127 SW |
1499 | } else { |
1500 | phy_stop(priv->phydev); | |
1501 | } | |
1502 | } | |
1503 | ||
1504 | return 0; | |
1505 | } | |
1506 | ||
be926fc4 | 1507 | static int gfar_resume(struct device *dev) |
d87eb127 | 1508 | { |
be926fc4 AV |
1509 | struct gfar_private *priv = dev_get_drvdata(dev); |
1510 | struct net_device *ndev = priv->ndev; | |
46ceb60c | 1511 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 SW |
1512 | unsigned long flags; |
1513 | u32 tempval; | |
1514 | int magic_packet = priv->wol_en && | |
bc4598bc JC |
1515 | (priv->device_flags & |
1516 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | |
d87eb127 | 1517 | |
be926fc4 AV |
1518 | if (!netif_running(ndev)) { |
1519 | netif_device_attach(ndev); | |
d87eb127 SW |
1520 | return 0; |
1521 | } | |
1522 | ||
1523 | if (!magic_packet && priv->phydev) | |
1524 | phy_start(priv->phydev); | |
1525 | ||
1526 | /* Disable Magic Packet mode, in case something | |
1527 | * else woke us up. | |
1528 | */ | |
fba4ed03 SG |
1529 | local_irq_save(flags); |
1530 | lock_tx_qs(priv); | |
d87eb127 | 1531 | |
f4983704 | 1532 | tempval = gfar_read(&regs->maccfg2); |
d87eb127 | 1533 | tempval &= ~MACCFG2_MPEN; |
f4983704 | 1534 | gfar_write(&regs->maccfg2, tempval); |
d87eb127 | 1535 | |
c10650b6 | 1536 | gfar_start(priv); |
d87eb127 | 1537 | |
fba4ed03 SG |
1538 | unlock_tx_qs(priv); |
1539 | local_irq_restore(flags); | |
d87eb127 | 1540 | |
be926fc4 AV |
1541 | netif_device_attach(ndev); |
1542 | ||
46ceb60c | 1543 | enable_napi(priv); |
be926fc4 AV |
1544 | |
1545 | return 0; | |
1546 | } | |
1547 | ||
1548 | static int gfar_restore(struct device *dev) | |
1549 | { | |
1550 | struct gfar_private *priv = dev_get_drvdata(dev); | |
1551 | struct net_device *ndev = priv->ndev; | |
1552 | ||
103cdd1d WD |
1553 | if (!netif_running(ndev)) { |
1554 | netif_device_attach(ndev); | |
1555 | ||
be926fc4 | 1556 | return 0; |
103cdd1d | 1557 | } |
be926fc4 | 1558 | |
1eb8f7a7 CM |
1559 | if (gfar_init_bds(ndev)) { |
1560 | free_skb_resources(priv); | |
1561 | return -ENOMEM; | |
1562 | } | |
1563 | ||
a328ac92 CM |
1564 | gfar_mac_reset(priv); |
1565 | ||
1566 | gfar_init_tx_rx_base(priv); | |
1567 | ||
c10650b6 | 1568 | gfar_start(priv); |
be926fc4 AV |
1569 | |
1570 | priv->oldlink = 0; | |
1571 | priv->oldspeed = 0; | |
1572 | priv->oldduplex = -1; | |
1573 | ||
1574 | if (priv->phydev) | |
1575 | phy_start(priv->phydev); | |
d87eb127 | 1576 | |
be926fc4 | 1577 | netif_device_attach(ndev); |
5ea681d4 | 1578 | enable_napi(priv); |
d87eb127 SW |
1579 | |
1580 | return 0; | |
1581 | } | |
be926fc4 AV |
1582 | |
1583 | static struct dev_pm_ops gfar_pm_ops = { | |
1584 | .suspend = gfar_suspend, | |
1585 | .resume = gfar_resume, | |
1586 | .freeze = gfar_suspend, | |
1587 | .thaw = gfar_resume, | |
1588 | .restore = gfar_restore, | |
1589 | }; | |
1590 | ||
1591 | #define GFAR_PM_OPS (&gfar_pm_ops) | |
1592 | ||
d87eb127 | 1593 | #else |
be926fc4 AV |
1594 | |
1595 | #define GFAR_PM_OPS NULL | |
be926fc4 | 1596 | |
d87eb127 | 1597 | #endif |
1da177e4 | 1598 | |
e8a2b6a4 AF |
1599 | /* Reads the controller's registers to determine what interface |
1600 | * connects it to the PHY. | |
1601 | */ | |
1602 | static phy_interface_t gfar_get_interface(struct net_device *dev) | |
1603 | { | |
1604 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1605 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
f4983704 SG |
1606 | u32 ecntrl; |
1607 | ||
f4983704 | 1608 | ecntrl = gfar_read(&regs->ecntrl); |
e8a2b6a4 AF |
1609 | |
1610 | if (ecntrl & ECNTRL_SGMII_MODE) | |
1611 | return PHY_INTERFACE_MODE_SGMII; | |
1612 | ||
1613 | if (ecntrl & ECNTRL_TBI_MODE) { | |
1614 | if (ecntrl & ECNTRL_REDUCED_MODE) | |
1615 | return PHY_INTERFACE_MODE_RTBI; | |
1616 | else | |
1617 | return PHY_INTERFACE_MODE_TBI; | |
1618 | } | |
1619 | ||
1620 | if (ecntrl & ECNTRL_REDUCED_MODE) { | |
bc4598bc | 1621 | if (ecntrl & ECNTRL_REDUCED_MII_MODE) { |
e8a2b6a4 | 1622 | return PHY_INTERFACE_MODE_RMII; |
bc4598bc | 1623 | } |
7132ab7f | 1624 | else { |
b31a1d8b | 1625 | phy_interface_t interface = priv->interface; |
7132ab7f | 1626 | |
0977f817 | 1627 | /* This isn't autodetected right now, so it must |
7132ab7f AF |
1628 | * be set by the device tree or platform code. |
1629 | */ | |
1630 | if (interface == PHY_INTERFACE_MODE_RGMII_ID) | |
1631 | return PHY_INTERFACE_MODE_RGMII_ID; | |
1632 | ||
e8a2b6a4 | 1633 | return PHY_INTERFACE_MODE_RGMII; |
7132ab7f | 1634 | } |
e8a2b6a4 AF |
1635 | } |
1636 | ||
b31a1d8b | 1637 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) |
e8a2b6a4 AF |
1638 | return PHY_INTERFACE_MODE_GMII; |
1639 | ||
1640 | return PHY_INTERFACE_MODE_MII; | |
1641 | } | |
1642 | ||
1643 | ||
bb40dcbb AF |
1644 | /* Initializes driver's PHY state, and attaches to the PHY. |
1645 | * Returns 0 on success. | |
1da177e4 LT |
1646 | */ |
1647 | static int init_phy(struct net_device *dev) | |
1648 | { | |
1649 | struct gfar_private *priv = netdev_priv(dev); | |
bb40dcbb | 1650 | uint gigabit_support = |
b31a1d8b | 1651 | priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? |
23402bdd | 1652 | GFAR_SUPPORTED_GBIT : 0; |
e8a2b6a4 | 1653 | phy_interface_t interface; |
1da177e4 LT |
1654 | |
1655 | priv->oldlink = 0; | |
1656 | priv->oldspeed = 0; | |
1657 | priv->oldduplex = -1; | |
1658 | ||
e8a2b6a4 AF |
1659 | interface = gfar_get_interface(dev); |
1660 | ||
1db780f8 AV |
1661 | priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, |
1662 | interface); | |
1663 | if (!priv->phydev) | |
1664 | priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, | |
1665 | interface); | |
1666 | if (!priv->phydev) { | |
1667 | dev_err(&dev->dev, "could not attach to PHY\n"); | |
1668 | return -ENODEV; | |
fe192a49 | 1669 | } |
1da177e4 | 1670 | |
d3c12873 KJ |
1671 | if (interface == PHY_INTERFACE_MODE_SGMII) |
1672 | gfar_configure_serdes(dev); | |
1673 | ||
bb40dcbb | 1674 | /* Remove any features not supported by the controller */ |
fe192a49 GL |
1675 | priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); |
1676 | priv->phydev->advertising = priv->phydev->supported; | |
1da177e4 LT |
1677 | |
1678 | return 0; | |
1da177e4 LT |
1679 | } |
1680 | ||
0977f817 | 1681 | /* Initialize TBI PHY interface for communicating with the |
d0313587 PG |
1682 | * SERDES lynx PHY on the chip. We communicate with this PHY |
1683 | * through the MDIO bus on each controller, treating it as a | |
1684 | * "normal" PHY at the address found in the TBIPA register. We assume | |
1685 | * that the TBIPA register is valid. Either the MDIO bus code will set | |
1686 | * it to a value that doesn't conflict with other PHYs on the bus, or the | |
1687 | * value doesn't matter, as there are no other PHYs on the bus. | |
1688 | */ | |
d3c12873 KJ |
1689 | static void gfar_configure_serdes(struct net_device *dev) |
1690 | { | |
1691 | struct gfar_private *priv = netdev_priv(dev); | |
fe192a49 GL |
1692 | struct phy_device *tbiphy; |
1693 | ||
1694 | if (!priv->tbi_node) { | |
1695 | dev_warn(&dev->dev, "error: SGMII mode requires that the " | |
1696 | "device tree specify a tbi-handle\n"); | |
1697 | return; | |
1698 | } | |
c132419e | 1699 | |
fe192a49 GL |
1700 | tbiphy = of_phy_find_device(priv->tbi_node); |
1701 | if (!tbiphy) { | |
1702 | dev_err(&dev->dev, "error: Could not get TBI device\n"); | |
b31a1d8b AF |
1703 | return; |
1704 | } | |
d3c12873 | 1705 | |
0977f817 | 1706 | /* If the link is already up, we must already be ok, and don't need to |
bdb59f94 TP |
1707 | * configure and reset the TBI<->SerDes link. Maybe U-Boot configured |
1708 | * everything for us? Resetting it takes the link down and requires | |
1709 | * several seconds for it to come back. | |
1710 | */ | |
fe192a49 | 1711 | if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) |
b31a1d8b | 1712 | return; |
d3c12873 | 1713 | |
d0313587 | 1714 | /* Single clk mode, mii mode off (for serdes communication) */ |
fe192a49 | 1715 | phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); |
d3c12873 | 1716 | |
fe192a49 | 1717 | phy_write(tbiphy, MII_ADVERTISE, |
bc4598bc JC |
1718 | ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | |
1719 | ADVERTISE_1000XPSE_ASYM); | |
d3c12873 | 1720 | |
bc4598bc JC |
1721 | phy_write(tbiphy, MII_BMCR, |
1722 | BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | | |
1723 | BMCR_SPEED1000); | |
d3c12873 KJ |
1724 | } |
1725 | ||
511d934f AV |
1726 | static int __gfar_is_rx_idle(struct gfar_private *priv) |
1727 | { | |
1728 | u32 res; | |
1729 | ||
0977f817 | 1730 | /* Normally TSEC should not hang on GRS commands, so we should
511d934f AV |
1731 | * actually wait for IEVENT_GRSC flag. |
1732 | */ | |
ad3660c2 | 1733 | if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) |
511d934f AV |
1734 | return 0; |
1735 | ||
0977f817 | 1736 | /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are |
511d934f AV |
1737 | * the same as bits 23-30, the eTSEC Rx is assumed to be idle |
1738 | * and the Rx can be safely reset. | |
1739 | */ | |
1740 | res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); | |
1741 | res &= 0x7f807f80; | |
1742 | if ((res & 0xffff) == (res >> 16)) | |
1743 | return 1; | |
1744 | ||
1745 | return 0; | |
1746 | } | |
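/* Editor's note -- a small worked example, not part of the driver: the mask
 * 0x7f807f80 used above keeps exactly bits 7-14 and 23-30 (LSB-0 numbering),
 * so for res = 0x35803580 the low halfword (0x3580) equals the high halfword
 * shifted down (0x3580), the comparison ((res & 0xffff) == (res >> 16))
 * succeeds, and Rx is reported idle; res = 0x35800080 fails the comparison
 * and the caller keeps waiting.
 */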
0bbaf069 KG |
1747 | |
1748 | /* Halt the receive and transmit queues */ | |
c10650b6 | 1749 | static void gfar_halt_nodisable(struct gfar_private *priv) |
1da177e4 | 1750 | { |
efeddce7 | 1751 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1da177e4 LT |
1752 | u32 tempval; |
1753 | ||
efeddce7 | 1754 | gfar_ints_disable(priv); |
1da177e4 | 1755 | |
1da177e4 | 1756 | /* Stop the DMA, and wait for it to stop */ |
f4983704 | 1757 | tempval = gfar_read(&regs->dmactrl); |
bc4598bc JC |
1758 | if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) != |
1759 | (DMACTRL_GRS | DMACTRL_GTS)) { | |
511d934f AV |
1760 | int ret; |
1761 | ||
1da177e4 | 1762 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); |
f4983704 | 1763 | gfar_write(&regs->dmactrl, tempval); |
1da177e4 | 1764 | |
511d934f AV |
1765 | do { |
1766 | ret = spin_event_timeout(((gfar_read(&regs->ievent) & | |
1767 | (IEVENT_GRSC | IEVENT_GTSC)) == | |
1768 | (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0); | |
1769 | if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC)) | |
1770 | ret = __gfar_is_rx_idle(priv); | |
1771 | } while (!ret); | |
1da177e4 | 1772 | } |
d87eb127 | 1773 | } |
d87eb127 SW |
1774 | |
1775 | /* Halt the receive and transmit queues */ | |
c10650b6 | 1776 | void gfar_halt(struct gfar_private *priv) |
d87eb127 | 1777 | { |
46ceb60c | 1778 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 | 1779 | u32 tempval; |
1da177e4 | 1780 | |
c10650b6 CM |
1781 | /* Disable the Rx/Tx hw queues */ |
1782 | gfar_write(&regs->rqueue, 0); | |
1783 | gfar_write(&regs->tqueue, 0); | |
2a54adc3 | 1784 | |
c10650b6 CM |
1785 | mdelay(10); |
1786 | ||
1787 | gfar_halt_nodisable(priv); | |
1788 | ||
1789 | /* Disable Rx/Tx DMA */ | |
1da177e4 LT |
1790 | tempval = gfar_read(&regs->maccfg1); |
1791 | tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); | |
1792 | gfar_write(&regs->maccfg1, tempval); | |
0bbaf069 KG |
1793 | } |
1794 | ||
1795 | void stop_gfar(struct net_device *dev) | |
1796 | { | |
1797 | struct gfar_private *priv = netdev_priv(dev); | |
0bbaf069 | 1798 | |
0851133b | 1799 | netif_tx_stop_all_queues(dev); |
bb40dcbb | 1800 | |
0851133b CM |
1801 | smp_mb__before_clear_bit(); |
1802 | set_bit(GFAR_DOWN, &priv->state); | |
1803 | smp_mb__after_clear_bit(); | |
a12f801d | 1804 | |
0851133b | 1805 | disable_napi(priv); |
0bbaf069 | 1806 | |
0851133b | 1807 | /* disable ints and gracefully shut down Rx/Tx DMA */ |
c10650b6 | 1808 | gfar_halt(priv); |
1da177e4 | 1809 | |
0851133b | 1810 | phy_stop(priv->phydev); |
1da177e4 | 1811 | |
1da177e4 | 1812 | free_skb_resources(priv); |
1da177e4 LT |
1813 | } |
1814 | ||
fba4ed03 | 1815 | static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) |
1da177e4 | 1816 | { |
1da177e4 | 1817 | struct txbd8 *txbdp; |
fba4ed03 | 1818 | struct gfar_private *priv = netdev_priv(tx_queue->dev); |
4669bc90 | 1819 | int i, j; |
1da177e4 | 1820 | |
a12f801d | 1821 | txbdp = tx_queue->tx_bd_base; |
1da177e4 | 1822 | |
a12f801d SG |
1823 | for (i = 0; i < tx_queue->tx_ring_size; i++) { |
1824 | if (!tx_queue->tx_skbuff[i]) | |
4669bc90 | 1825 | continue; |
1da177e4 | 1826 | |
369ec162 | 1827 | dma_unmap_single(priv->dev, txbdp->bufPtr, |
bc4598bc | 1828 | txbdp->length, DMA_TO_DEVICE); |
4669bc90 | 1829 | txbdp->lstatus = 0; |
fba4ed03 | 1830 | for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; |
bc4598bc | 1831 | j++) { |
4669bc90 | 1832 | txbdp++; |
369ec162 | 1833 | dma_unmap_page(priv->dev, txbdp->bufPtr, |
bc4598bc | 1834 | txbdp->length, DMA_TO_DEVICE); |
1da177e4 | 1835 | } |
ad5da7ab | 1836 | txbdp++; |
a12f801d SG |
1837 | dev_kfree_skb_any(tx_queue->tx_skbuff[i]); |
1838 | tx_queue->tx_skbuff[i] = NULL; | |
1da177e4 | 1839 | } |
a12f801d | 1840 | kfree(tx_queue->tx_skbuff); |
1eb8f7a7 | 1841 | tx_queue->tx_skbuff = NULL; |
fba4ed03 | 1842 | } |
1da177e4 | 1843 | |
fba4ed03 SG |
1844 | static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) |
1845 | { | |
1846 | struct rxbd8 *rxbdp; | |
1847 | struct gfar_private *priv = netdev_priv(rx_queue->dev); | |
1848 | int i; | |
1da177e4 | 1849 | |
fba4ed03 | 1850 | rxbdp = rx_queue->rx_bd_base; |
1da177e4 | 1851 | |
a12f801d SG |
1852 | for (i = 0; i < rx_queue->rx_ring_size; i++) { |
1853 | if (rx_queue->rx_skbuff[i]) { | |
369ec162 CM |
1854 | dma_unmap_single(priv->dev, rxbdp->bufPtr, |
1855 | priv->rx_buffer_size, | |
bc4598bc | 1856 | DMA_FROM_DEVICE); |
a12f801d SG |
1857 | dev_kfree_skb_any(rx_queue->rx_skbuff[i]); |
1858 | rx_queue->rx_skbuff[i] = NULL; | |
1da177e4 | 1859 | } |
e69edd21 AV |
1860 | rxbdp->lstatus = 0; |
1861 | rxbdp->bufPtr = 0; | |
1862 | rxbdp++; | |
1da177e4 | 1863 | } |
a12f801d | 1864 | kfree(rx_queue->rx_skbuff); |
1eb8f7a7 | 1865 | rx_queue->rx_skbuff = NULL; |
fba4ed03 | 1866 | } |
e69edd21 | 1867 | |
fba4ed03 | 1868 | /* If there are any tx skbs or rx skbs still around, free them. |
0977f817 JC |
1869 | * Then free tx_skbuff and rx_skbuff |
1870 | */ | |
fba4ed03 SG |
1871 | static void free_skb_resources(struct gfar_private *priv) |
1872 | { | |
1873 | struct gfar_priv_tx_q *tx_queue = NULL; | |
1874 | struct gfar_priv_rx_q *rx_queue = NULL; | |
1875 | int i; | |
1876 | ||
1877 | /* Go through all the buffer descriptors and free their data buffers */ | |
1878 | for (i = 0; i < priv->num_tx_queues; i++) { | |
d8a0f1b0 | 1879 | struct netdev_queue *txq; |
bc4598bc | 1880 | |
fba4ed03 | 1881 | tx_queue = priv->tx_queue[i]; |
d8a0f1b0 | 1882 | txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); |
bc4598bc | 1883 | if (tx_queue->tx_skbuff) |
fba4ed03 | 1884 | free_skb_tx_queue(tx_queue); |
d8a0f1b0 | 1885 | netdev_tx_reset_queue(txq); |
fba4ed03 SG |
1886 | } |
1887 | ||
1888 | for (i = 0; i < priv->num_rx_queues; i++) { | |
1889 | rx_queue = priv->rx_queue[i]; | |
bc4598bc | 1890 | if (rx_queue->rx_skbuff) |
fba4ed03 SG |
1891 | free_skb_rx_queue(rx_queue); |
1892 | } | |
1893 | ||
369ec162 | 1894 | dma_free_coherent(priv->dev, |
bc4598bc JC |
1895 | sizeof(struct txbd8) * priv->total_tx_ring_size + |
1896 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | |
1897 | priv->tx_queue[0]->tx_bd_base, | |
1898 | priv->tx_queue[0]->tx_bd_dma_base); | |
1da177e4 LT |
1899 | } |
1900 | ||
c10650b6 | 1901 | void gfar_start(struct gfar_private *priv) |
0bbaf069 | 1902 | { |
46ceb60c | 1903 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
0bbaf069 | 1904 | u32 tempval; |
46ceb60c | 1905 | int i = 0; |
0bbaf069 | 1906 | |
c10650b6 CM |
1907 | /* Enable Rx/Tx hw queues */ |
1908 | gfar_write(&regs->rqueue, priv->rqueue); | |
1909 | gfar_write(&regs->tqueue, priv->tqueue); | |
0bbaf069 KG |
1910 | |
1911 | /* Initialize DMACTRL to have WWR and WOP */ | |
f4983704 | 1912 | tempval = gfar_read(&regs->dmactrl); |
0bbaf069 | 1913 | tempval |= DMACTRL_INIT_SETTINGS; |
f4983704 | 1914 | gfar_write(&regs->dmactrl, tempval); |
0bbaf069 | 1915 | |
0bbaf069 | 1916 | /* Make sure we aren't stopped */ |
f4983704 | 1917 | tempval = gfar_read(&regs->dmactrl); |
0bbaf069 | 1918 | tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); |
f4983704 | 1919 | gfar_write(&regs->dmactrl, tempval); |
0bbaf069 | 1920 | |
46ceb60c SG |
1921 | for (i = 0; i < priv->num_grps; i++) { |
1922 | regs = priv->gfargrp[i].regs; | |
1923 | /* Clear THLT/RHLT, so that the DMA starts polling now */ | |
1924 | gfar_write(&regs->tstat, priv->gfargrp[i].tstat); | |
1925 | gfar_write(&regs->rstat, priv->gfargrp[i].rstat); | |
46ceb60c | 1926 | } |
12dea57b | 1927 | |
c10650b6 CM |
1928 | /* Enable Rx/Tx DMA */ |
1929 | tempval = gfar_read(&regs->maccfg1); | |
1930 | tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); | |
1931 | gfar_write(&regs->maccfg1, tempval); | |
1932 | ||
efeddce7 CM |
1933 | gfar_ints_enable(priv); |
1934 | ||
c10650b6 | 1935 | priv->ndev->trans_start = jiffies; /* prevent tx timeout */ |
0bbaf069 KG |
1936 | } |
1937 | ||
80ec396c CM |
1938 | static void free_grp_irqs(struct gfar_priv_grp *grp) |
1939 | { | |
1940 | free_irq(gfar_irq(grp, TX)->irq, grp); | |
1941 | free_irq(gfar_irq(grp, RX)->irq, grp); | |
1942 | free_irq(gfar_irq(grp, ER)->irq, grp); | |
1943 | } | |
1944 | ||
46ceb60c SG |
1945 | static int register_grp_irqs(struct gfar_priv_grp *grp) |
1946 | { | |
1947 | struct gfar_private *priv = grp->priv; | |
1948 | struct net_device *dev = priv->ndev; | |
1949 | int err; | |
1da177e4 | 1950 | |
1da177e4 | 1951 | /* If the device has multiple interrupts, register for |
0977f817 JC |
1952 | * them. Otherwise, only register for the one |
1953 | */ | |
b31a1d8b | 1954 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
0bbaf069 | 1955 | /* Install our interrupt handlers for Error, |
0977f817 JC |
1956 | * Transmit, and Receive |
1957 | */ | |
ee873fda CM |
1958 | err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, |
1959 | gfar_irq(grp, ER)->name, grp); | |
1960 | if (err < 0) { | |
59deab26 | 1961 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
ee873fda | 1962 | gfar_irq(grp, ER)->irq); |
46ceb60c | 1963 | |
2145f1af | 1964 | goto err_irq_fail; |
1da177e4 | 1965 | } |
ee873fda CM |
1966 | err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, |
1967 | gfar_irq(grp, TX)->name, grp); | |
1968 | if (err < 0) { | |
59deab26 | 1969 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
ee873fda | 1970 | gfar_irq(grp, TX)->irq); |
1da177e4 LT |
1971 | goto tx_irq_fail; |
1972 | } | |
ee873fda CM |
1973 | err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, |
1974 | gfar_irq(grp, RX)->name, grp); | |
1975 | if (err < 0) { | |
59deab26 | 1976 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
ee873fda | 1977 | gfar_irq(grp, RX)->irq); |
1da177e4 LT |
1978 | goto rx_irq_fail; |
1979 | } | |
1980 | } else { | |
ee873fda CM |
1981 | err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, |
1982 | gfar_irq(grp, TX)->name, grp); | |
1983 | if (err < 0) { | |
59deab26 | 1984 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
ee873fda | 1985 | gfar_irq(grp, TX)->irq); |
1da177e4 LT |
1986 | goto err_irq_fail; |
1987 | } | |
1988 | } | |
1989 | ||
46ceb60c SG |
1990 | return 0; |
1991 | ||
1992 | rx_irq_fail: | |
ee873fda | 1993 | free_irq(gfar_irq(grp, TX)->irq, grp); |
46ceb60c | 1994 | tx_irq_fail: |
ee873fda | 1995 | free_irq(gfar_irq(grp, ER)->irq, grp); |
46ceb60c SG |
1996 | err_irq_fail: |
1997 | return err; | |
1998 | ||
1999 | } | |
2000 | ||
80ec396c CM |
2001 | static void gfar_free_irq(struct gfar_private *priv) |
2002 | { | |
2003 | int i; | |
2004 | ||
2005 | /* Free the IRQs */ | |
2006 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | |
2007 | for (i = 0; i < priv->num_grps; i++) | |
2008 | free_grp_irqs(&priv->gfargrp[i]); | |
2009 | } else { | |
2010 | for (i = 0; i < priv->num_grps; i++) | |
2011 | free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, | |
2012 | &priv->gfargrp[i]); | |
2013 | } | |
2014 | } | |
2015 | ||
2016 | static int gfar_request_irq(struct gfar_private *priv) | |
2017 | { | |
2018 | int err, i, j; | |
2019 | ||
2020 | for (i = 0; i < priv->num_grps; i++) { | |
2021 | err = register_grp_irqs(&priv->gfargrp[i]); | |
2022 | if (err) { | |
2023 | for (j = 0; j < i; j++) | |
2024 | free_grp_irqs(&priv->gfargrp[j]); | |
2025 | return err; | |
2026 | } | |
2027 | } | |
2028 | ||
2029 | return 0; | |
2030 | } | |
2031 | ||
46ceb60c SG |
2032 | /* Bring the controller up and running */ |
2033 | int startup_gfar(struct net_device *ndev) | |
2034 | { | |
2035 | struct gfar_private *priv = netdev_priv(ndev); | |
80ec396c | 2036 | int err; |
46ceb60c | 2037 | |
a328ac92 | 2038 | gfar_mac_reset(priv); |
46ceb60c | 2039 | |
46ceb60c SG |
2040 | err = gfar_alloc_skb_resources(ndev); |
2041 | if (err) | |
2042 | return err; | |
2043 | ||
a328ac92 | 2044 | gfar_init_tx_rx_base(priv); |
46ceb60c | 2045 | |
0851133b CM |
2046 | smp_mb__before_clear_bit(); |
2047 | clear_bit(GFAR_DOWN, &priv->state); | |
2048 | smp_mb__after_clear_bit(); | |
2049 | ||
2050 | /* Start Rx/Tx DMA and enable the interrupts */ | |
c10650b6 | 2051 | gfar_start(priv); |
1da177e4 | 2052 | |
826aa4a0 AV |
2053 | phy_start(priv->phydev); |
2054 | ||
0851133b CM |
2055 | enable_napi(priv); |
2056 | ||
2057 | netif_tx_wake_all_queues(ndev); | |
2058 | ||
1da177e4 | 2059 | return 0; |
1da177e4 LT |
2060 | } |
2061 | ||
0977f817 JC |
2062 | /* Called when something needs to use the ethernet device |
2063 | * Returns 0 for success. | |
2064 | */ | |
1da177e4 LT |
2065 | static int gfar_enet_open(struct net_device *dev) |
2066 | { | |
94e8cc35 | 2067 | struct gfar_private *priv = netdev_priv(dev); |
1da177e4 LT |
2068 | int err; |
2069 | ||
1da177e4 | 2070 | err = init_phy(dev); |
0851133b | 2071 | if (err) |
1da177e4 LT |
2072 | return err; |
2073 | ||
80ec396c CM |
2074 | err = gfar_request_irq(priv); |
2075 | if (err) | |
2076 | return err; | |
2077 | ||
1da177e4 | 2078 | err = startup_gfar(dev); |
0851133b | 2079 | if (err) |
db0e8e3f | 2080 | return err; |
1da177e4 | 2081 | |
2884e5cc AV |
2082 | device_set_wakeup_enable(&dev->dev, priv->wol_en); |
2083 | ||
1da177e4 LT |
2084 | return err; |
2085 | } | |
2086 | ||
54dc79fe | 2087 | static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) |
0bbaf069 | 2088 | { |
54dc79fe | 2089 | struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); |
6c31d55f KG |
2090 | |
2091 | memset(fcb, 0, GMAC_FCB_LEN); | |
0bbaf069 | 2092 | |
0bbaf069 KG |
2093 | return fcb; |
2094 | } | |
2095 | ||
9c4886e5 | 2096 | static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, |
bc4598bc | 2097 | int fcb_length) |
0bbaf069 | 2098 | { |
0bbaf069 KG |
2099 | /* If we're here, it's an IP packet with a TCP or UDP |
2100 | * payload. We set it to checksum, using a pseudo-header | |
2101 | * we provide | |
2102 | */ | |
3a2e16c8 | 2103 | u8 flags = TXFCB_DEFAULT; |
0bbaf069 | 2104 | |
0977f817 JC |
2105 | /* Tell the controller what the protocol is |
2106 | * And provide the already calculated phcs | |
2107 | */ | |
eddc9ec5 | 2108 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
7f7f5316 | 2109 | flags |= TXFCB_UDP; |
4bedb452 | 2110 | fcb->phcs = udp_hdr(skb)->check; |
7f7f5316 | 2111 | } else |
8da32de5 | 2112 | fcb->phcs = tcp_hdr(skb)->check; |
0bbaf069 KG |
2113 | |
2114 | /* l3os is the distance between the start of the | |
2115 | * frame (skb->data) and the start of the IP hdr. | |
2116 | * l4os is the distance between the start of the | |
0977f817 JC |
2117 | * l3 hdr and the l4 hdr |
2118 | */ | |
9c4886e5 | 2119 | fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length); |
cfe1fc77 | 2120 | fcb->l4os = skb_network_header_len(skb); |
0bbaf069 | 2121 | |
7f7f5316 | 2122 | fcb->flags = flags; |
0bbaf069 KG |
2123 | } |
2124 | ||
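/* Editor's note -- a hedged worked example, not part of the driver: for an
 * ordinary IPv4/TCP frame where only the FCB has been pushed in front of the
 * Ethernet header, fcb_length is GMAC_FCB_LEN, so l3os above should work out
 * to ETH_HLEN (14) and l4os to the IP header length (typically 20); the
 * controller uses these offsets together with phcs to insert the L4 checksum
 * in hardware.
 */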
7f7f5316 | 2125 | void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) |
0bbaf069 | 2126 | { |
7f7f5316 | 2127 | fcb->flags |= TXFCB_VLN; |
0bbaf069 KG |
2128 | fcb->vlctl = vlan_tx_tag_get(skb); |
2129 | } | |
2130 | ||
4669bc90 | 2131 | static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, |
bc4598bc | 2132 | struct txbd8 *base, int ring_size) |
4669bc90 DH |
2133 | { |
2134 | struct txbd8 *new_bd = bdp + stride; | |
2135 | ||
2136 | return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; | |
2137 | } | |
2138 | ||
2139 | static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, | |
bc4598bc | 2140 | int ring_size) |
4669bc90 DH |
2141 | { |
2142 | return skip_txbd(bdp, 1, base, ring_size); | |
2143 | } | |
2144 | ||
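/* Editor's note -- illustration only, not part of the driver: skip_txbd()
 * walks the BD ring with wrap-around.  With ring_size = 8,
 * next_txbd(base + 7, base, 8) computes base + 8, detects it is past the end
 * and wraps back to base, and skip_txbd(base + 6, 3, base, 8) likewise lands
 * on base + 1.
 */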
02d88fb4 CM |
2145 | /* eTSEC12: csum generation not supported for some fcb offsets */ |
2146 | static inline bool gfar_csum_errata_12(struct gfar_private *priv, | |
2147 | unsigned long fcb_addr) | |
2148 | { | |
2149 | return (gfar_has_errata(priv, GFAR_ERRATA_12) && | |
2150 | (fcb_addr % 0x20) > 0x18); | |
2151 | } | |
2152 | ||
2153 | /* eTSEC76: csum generation for frames larger than 2500 may | |
2154 | * cause excess delays before start of transmission | |
2155 | */ | |
2156 | static inline bool gfar_csum_errata_76(struct gfar_private *priv, | |
2157 | unsigned int len) | |
2158 | { | |
2159 | return (gfar_has_errata(priv, GFAR_ERRATA_76) && | |
2160 | (len > 2500)); | |
2161 | } | |
2162 | ||
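/* Editor's note -- a worked example, not part of the driver: for eTSEC-12,
 * an FCB whose address ends in 0x19 gives fcb_addr % 0x20 == 0x19 > 0x18,
 * i.e. the FCB starts too close to the end of a 32-byte window; for eTSEC-76
 * any frame longer than 2500 bytes trips the check.  In both cases
 * gfar_start_xmit() below falls back to skb_checksum_help() in software.
 */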
0977f817 JC |
2163 | /* This is called by the kernel when a frame is ready for transmission. |
2164 | * It is pointed to by the dev->hard_start_xmit function pointer | |
2165 | */ | |
1da177e4 LT |
2166 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) |
2167 | { | |
2168 | struct gfar_private *priv = netdev_priv(dev); | |
a12f801d | 2169 | struct gfar_priv_tx_q *tx_queue = NULL; |
fba4ed03 | 2170 | struct netdev_queue *txq; |
f4983704 | 2171 | struct gfar __iomem *regs = NULL; |
0bbaf069 | 2172 | struct txfcb *fcb = NULL; |
f0ee7acf | 2173 | struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; |
5a5efed4 | 2174 | u32 lstatus; |
0d0cffdc CM |
2175 | int i, rq = 0; |
2176 | int do_tstamp, do_csum, do_vlan; | |
4669bc90 | 2177 | u32 bufaddr; |
fef6108d | 2178 | unsigned long flags; |
50ad076b | 2179 | unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; |
fba4ed03 SG |
2180 | |
2181 | rq = skb->queue_mapping; | |
2182 | tx_queue = priv->tx_queue[rq]; | |
2183 | txq = netdev_get_tx_queue(dev, rq); | |
a12f801d | 2184 | base = tx_queue->tx_bd_base; |
46ceb60c | 2185 | regs = tx_queue->grp->regs; |
f0ee7acf | 2186 | |
0d0cffdc CM |
2187 | do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); |
2188 | do_vlan = vlan_tx_tag_present(skb); | |
2189 | do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && | |
2190 | priv->hwts_tx_en; | |
2191 | ||
2192 | if (do_csum || do_vlan) | |
2193 | fcb_len = GMAC_FCB_LEN; | |
2194 | ||
f0ee7acf | 2195 | /* check if time stamp should be generated */ |
0d0cffdc CM |
2196 | if (unlikely(do_tstamp)) |
2197 | fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; | |
4669bc90 | 2198 | |
5b28beaf | 2199 | /* make space for additional header when fcb is needed */ |
0d0cffdc | 2200 | if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { |
54dc79fe SH |
2201 | struct sk_buff *skb_new; |
2202 | ||
0d0cffdc | 2203 | skb_new = skb_realloc_headroom(skb, fcb_len); |
54dc79fe SH |
2204 | if (!skb_new) { |
2205 | dev->stats.tx_errors++; | |
c9974ad4 | 2206 | dev_kfree_skb_any(skb); |
54dc79fe SH |
2207 | return NETDEV_TX_OK; |
2208 | } | |
db83d136 | 2209 | |
313b037c ED |
2210 | if (skb->sk) |
2211 | skb_set_owner_w(skb_new, skb->sk); | |
c9974ad4 | 2212 | dev_consume_skb_any(skb); |
54dc79fe SH |
2213 | skb = skb_new; |
2214 | } | |
2215 | ||
4669bc90 DH |
2216 | /* total number of fragments in the SKB */ |
2217 | nr_frags = skb_shinfo(skb)->nr_frags; | |
2218 | ||
f0ee7acf MR |
2219 | /* calculate the required number of TxBDs for this skb */ |
2220 | if (unlikely(do_tstamp)) | |
2221 | nr_txbds = nr_frags + 2; | |
2222 | else | |
2223 | nr_txbds = nr_frags + 1; | |
2224 | ||
4669bc90 | 2225 | /* check if there is space to queue this packet */ |
f0ee7acf | 2226 | if (nr_txbds > tx_queue->num_txbdfree) { |
4669bc90 | 2227 | /* no space, stop the queue */ |
fba4ed03 | 2228 | netif_tx_stop_queue(txq); |
4669bc90 | 2229 | dev->stats.tx_fifo_errors++; |
4669bc90 DH |
2230 | return NETDEV_TX_BUSY; |
2231 | } | |
1da177e4 LT |
2232 | |
2233 | /* Update transmit stats */ | |
50ad076b CM |
2234 | bytes_sent = skb->len; |
2235 | tx_queue->stats.tx_bytes += bytes_sent; | |
2236 | /* keep Tx bytes on wire for BQL accounting */ | |
2237 | GFAR_CB(skb)->bytes_sent = bytes_sent; | |
1ac9ad13 | 2238 | tx_queue->stats.tx_packets++; |
1da177e4 | 2239 | |
a12f801d | 2240 | txbdp = txbdp_start = tx_queue->cur_tx; |
f0ee7acf MR |
2241 | lstatus = txbdp->lstatus; |
2242 | ||
2243 | /* Time stamp insertion requires one additional TxBD */ | |
2244 | if (unlikely(do_tstamp)) | |
2245 | txbdp_tstamp = txbdp = next_txbd(txbdp, base, | |
bc4598bc | 2246 | tx_queue->tx_ring_size); |
1da177e4 | 2247 | |
4669bc90 | 2248 | if (nr_frags == 0) { |
f0ee7acf MR |
2249 | if (unlikely(do_tstamp)) |
2250 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | | |
bc4598bc | 2251 | TXBD_INTERRUPT); |
f0ee7acf MR |
2252 | else |
2253 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | |
4669bc90 DH |
2254 | } else { |
2255 | /* Place the fragment addresses and lengths into the TxBDs */ | |
2256 | for (i = 0; i < nr_frags; i++) { | |
50ad076b | 2257 | unsigned int frag_len; |
4669bc90 | 2258 | /* Point at the next BD, wrapping as needed */ |
a12f801d | 2259 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
4669bc90 | 2260 | |
50ad076b | 2261 | frag_len = skb_shinfo(skb)->frags[i].size; |
4669bc90 | 2262 | |
50ad076b | 2263 | lstatus = txbdp->lstatus | frag_len | |
bc4598bc | 2264 | BD_LFLAG(TXBD_READY); |
4669bc90 DH |
2265 | |
2266 | /* Handle the last BD specially */ | |
2267 | if (i == nr_frags - 1) | |
2268 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | |
1da177e4 | 2269 | |
369ec162 | 2270 | bufaddr = skb_frag_dma_map(priv->dev, |
2234a722 IC |
2271 | &skb_shinfo(skb)->frags[i], |
2272 | 0, | |
50ad076b | 2273 | frag_len, |
2234a722 | 2274 | DMA_TO_DEVICE); |
4669bc90 DH |
2275 | |
2276 | /* set the TxBD length and buffer pointer */ | |
2277 | txbdp->bufPtr = bufaddr; | |
2278 | txbdp->lstatus = lstatus; | |
2279 | } | |
2280 | ||
2281 | lstatus = txbdp_start->lstatus; | |
2282 | } | |
1da177e4 | 2283 | |
9c4886e5 MR |
2284 | /* Add TxPAL between FCB and frame if required */ |
2285 | if (unlikely(do_tstamp)) { | |
2286 | skb_push(skb, GMAC_TXPAL_LEN); | |
2287 | memset(skb->data, 0, GMAC_TXPAL_LEN); | |
2288 | } | |
2289 | ||
0d0cffdc CM |
2290 | /* Add TxFCB if required */ |
2291 | if (fcb_len) { | |
54dc79fe | 2292 | fcb = gfar_add_fcb(skb); |
02d88fb4 | 2293 | lstatus |= BD_LFLAG(TXBD_TOE); |
0d0cffdc CM |
2294 | } |
2295 | ||
2296 | /* Set up checksumming */ | |
2297 | if (do_csum) { | |
2298 | gfar_tx_checksum(skb, fcb, fcb_len); | |
02d88fb4 CM |
2299 | |
2300 | if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || | |
2301 | unlikely(gfar_csum_errata_76(priv, skb->len))) { | |
4363c2fd AD |
2302 | __skb_pull(skb, GMAC_FCB_LEN); |
2303 | skb_checksum_help(skb); | |
0d0cffdc CM |
2304 | if (do_vlan || do_tstamp) { |
2305 | /* put back a new fcb for vlan/tstamp TOE */ | |
2306 | fcb = gfar_add_fcb(skb); | |
2307 | } else { | |
2308 | /* Tx TOE not used */ | |
2309 | lstatus &= ~(BD_LFLAG(TXBD_TOE)); | |
2310 | fcb = NULL; | |
2311 | } | |
4363c2fd | 2312 | } |
0bbaf069 KG |
2313 | } |
2314 | ||
0d0cffdc | 2315 | if (do_vlan) |
54dc79fe | 2316 | gfar_tx_vlan(skb, fcb); |
0bbaf069 | 2317 | |
f0ee7acf MR |
2318 | /* Setup tx hardware time stamping if requested */ |
2319 | if (unlikely(do_tstamp)) { | |
2244d07b | 2320 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
f0ee7acf | 2321 | fcb->ptp = 1; |
f0ee7acf MR |
2322 | } |
2323 | ||
369ec162 | 2324 | txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data, |
bc4598bc | 2325 | skb_headlen(skb), DMA_TO_DEVICE); |
1da177e4 | 2326 | |
0977f817 | 2327 | /* If time stamping is requested one additional TxBD must be set up. The |
f0ee7acf MR |
2328 | * first TxBD points to the FCB and must have a data length of |
2329 | * GMAC_FCB_LEN. The second TxBD points to the actual frame data with | |
2330 | * the full frame length. | |
2331 | */ | |
2332 | if (unlikely(do_tstamp)) { | |
0d0cffdc | 2333 | txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len; |
f0ee7acf | 2334 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | |
0d0cffdc | 2335 | (skb_headlen(skb) - fcb_len); |
f0ee7acf MR |
2336 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; |
2337 | } else { | |
2338 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); | |
2339 | } | |
1da177e4 | 2340 | |
50ad076b | 2341 | netdev_tx_sent_queue(txq, bytes_sent); |
d8a0f1b0 | 2342 | |
0977f817 | 2343 | /* We can work in parallel with gfar_clean_tx_ring(), except |
a3bc1f11 AV |
2344 | * when modifying num_txbdfree. Note that we didn't grab the lock |
2345 | * when we were reading the num_txbdfree and checking for available | |
2346 | * space, that's because outside of this function it can only grow, | |
2347 | * and once we've got needed space, it cannot suddenly disappear. | |
2348 | * | |
2349 | * The lock also protects us from gfar_error(), which can modify | |
2350 | * regs->tstat and thus retrigger the transfers, which is why we | |
2351 | * also must grab the lock before setting ready bit for the first | |
2352 | * to be transmitted BD. | |
2353 | */ | |
2354 | spin_lock_irqsave(&tx_queue->txlock, flags); | |
2355 | ||
0977f817 | 2356 | /* The powerpc-specific eieio() is used, as wmb() has too strong |
3b6330ce SW |
2357 | * semantics (it requires synchronization between cacheable and |
2358 | * uncacheable mappings, which eieio doesn't provide and which we | |
2359 | * don't need), thus requiring a more expensive sync instruction. At | |
2360 | * some point, the set of architecture-independent barrier functions | |
2361 | * should be expanded to include weaker barriers. | |
2362 | */ | |
3b6330ce | 2363 | eieio(); |
7f7f5316 | 2364 | |
4669bc90 DH |
2365 | txbdp_start->lstatus = lstatus; |
2366 | ||
0eddba52 AV |
2367 | eieio(); /* force lstatus write before tx_skbuff */ |
2368 | ||
2369 | tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; | |
2370 | ||
4669bc90 | 2371 | /* Update the current skb pointer to the next entry we will use |
0977f817 JC |
2372 | * (wrapping if necessary) |
2373 | */ | |
a12f801d | 2374 | tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & |
bc4598bc | 2375 | TX_RING_MOD_MASK(tx_queue->tx_ring_size); |
4669bc90 | 2376 | |
a12f801d | 2377 | tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
4669bc90 DH |
2378 | |
2379 | /* reduce TxBD free count */ | |
f0ee7acf | 2380 | tx_queue->num_txbdfree -= (nr_txbds); |
1da177e4 LT |
2381 | |
2382 | /* If the next BD still needs to be cleaned up, then the bds | |
0977f817 JC |
2383 | * are full. We need to tell the kernel to stop sending us stuff. |
2384 | */ | |
a12f801d | 2385 | if (!tx_queue->num_txbdfree) { |
fba4ed03 | 2386 | netif_tx_stop_queue(txq); |
1da177e4 | 2387 | |
09f75cd7 | 2388 | dev->stats.tx_fifo_errors++; |
1da177e4 LT |
2389 | } |
2390 | ||
1da177e4 | 2391 | /* Tell the DMA to go go go */ |
fba4ed03 | 2392 | gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); |
1da177e4 LT |
2393 | |
2394 | /* Unlock priv */ | |
a12f801d | 2395 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
1da177e4 | 2396 | |
54dc79fe | 2397 | return NETDEV_TX_OK; |
1da177e4 LT |
2398 | } |
2399 | ||
2400 | /* Stops the kernel queue, and halts the controller */ | |
2401 | static int gfar_close(struct net_device *dev) | |
2402 | { | |
2403 | struct gfar_private *priv = netdev_priv(dev); | |
bea3348e | 2404 | |
ab939905 | 2405 | cancel_work_sync(&priv->reset_task); |
1da177e4 LT |
2406 | stop_gfar(dev); |
2407 | ||
bb40dcbb AF |
2408 | /* Disconnect from the PHY */ |
2409 | phy_disconnect(priv->phydev); | |
2410 | priv->phydev = NULL; | |
1da177e4 | 2411 | |
80ec396c CM |
2412 | gfar_free_irq(priv); |
2413 | ||
1da177e4 LT |
2414 | return 0; |
2415 | } | |
2416 | ||
1da177e4 | 2417 | /* Changes the mac address if the controller is not running. */ |
f162b9d5 | 2418 | static int gfar_set_mac_address(struct net_device *dev) |
1da177e4 | 2419 | { |
7f7f5316 | 2420 | gfar_set_mac_for_addr(dev, 0, dev->dev_addr); |
1da177e4 LT |
2421 | |
2422 | return 0; | |
2423 | } | |
2424 | ||
1da177e4 LT |
2425 | static int gfar_change_mtu(struct net_device *dev, int new_mtu) |
2426 | { | |
1da177e4 | 2427 | struct gfar_private *priv = netdev_priv(dev); |
0bbaf069 KG |
2428 | int frame_size = new_mtu + ETH_HLEN; |
2429 | ||
1da177e4 | 2430 | if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { |
59deab26 | 2431 | netif_err(priv, drv, dev, "Invalid MTU setting\n"); |
1da177e4 LT |
2432 | return -EINVAL; |
2433 | } | |
2434 | ||
0851133b CM |
2435 | while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) |
2436 | cpu_relax(); | |
2437 | ||
88302648 | 2438 | if (dev->flags & IFF_UP) |
1da177e4 LT |
2439 | stop_gfar(dev); |
2440 | ||
1da177e4 LT |
2441 | dev->mtu = new_mtu; |
2442 | ||
88302648 | 2443 | if (dev->flags & IFF_UP) |
1da177e4 LT |
2444 | startup_gfar(dev); |
2445 | ||
0851133b CM |
2446 | clear_bit_unlock(GFAR_RESETTING, &priv->state); |
2447 | ||
1da177e4 LT |
2448 | return 0; |
2449 | } | |
2450 | ||
0851133b CM |
2451 | void reset_gfar(struct net_device *ndev) |
2452 | { | |
2453 | struct gfar_private *priv = netdev_priv(ndev); | |
2454 | ||
2455 | while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) | |
2456 | cpu_relax(); | |
2457 | ||
2458 | stop_gfar(ndev); | |
2459 | startup_gfar(ndev); | |
2460 | ||
2461 | clear_bit_unlock(GFAR_RESETTING, &priv->state); | |
2462 | } | |
2463 | ||
ab939905 | 2464 | /* gfar_reset_task gets scheduled when a packet has not been |
1da177e4 LT |
2465 | * transmitted after a set amount of time. |
2466 | * For now, assume that clearing out all the structures, and | |
ab939905 SS |
2467 | * starting over will fix the problem. |
2468 | */ | |
2469 | static void gfar_reset_task(struct work_struct *work) | |
1da177e4 | 2470 | { |
ab939905 | 2471 | struct gfar_private *priv = container_of(work, struct gfar_private, |
bc4598bc | 2472 | reset_task); |
0851133b | 2473 | reset_gfar(priv->ndev); |
1da177e4 LT |
2474 | } |
2475 | ||
ab939905 SS |
2476 | static void gfar_timeout(struct net_device *dev) |
2477 | { | |
2478 | struct gfar_private *priv = netdev_priv(dev); | |
2479 | ||
2480 | dev->stats.tx_errors++; | |
2481 | schedule_work(&priv->reset_task); | |
2482 | } | |
2483 | ||
acbc0f03 EL |
2484 | static void gfar_align_skb(struct sk_buff *skb) |
2485 | { | |
2486 | /* We need the data buffer to be aligned properly. We will reserve | |
2487 | * as many bytes as needed to align the data properly | |
2488 | */ | |
2489 | skb_reserve(skb, RXBUF_ALIGNMENT - | |
bc4598bc | 2490 | (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); |
acbc0f03 EL |
2491 | } |
2492 | ||
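/* Editor's note -- illustration assuming RXBUF_ALIGNMENT is a power of two
 * such as 64 (an assumption, check the header): if skb->data ends in 0x28,
 * the reserve above is 64 - 0x28 = 0x18 bytes, leaving the Rx buffer on a
 * 64-byte boundary; a buffer that is already aligned gets a full
 * RXBUF_ALIGNMENT bytes reserved.
 */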
1da177e4 | 2493 | /* Interrupt Handler for Transmit complete */ |
c233cf40 | 2494 | static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) |
1da177e4 | 2495 | { |
a12f801d | 2496 | struct net_device *dev = tx_queue->dev; |
d8a0f1b0 | 2497 | struct netdev_queue *txq; |
d080cd63 | 2498 | struct gfar_private *priv = netdev_priv(dev); |
f0ee7acf | 2499 | struct txbd8 *bdp, *next = NULL; |
4669bc90 | 2500 | struct txbd8 *lbdp = NULL; |
a12f801d | 2501 | struct txbd8 *base = tx_queue->tx_bd_base; |
4669bc90 DH |
2502 | struct sk_buff *skb; |
2503 | int skb_dirtytx; | |
a12f801d | 2504 | int tx_ring_size = tx_queue->tx_ring_size; |
f0ee7acf | 2505 | int frags = 0, nr_txbds = 0; |
4669bc90 | 2506 | int i; |
d080cd63 | 2507 | int howmany = 0; |
d8a0f1b0 PG |
2508 | int tqi = tx_queue->qindex; |
2509 | unsigned int bytes_sent = 0; | |
4669bc90 | 2510 | u32 lstatus; |
f0ee7acf | 2511 | size_t buflen; |
1da177e4 | 2512 | |
d8a0f1b0 | 2513 | txq = netdev_get_tx_queue(dev, tqi); |
a12f801d SG |
2514 | bdp = tx_queue->dirty_tx; |
2515 | skb_dirtytx = tx_queue->skb_dirtytx; | |
1da177e4 | 2516 | |
a12f801d | 2517 | while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { |
a3bc1f11 AV |
2518 | unsigned long flags; |
2519 | ||
4669bc90 | 2520 | frags = skb_shinfo(skb)->nr_frags; |
f0ee7acf | 2521 | |
0977f817 | 2522 | /* When time stamping, one additional TxBD must be freed. |
f0ee7acf MR |
2523 | * Also, we need to dma_unmap_single() the TxPAL. |
2524 | */ | |
2244d07b | 2525 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) |
f0ee7acf MR |
2526 | nr_txbds = frags + 2; |
2527 | else | |
2528 | nr_txbds = frags + 1; | |
2529 | ||
2530 | lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); | |
1da177e4 | 2531 | |
4669bc90 | 2532 | lstatus = lbdp->lstatus; |
1da177e4 | 2533 | |
4669bc90 DH |
2534 | /* Only clean completed frames */ |
2535 | if ((lstatus & BD_LFLAG(TXBD_READY)) && | |
bc4598bc | 2536 | (lstatus & BD_LENGTH_MASK)) |
4669bc90 DH |
2537 | break; |
2538 | ||
2244d07b | 2539 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { |
f0ee7acf | 2540 | next = next_txbd(bdp, base, tx_ring_size); |
9c4886e5 | 2541 | buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN; |
f0ee7acf MR |
2542 | } else |
2543 | buflen = bdp->length; | |
2544 | ||
369ec162 | 2545 | dma_unmap_single(priv->dev, bdp->bufPtr, |
bc4598bc | 2546 | buflen, DMA_TO_DEVICE); |
f0ee7acf | 2547 | |
2244d07b | 2548 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { |
f0ee7acf MR |
2549 | struct skb_shared_hwtstamps shhwtstamps; |
2550 | u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); | |
bc4598bc | 2551 | |
f0ee7acf MR |
2552 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
2553 | shhwtstamps.hwtstamp = ns_to_ktime(*ns); | |
9c4886e5 | 2554 | skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); |
f0ee7acf MR |
2555 | skb_tstamp_tx(skb, &shhwtstamps); |
2556 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | |
2557 | bdp = next; | |
2558 | } | |
81183059 | 2559 | |
4669bc90 DH |
2560 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); |
2561 | bdp = next_txbd(bdp, base, tx_ring_size); | |
d080cd63 | 2562 | |
4669bc90 | 2563 | for (i = 0; i < frags; i++) { |
369ec162 | 2564 | dma_unmap_page(priv->dev, bdp->bufPtr, |
bc4598bc | 2565 | bdp->length, DMA_TO_DEVICE); |
4669bc90 DH |
2566 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); |
2567 | bdp = next_txbd(bdp, base, tx_ring_size); | |
2568 | } | |
1da177e4 | 2569 | |
50ad076b | 2570 | bytes_sent += GFAR_CB(skb)->bytes_sent; |
d8a0f1b0 | 2571 | |
acb600de | 2572 | dev_kfree_skb_any(skb); |
0fd56bb5 | 2573 | |
a12f801d | 2574 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; |
d080cd63 | 2575 | |
4669bc90 | 2576 | skb_dirtytx = (skb_dirtytx + 1) & |
bc4598bc | 2577 | TX_RING_MOD_MASK(tx_ring_size); |
4669bc90 DH |
2578 | |
2579 | howmany++; | |
a3bc1f11 | 2580 | spin_lock_irqsave(&tx_queue->txlock, flags); |
f0ee7acf | 2581 | tx_queue->num_txbdfree += nr_txbds; |
a3bc1f11 | 2582 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
4669bc90 | 2583 | } |
1da177e4 | 2584 | |
4669bc90 | 2585 | /* If we freed a buffer, we can restart transmission, if necessary */ |
0851133b CM |
2586 | if (tx_queue->num_txbdfree && |
2587 | netif_tx_queue_stopped(txq) && | |
2588 | !(test_bit(GFAR_DOWN, &priv->state))) | |
2589 | netif_wake_subqueue(priv->ndev, tqi); | |
1da177e4 | 2590 | |
4669bc90 | 2591 | /* Update dirty indicators */ |
a12f801d SG |
2592 | tx_queue->skb_dirtytx = skb_dirtytx; |
2593 | tx_queue->dirty_tx = bdp; | |
1da177e4 | 2594 | |
d8a0f1b0 | 2595 | netdev_tx_completed_queue(txq, howmany, bytes_sent); |
d080cd63 DH |
2596 | } |
2597 | ||
a12f801d | 2598 | static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, |
bc4598bc | 2599 | struct sk_buff *skb) |
815b97c6 | 2600 | { |
a12f801d | 2601 | struct net_device *dev = rx_queue->dev; |
815b97c6 | 2602 | struct gfar_private *priv = netdev_priv(dev); |
8a102fe0 | 2603 | dma_addr_t buf; |
815b97c6 | 2604 | |
369ec162 | 2605 | buf = dma_map_single(priv->dev, skb->data, |
8a102fe0 | 2606 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
a12f801d | 2607 | gfar_init_rxbdp(rx_queue, bdp, buf); |
815b97c6 AF |
2608 | } |
2609 | ||
2281a0f3 | 2610 | static struct sk_buff *gfar_alloc_skb(struct net_device *dev) |
1da177e4 LT |
2611 | { |
2612 | struct gfar_private *priv = netdev_priv(dev); | |
acb600de | 2613 | struct sk_buff *skb; |
1da177e4 | 2614 | |
acbc0f03 | 2615 | skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); |
815b97c6 | 2616 | if (!skb) |
1da177e4 LT |
2617 | return NULL; |
2618 | ||
acbc0f03 | 2619 | gfar_align_skb(skb); |
7f7f5316 | 2620 | |
acbc0f03 EL |
2621 | return skb; |
2622 | } | |
2623 | ||
2281a0f3 | 2624 | struct sk_buff *gfar_new_skb(struct net_device *dev) |
acbc0f03 | 2625 | { |
acb600de | 2626 | return gfar_alloc_skb(dev); |
1da177e4 LT |
2627 | } |
2628 | ||
298e1a9e | 2629 | static inline void count_errors(unsigned short status, struct net_device *dev) |
1da177e4 | 2630 | { |
298e1a9e | 2631 | struct gfar_private *priv = netdev_priv(dev); |
09f75cd7 | 2632 | struct net_device_stats *stats = &dev->stats; |
1da177e4 LT |
2633 | struct gfar_extra_stats *estats = &priv->extra_stats; |
2634 | ||
0977f817 | 2635 | /* If the packet was truncated, none of the other errors matter */ |
1da177e4 LT |
2636 | if (status & RXBD_TRUNCATED) { |
2637 | stats->rx_length_errors++; | |
2638 | ||
212079df | 2639 | atomic64_inc(&estats->rx_trunc); |
1da177e4 LT |
2640 | |
2641 | return; | |
2642 | } | |
2643 | /* Count the errors, if there were any */ | |
2644 | if (status & (RXBD_LARGE | RXBD_SHORT)) { | |
2645 | stats->rx_length_errors++; | |
2646 | ||
2647 | if (status & RXBD_LARGE) | |
212079df | 2648 | atomic64_inc(&estats->rx_large); |
1da177e4 | 2649 | else |
212079df | 2650 | atomic64_inc(&estats->rx_short); |
1da177e4 LT |
2651 | } |
2652 | if (status & RXBD_NONOCTET) { | |
2653 | stats->rx_frame_errors++; | |
212079df | 2654 | atomic64_inc(&estats->rx_nonoctet); |
1da177e4 LT |
2655 | } |
2656 | if (status & RXBD_CRCERR) { | |
212079df | 2657 | atomic64_inc(&estats->rx_crcerr); |
1da177e4 LT |
2658 | stats->rx_crc_errors++; |
2659 | } | |
2660 | if (status & RXBD_OVERRUN) { | |
212079df | 2661 | atomic64_inc(&estats->rx_overrun); |
1da177e4 LT |
2662 | stats->rx_crc_errors++; |
2663 | } | |
2664 | } | |
2665 | ||
f4983704 | 2666 | irqreturn_t gfar_receive(int irq, void *grp_id) |
1da177e4 | 2667 | { |
aeb12c5e CM |
2668 | struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; |
2669 | unsigned long flags; | |
2670 | u32 imask; | |
2671 | ||
2672 | if (likely(napi_schedule_prep(&grp->napi_rx))) { | |
2673 | spin_lock_irqsave(&grp->grplock, flags); | |
2674 | imask = gfar_read(&grp->regs->imask); | |
2675 | imask &= IMASK_RX_DISABLED; | |
2676 | gfar_write(&grp->regs->imask, imask); | |
2677 | spin_unlock_irqrestore(&grp->grplock, flags); | |
2678 | __napi_schedule(&grp->napi_rx); | |
2679 | } else { | |
2680 | /* Clear IEVENT, so interrupts aren't called again | |
2681 | * because of the packets that have already arrived. | |
2682 | */ | |
2683 | gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); | |
2684 | } | |
2685 | ||
2686 | return IRQ_HANDLED; | |
2687 | } | |
2688 | ||
2689 | /* Interrupt Handler for Transmit complete */ | |
2690 | static irqreturn_t gfar_transmit(int irq, void *grp_id) | |
2691 | { | |
2692 | struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; | |
2693 | unsigned long flags; | |
2694 | u32 imask; | |
2695 | ||
2696 | if (likely(napi_schedule_prep(&grp->napi_tx))) { | |
2697 | spin_lock_irqsave(&grp->grplock, flags); | |
2698 | imask = gfar_read(&grp->regs->imask); | |
2699 | imask &= IMASK_TX_DISABLED; | |
2700 | gfar_write(&grp->regs->imask, imask); | |
2701 | spin_unlock_irqrestore(&grp->grplock, flags); | |
2702 | __napi_schedule(&grp->napi_tx); | |
2703 | } else { | |
2704 | /* Clear IEVENT, so interrupts aren't called again | |
2705 | * because of the packets that have already arrived. | |
2706 | */ | |
2707 | gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); | |
2708 | } | |
2709 | ||
1da177e4 LT |
2710 | return IRQ_HANDLED; |
2711 | } | |
2712 | ||
0bbaf069 KG |
2713 | static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) |
2714 | { | |
2715 | /* If valid headers were found, and valid sums | |
2716 | * were verified, then we tell the kernel that no | |
0977f817 JC |
2717 | * checksumming is necessary. Otherwise, it is [FIXME] |
2718 | */ | |
7f7f5316 | 2719 | if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) |
0bbaf069 KG |
2720 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2721 | else | |
bc8acf2c | 2722 | skb_checksum_none_assert(skb); |
0bbaf069 KG |
2723 | } |
2724 | ||
2725 | ||
0977f817 | 2726 | /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ |
61db26c6 CM |
2727 | static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, |
2728 | int amount_pull, struct napi_struct *napi) | |
1da177e4 LT |
2729 | { |
2730 | struct gfar_private *priv = netdev_priv(dev); | |
0bbaf069 | 2731 | struct rxfcb *fcb = NULL; |
1da177e4 | 2732 | |
2c2db48a DH |
2733 | /* fcb is at the beginning if exists */ |
2734 | fcb = (struct rxfcb *)skb->data; | |
0bbaf069 | 2735 | |
0977f817 JC |
2736 | /* Remove the FCB from the skb |
2737 | * Remove the padded bytes, if there are any | |
2738 | */ | |
f74dac08 SG |
2739 | if (amount_pull) { |
2740 | skb_record_rx_queue(skb, fcb->rq); | |
2c2db48a | 2741 | skb_pull(skb, amount_pull); |
f74dac08 | 2742 | } |
0bbaf069 | 2743 | |
cc772ab7 MR |
2744 | /* Get receive timestamp from the skb */ |
2745 | if (priv->hwts_rx_en) { | |
2746 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); | |
2747 | u64 *ns = (u64 *) skb->data; | |
bc4598bc | 2748 | |
cc772ab7 MR |
2749 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); |
2750 | shhwtstamps->hwtstamp = ns_to_ktime(*ns); | |
2751 | } | |
2752 | ||
2753 | if (priv->padding) | |
2754 | skb_pull(skb, priv->padding); | |
2755 | ||
8b3afe95 | 2756 | if (dev->features & NETIF_F_RXCSUM) |
2c2db48a | 2757 | gfar_rx_checksum(skb, fcb); |
0bbaf069 | 2758 | |
2c2db48a DH |
2759 | /* Tell the skb what kind of packet this is */ |
2760 | skb->protocol = eth_type_trans(skb, dev); | |
1da177e4 | 2761 | |
f646968f | 2762 | /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. |
32f7fd44 JP |
2763 | * Even if vlan rx accel is disabled, on some chips |
2764 | * RXFCB_VLN is pseudo randomly set. | |
2765 | */ | |
f646968f | 2766 | if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && |
32f7fd44 | 2767 | fcb->flags & RXFCB_VLN) |
e5905c83 | 2768 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl); |
87c288c6 | 2769 | |
2c2db48a | 2770 | /* Send the packet up the stack */ |
953d2768 | 2771 | napi_gro_receive(napi, skb); |
0bbaf069 | 2772 | |
1da177e4 LT |
2773 | } |
2774 | ||
2775 | /* gfar_clean_rx_ring() -- Processes each frame in the rx ring | |
2281a0f3 JC |
2776 | * until the budget/quota has been reached. Returns the number |
2777 | * of frames handled | |
1da177e4 | 2778 | */ |
a12f801d | 2779 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) |
1da177e4 | 2780 | { |
a12f801d | 2781 | struct net_device *dev = rx_queue->dev; |
31de198b | 2782 | struct rxbd8 *bdp, *base; |
1da177e4 | 2783 | struct sk_buff *skb; |
2c2db48a DH |
2784 | int pkt_len; |
2785 | int amount_pull; | |
1da177e4 LT |
2786 | int howmany = 0; |
2787 | struct gfar_private *priv = netdev_priv(dev); | |
2788 | ||
2789 | /* Get the first full descriptor */ | |
a12f801d SG |
2790 | bdp = rx_queue->cur_rx; |
2791 | base = rx_queue->rx_bd_base; | |
1da177e4 | 2792 | |
ba779711 | 2793 | amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0; |

        while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
                struct sk_buff *newskb;

                rmb();

                /* Add another skb for the future */
                newskb = gfar_new_skb(dev);

                skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

                dma_unmap_single(priv->dev, bdp->bufPtr,
                                 priv->rx_buffer_size, DMA_FROM_DEVICE);

                if (unlikely(!(bdp->status & RXBD_ERR) &&
                             bdp->length > priv->rx_buffer_size))
                        bdp->status = RXBD_LARGE;

                /* We drop the frame if we failed to allocate a new buffer */
                if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
                             bdp->status & RXBD_ERR)) {
                        count_errors(bdp->status, dev);

                        if (unlikely(!newskb))
                                newskb = skb;
                        else if (skb)
                                dev_kfree_skb(skb);
                } else {
                        /* Increment the number of packets */
                        rx_queue->stats.rx_packets++;
                        howmany++;

                        if (likely(skb)) {
                                pkt_len = bdp->length - ETH_FCS_LEN;
                                /* Remove the FCS from the packet length */
                                skb_put(skb, pkt_len);
                                rx_queue->stats.rx_bytes += pkt_len;
                                skb_record_rx_queue(skb, rx_queue->qindex);
                                gfar_process_frame(dev, skb, amount_pull,
                                                   &rx_queue->grp->napi_rx);

                        } else {
                                netif_warn(priv, rx_err, dev, "Missing skb!\n");
                                rx_queue->stats.rx_dropped++;
                                atomic64_inc(&priv->extra_stats.rx_skbmissing);
                        }

                }

                rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

                /* Setup the new bdp */
                gfar_new_rxbdp(rx_queue, bdp, newskb);

                /* Update to the next pointer */
                bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

                /* update to point at the next skb */
                rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
                                      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
        }

        /* Update the current rxbd pointer to be the next one */
        rx_queue->cur_rx = bdp;

        return howmany;
}

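/* The single-queue ("sq") pollers below are presumably registered when a
 * group owns exactly one RX and one TX ring; they skip the per-queue RSTAT
 * bitmap scan that gfar_poll_rx()/gfar_poll_tx() further down must do, and
 * gfar_poll_rx_sq() only re-enables RX interrupts once the ring has
 * returned fewer frames than the budget.
 */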
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
        struct gfar_priv_grp *gfargrp =
                container_of(napi, struct gfar_priv_grp, napi_rx);
        struct gfar __iomem *regs = gfargrp->regs;
        struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
        int work_done = 0;

        /* Clear IEVENT, so interrupts aren't called again
         * because of the packets that have already arrived
         */
        gfar_write(&regs->ievent, IEVENT_RX_MASK);

        work_done = gfar_clean_rx_ring(rx_queue, budget);

        if (work_done < budget) {
                u32 imask;
                napi_complete(napi);
                /* Clear the halt bit in RSTAT */
                gfar_write(&regs->rstat, gfargrp->rstat);

                spin_lock_irq(&gfargrp->grplock);
                imask = gfar_read(&regs->imask);
                imask |= IMASK_RX_DEFAULT;
                gfar_write(&regs->imask, imask);
                spin_unlock_irq(&gfargrp->grplock);
        }

        return work_done;
}

static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
{
        struct gfar_priv_grp *gfargrp =
                container_of(napi, struct gfar_priv_grp, napi_tx);
        struct gfar __iomem *regs = gfargrp->regs;
        struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
        u32 imask;

        /* Clear IEVENT, so interrupts aren't called again
         * because of the packets that have already arrived
         */
        gfar_write(&regs->ievent, IEVENT_TX_MASK);

        /* run Tx cleanup to completion */
        if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
                gfar_clean_tx_ring(tx_queue);

        napi_complete(napi);

        spin_lock_irq(&gfargrp->grplock);
        imask = gfar_read(&regs->imask);
        imask |= IMASK_TX_DEFAULT;
        gfar_write(&regs->imask, imask);
        spin_unlock_irq(&gfargrp->grplock);

        return 0;
}

static int gfar_poll_rx(struct napi_struct *napi, int budget)
{
        struct gfar_priv_grp *gfargrp =
                container_of(napi, struct gfar_priv_grp, napi_rx);
        struct gfar_private *priv = gfargrp->priv;
        struct gfar __iomem *regs = gfargrp->regs;
        struct gfar_priv_rx_q *rx_queue = NULL;
        int work_done = 0, work_done_per_q = 0;
        int i, budget_per_q = 0;
        unsigned long rstat_rxf;
        int num_act_queues;

        /* Clear IEVENT, so interrupts aren't called again
         * because of the packets that have already arrived
         */
        gfar_write(&regs->ievent, IEVENT_RX_MASK);

        rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;

        num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
        if (num_act_queues)
                budget_per_q = budget / num_act_queues;
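        /* The poll budget is split evenly across the queues whose RXF bit
         * is currently set in RSTAT; e.g. a budget of 64 with two active
         * queues lets each ring clean at most 32 frames in this pass.
         */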

        for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
                /* skip queue if not active */
                if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
                        continue;

                rx_queue = priv->rx_queue[i];
                work_done_per_q =
                        gfar_clean_rx_ring(rx_queue, budget_per_q);
                work_done += work_done_per_q;

                /* finished processing this queue */
                if (work_done_per_q < budget_per_q) {
                        /* clear active queue hw indication */
                        gfar_write(&regs->rstat,
                                   RSTAT_CLEAR_RXF0 >> i);
                        num_act_queues--;

                        if (!num_act_queues)
                                break;
                }
        }

        if (!num_act_queues) {
                u32 imask;
                napi_complete(napi);

                /* Clear the halt bit in RSTAT */
                gfar_write(&regs->rstat, gfargrp->rstat);

                spin_lock_irq(&gfargrp->grplock);
                imask = gfar_read(&regs->imask);
                imask |= IMASK_RX_DEFAULT;
                gfar_write(&regs->imask, imask);
                spin_unlock_irq(&gfargrp->grplock);
        }

        return work_done;
}

static int gfar_poll_tx(struct napi_struct *napi, int budget)
{
        struct gfar_priv_grp *gfargrp =
                container_of(napi, struct gfar_priv_grp, napi_tx);
        struct gfar_private *priv = gfargrp->priv;
        struct gfar __iomem *regs = gfargrp->regs;
        struct gfar_priv_tx_q *tx_queue = NULL;
        int has_tx_work = 0;
        int i;

        /* Clear IEVENT, so interrupts aren't called again
         * because of the packets that have already arrived
         */
        gfar_write(&regs->ievent, IEVENT_TX_MASK);

        for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
                tx_queue = priv->tx_queue[i];
                /* run Tx cleanup to completion */
                if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
                        gfar_clean_tx_ring(tx_queue);
                        has_tx_work = 1;
                }
        }

        if (!has_tx_work) {
                u32 imask;
                napi_complete(napi);

                spin_lock_irq(&gfargrp->grplock);
                imask = gfar_read(&regs->imask);
                imask |= IMASK_TX_DEFAULT;
                gfar_write(&regs->imask, imask);
                spin_unlock_irq(&gfargrp->grplock);
        }

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        int i;

        /* If the device has multiple interrupts, run tx/rx */
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                for (i = 0; i < priv->num_grps; i++) {
                        struct gfar_priv_grp *grp = &priv->gfargrp[i];

                        disable_irq(gfar_irq(grp, TX)->irq);
                        disable_irq(gfar_irq(grp, RX)->irq);
                        disable_irq(gfar_irq(grp, ER)->irq);
                        gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
                        enable_irq(gfar_irq(grp, ER)->irq);
                        enable_irq(gfar_irq(grp, RX)->irq);
                        enable_irq(gfar_irq(grp, TX)->irq);
                }
        } else {
                for (i = 0; i < priv->num_grps; i++) {
                        struct gfar_priv_grp *grp = &priv->gfargrp[i];

                        disable_irq(gfar_irq(grp, TX)->irq);
                        gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
                        enable_irq(gfar_irq(grp, TX)->irq);
                }
        }
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
        struct gfar_priv_grp *gfargrp = grp_id;

        /* Save ievent for future reference */
        u32 events = gfar_read(&gfargrp->regs->ievent);

        /* Check for reception */
        if (events & IEVENT_RX_MASK)
                gfar_receive(irq, grp_id);

        /* Check for transmit completion */
        if (events & IEVENT_TX_MASK)
                gfar_transmit(irq, grp_id);

        /* Check for errors */
        if (events & IEVENT_ERR_MASK)
                gfar_error(irq, grp_id);

        return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;

        if (unlikely(phydev->link != priv->oldlink ||
                     phydev->duplex != priv->oldduplex ||
                     phydev->speed != priv->oldspeed))
                gfar_update_link_state(priv);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to. Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed).
 */
static void gfar_set_multi(struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;

        if (dev->flags & IFF_PROMISC) {
                /* Set RCTRL to PROM */
                tempval = gfar_read(&regs->rctrl);
                tempval |= RCTRL_PROM;
                gfar_write(&regs->rctrl, tempval);
        } else {
                /* Set RCTRL to not PROM */
                tempval = gfar_read(&regs->rctrl);
                tempval &= ~(RCTRL_PROM);
                gfar_write(&regs->rctrl, tempval);
        }

        if (dev->flags & IFF_ALLMULTI) {
                /* Set the hash to rx all multicast frames */
                gfar_write(&regs->igaddr0, 0xffffffff);
                gfar_write(&regs->igaddr1, 0xffffffff);
                gfar_write(&regs->igaddr2, 0xffffffff);
                gfar_write(&regs->igaddr3, 0xffffffff);
                gfar_write(&regs->igaddr4, 0xffffffff);
                gfar_write(&regs->igaddr5, 0xffffffff);
                gfar_write(&regs->igaddr6, 0xffffffff);
                gfar_write(&regs->igaddr7, 0xffffffff);
                gfar_write(&regs->gaddr0, 0xffffffff);
                gfar_write(&regs->gaddr1, 0xffffffff);
                gfar_write(&regs->gaddr2, 0xffffffff);
                gfar_write(&regs->gaddr3, 0xffffffff);
                gfar_write(&regs->gaddr4, 0xffffffff);
                gfar_write(&regs->gaddr5, 0xffffffff);
                gfar_write(&regs->gaddr6, 0xffffffff);
                gfar_write(&regs->gaddr7, 0xffffffff);
        } else {
                int em_num;
                int idx;

                /* zero out the hash */
                gfar_write(&regs->igaddr0, 0x0);
                gfar_write(&regs->igaddr1, 0x0);
                gfar_write(&regs->igaddr2, 0x0);
                gfar_write(&regs->igaddr3, 0x0);
                gfar_write(&regs->igaddr4, 0x0);
                gfar_write(&regs->igaddr5, 0x0);
                gfar_write(&regs->igaddr6, 0x0);
                gfar_write(&regs->igaddr7, 0x0);
                gfar_write(&regs->gaddr0, 0x0);
                gfar_write(&regs->gaddr1, 0x0);
                gfar_write(&regs->gaddr2, 0x0);
                gfar_write(&regs->gaddr3, 0x0);
                gfar_write(&regs->gaddr4, 0x0);
                gfar_write(&regs->gaddr5, 0x0);
                gfar_write(&regs->gaddr6, 0x0);
                gfar_write(&regs->gaddr7, 0x0);

                /* If we have extended hash tables, we need to
                 * clear the exact match registers to prepare for
                 * setting them
                 */
                if (priv->extended_hash) {
                        em_num = GFAR_EM_NUM + 1;
                        gfar_clear_exact_match(dev);
                        idx = 1;
                } else {
                        idx = 0;
                        em_num = 0;
                }
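                /* With extended hashing, up to GFAR_EM_NUM multicast
                 * addresses go into exact-match registers (idx starts at 1,
                 * presumably because entry 0 holds the station's own MAC
                 * address); any further addresses fall back to the group
                 * hash filter below.
                 */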

                if (netdev_mc_empty(dev))
                        return;

                /* Parse the list, and set the appropriate bits */
                netdev_for_each_mc_addr(ha, dev) {
                        if (idx < em_num) {
                                gfar_set_mac_for_addr(dev, idx, ha->addr);
                                idx++;
                        } else
                                gfar_set_hash_for_addr(dev, ha->addr);
                }
        }
}


/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
        int idx;
        static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

        for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
                gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table. The table is controlled through 8 32-bit registers:
 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255. This means that the 3 most significant bits of the
 * hash select which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry.
 */
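/* Illustration, assuming an 8-bit hash width: if the top byte of result is
 * 0xb5 (1011 0101), then whichreg = 0b101 = 5 and whichbit = 0b10101 = 21,
 * so bit 21 in IBM numbering, i.e. 1 << (31 - 21), is OR-ed into hash
 * register 5.
 */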
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
        u32 tempval;
        struct gfar_private *priv = netdev_priv(dev);
        u32 result = ether_crc(ETH_ALEN, addr);
        int width = priv->hash_width;
        u8 whichbit = (result >> (32 - width)) & 0x1f;
        u8 whichreg = result >> (32 - width + 5);
        u32 value = (1 << (31 - whichbit));

        tempval = gfar_read(priv->hash_regs[whichreg]);
        tempval |= value;
        gfar_write(priv->hash_regs[whichreg], tempval);
}


/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
                                  const u8 *addr)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        int idx;
        char tmpbuf[ETH_ALEN];
        u32 tempval;
        u32 __iomem *macptr = &regs->macstnaddr1;

        macptr += num*2;
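        /* Each address pair occupies two consecutive 32-bit registers after
         * the station address registers, so stepping num * 2 words is
         * presumed to land on pair num (pair 0 being the station address
         * itself).
         */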

        /* Now copy it into the mac registers backwards, cuz
         * little endian is silly
         */
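        /* e.g. addr = 00:04:9f:01:02:03 ends up in tmpbuf as
         * 03 02 01 9f 04 00 before being written to the register pair.
         */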
        for (idx = 0; idx < ETH_ALEN; idx++)
                tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];

        gfar_write(macptr, *((u32 *) (tmpbuf)));

        tempval = *((u32 *) (tmpbuf + 4));

        gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
        struct gfar_priv_grp *gfargrp = grp_id;
        struct gfar __iomem *regs = gfargrp->regs;
        struct gfar_private *priv = gfargrp->priv;
        struct net_device *dev = priv->ndev;

        /* Save ievent for future reference */
        u32 events = gfar_read(&regs->ievent);

        /* Clear IEVENT */
        gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

        /* Magic Packet is not an error. */
        if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
            (events & IEVENT_MAG))
                events &= ~IEVENT_MAG;

        /* Hmm... */
        if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
                netdev_dbg(dev,
                           "error interrupt (ievent=0x%08x imask=0x%08x)\n",
                           events, gfar_read(&regs->imask));

        /* Update the error counters */
        if (events & IEVENT_TXE) {
                dev->stats.tx_errors++;

                if (events & IEVENT_LC)
                        dev->stats.tx_window_errors++;
                if (events & IEVENT_CRL)
                        dev->stats.tx_aborted_errors++;
                if (events & IEVENT_XFUN) {
                        unsigned long flags;

                        netif_dbg(priv, tx_err, dev,
                                  "TX FIFO underrun, packet dropped\n");
                        dev->stats.tx_dropped++;
                        atomic64_inc(&priv->extra_stats.tx_underrun);

                        local_irq_save(flags);
                        lock_tx_qs(priv);

                        /* Reactivate the Tx Queues */
                        gfar_write(&regs->tstat, gfargrp->tstat);

                        unlock_tx_qs(priv);
                        local_irq_restore(flags);
                }
                netif_dbg(priv, tx_err, dev, "Transmit Error\n");
        }
        if (events & IEVENT_BSY) {
                dev->stats.rx_errors++;
                atomic64_inc(&priv->extra_stats.rx_bsy);

                gfar_receive(irq, grp_id);

                netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
                          gfar_read(&regs->rstat));
        }
        if (events & IEVENT_BABR) {
                dev->stats.rx_errors++;
                atomic64_inc(&priv->extra_stats.rx_babr);

                netif_dbg(priv, rx_err, dev, "babbling RX error\n");
        }
        if (events & IEVENT_EBERR) {
                atomic64_inc(&priv->extra_stats.eberr);
                netif_dbg(priv, rx_err, dev, "bus error\n");
        }
        if (events & IEVENT_RXC)
                netif_dbg(priv, rx_status, dev, "control frame\n");

        if (events & IEVENT_BABT) {
                atomic64_inc(&priv->extra_stats.tx_babt);
                netif_dbg(priv, tx_err, dev, "babbling TX error\n");
        }
        return IRQ_HANDLED;
}

static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
        struct phy_device *phydev = priv->phydev;
        u32 val = 0;

        if (!phydev->duplex)
                return val;

        if (!priv->pause_aneg_en) {
                if (priv->tx_pause_en)
                        val |= MACCFG1_TX_FLOW;
                if (priv->rx_pause_en)
                        val |= MACCFG1_RX_FLOW;
        } else {
                u16 lcl_adv, rmt_adv;
                u8 flowctrl;
                /* get link partner capabilities */
                rmt_adv = 0;
                if (phydev->pause)
                        rmt_adv = LPA_PAUSE_CAP;
                if (phydev->asym_pause)
                        rmt_adv |= LPA_PAUSE_ASYM;

                lcl_adv = mii_advertise_flowctrl(phydev->advertising);

                flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
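                /* e.g. if both ends advertise symmetric pause,
                 * mii_resolve_flowctrl_fdx() returns
                 * FLOW_CTRL_TX | FLOW_CTRL_RX and both MACCFG1 flow
                 * control bits get set below.
                 */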
                if (flowctrl & FLOW_CTRL_TX)
                        val |= MACCFG1_TX_FLOW;
                if (flowctrl & FLOW_CTRL_RX)
                        val |= MACCFG1_RX_FLOW;
        }

        return val;
}

static noinline void gfar_update_link_state(struct gfar_private *priv)
{
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        struct phy_device *phydev = priv->phydev;

        if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
                return;

        if (phydev->link) {
                u32 tempval1 = gfar_read(&regs->maccfg1);
                u32 tempval = gfar_read(&regs->maccfg2);
                u32 ecntrl = gfar_read(&regs->ecntrl);

                if (phydev->duplex != priv->oldduplex) {
                        if (!(phydev->duplex))
                                tempval &= ~(MACCFG2_FULL_DUPLEX);
                        else
                                tempval |= MACCFG2_FULL_DUPLEX;

                        priv->oldduplex = phydev->duplex;
                }

                if (phydev->speed != priv->oldspeed) {
                        switch (phydev->speed) {
                        case 1000:
                                tempval =
                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

                                ecntrl &= ~(ECNTRL_R100);
                                break;
                        case 100:
                        case 10:
                                tempval =
                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

                                /* Reduced mode distinguishes
                                 * between 10 and 100
                                 */
                                if (phydev->speed == SPEED_100)
                                        ecntrl |= ECNTRL_R100;
                                else
                                        ecntrl &= ~(ECNTRL_R100);
                                break;
                        default:
                                netif_warn(priv, link, priv->ndev,
                                           "Ack! Speed (%d) is not 10/100/1000!\n",
                                           phydev->speed);
                                break;
                        }

                        priv->oldspeed = phydev->speed;
                }

                tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
                tempval1 |= gfar_get_flowctrl_cfg(priv);

                gfar_write(&regs->maccfg1, tempval1);
                gfar_write(&regs->maccfg2, tempval);
                gfar_write(&regs->ecntrl, ecntrl);

                if (!priv->oldlink)
                        priv->oldlink = 1;

        } else if (priv->oldlink) {
                priv->oldlink = 0;
                priv->oldspeed = 0;
                priv->oldduplex = -1;
        }

        if (netif_msg_link(priv))
                phy_print_status(phydev);
}

static struct of_device_id gfar_match[] =
{
        {
                .type = "network",
                .compatible = "gianfar",
        },
        {
                .compatible = "fsl,etsec2",
        },
        {},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
        .driver = {
                .name = "fsl-gianfar",
                .owner = THIS_MODULE,
                .pm = GFAR_PM_OPS,
                .of_match_table = gfar_match,
        },
        .probe = gfar_probe,
        .remove = gfar_remove,
};

module_platform_driver(gfar_driver);