]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - drivers/net/ethernet/freescale/gianfar.c
net/fsl_pq_mdio: Replace spin_event_timeout() with arch independent
[mirror_ubuntu-eoan-kernel.git] / drivers / net / ethernet / freescale / gianfar.c
CommitLineData
0977f817 1/* drivers/net/ethernet/freescale/gianfar.c
1da177e4
LT
2 *
3 * Gianfar Ethernet Driver
7f7f5316
AF
4 * This driver is designed for the non-CPM ethernet controllers
5 * on the 85xx and 83xx family of integrated processors
1da177e4
LT
6 * Based on 8260_io/fcc_enet.c
7 *
8 * Author: Andy Fleming
4c8d3d99 9 * Maintainer: Kumar Gala
a12f801d 10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
1da177e4 11 *
20862788 12 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
a12f801d 13 * Copyright 2007 MontaVista Software, Inc.
1da177e4
LT
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 *
20 * Gianfar: AKA Lambda Draconis, "Dragon"
21 * RA 11 31 24.2
22 * Dec +69 19 52
23 * V 3.84
24 * B-V +1.62
25 *
26 * Theory of operation
0bbaf069 27 *
b31a1d8b
AF
28 * The driver is initialized through of_device. Configuration information
29 * is therefore conveyed through an OF-style device tree.
1da177e4
LT
30 *
31 * The Gianfar Ethernet Controller uses a ring of buffer
32 * descriptors. The beginning is indicated by a register
0bbaf069
KG
33 * pointing to the physical address of the start of the ring.
34 * The end is determined by a "wrap" bit being set in the
1da177e4
LT
35 * last descriptor of the ring.
36 *
37 * When a packet is received, the RXF bit in the
0bbaf069 38 * IEVENT register is set, triggering an interrupt when the
1da177e4
LT
39 * corresponding bit in the IMASK register is also set (if
40 * interrupt coalescing is active, then the interrupt may not
41 * happen immediately, but will wait until either a set number
bb40dcbb 42 * of frames or amount of time have passed). In NAPI, the
1da177e4 43 * interrupt handler will signal there is work to be done, and
0aa1538f 44 * exit. This method will start at the last known empty
0bbaf069 45 * descriptor, and process every subsequent descriptor until there
1da177e4
LT
46 * are none left with data (NAPI will stop after a set number of
47 * packets to give time to other tasks, but will eventually
48 * process all the packets). The data arrives inside a
49 * pre-allocated skb, and so after the skb is passed up to the
50 * stack, a new skb must be allocated, and the address field in
51 * the buffer descriptor must be updated to indicate this new
52 * skb.
53 *
54 * When the kernel requests that a packet be transmitted, the
55 * driver starts where it left off last time, and points the
56 * descriptor at the buffer which was passed in. The driver
57 * then informs the DMA engine that there are packets ready to
58 * be transmitted. Once the controller is finished transmitting
59 * the packet, an interrupt may be triggered (under the same
60 * conditions as for reception, but depending on the TXF bit).
61 * The driver then cleans up the buffer.
62 */
63
59deab26
JP
64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65#define DEBUG
66
1da177e4 67#include <linux/kernel.h>
1da177e4
LT
68#include <linux/string.h>
69#include <linux/errno.h>
bb40dcbb 70#include <linux/unistd.h>
1da177e4
LT
71#include <linux/slab.h>
72#include <linux/interrupt.h>
1da177e4
LT
73#include <linux/delay.h>
74#include <linux/netdevice.h>
75#include <linux/etherdevice.h>
76#include <linux/skbuff.h>
0bbaf069 77#include <linux/if_vlan.h>
1da177e4
LT
78#include <linux/spinlock.h>
79#include <linux/mm.h>
5af50730
RH
80#include <linux/of_address.h>
81#include <linux/of_irq.h>
fe192a49 82#include <linux/of_mdio.h>
b31a1d8b 83#include <linux/of_platform.h>
0bbaf069
KG
84#include <linux/ip.h>
85#include <linux/tcp.h>
86#include <linux/udp.h>
9c07b884 87#include <linux/in.h>
cc772ab7 88#include <linux/net_tstamp.h>
1da177e4
LT
89
90#include <asm/io.h>
7d350977 91#include <asm/reg.h>
2969b1f7 92#include <asm/mpc85xx.h>
1da177e4
LT
93#include <asm/irq.h>
94#include <asm/uaccess.h>
95#include <linux/module.h>
1da177e4
LT
96#include <linux/dma-mapping.h>
97#include <linux/crc32.h>
bb40dcbb
AF
98#include <linux/mii.h>
99#include <linux/phy.h>
b31a1d8b
AF
100#include <linux/phy_fixed.h>
101#include <linux/of.h>
4b6ba8aa 102#include <linux/of_net.h>
1da177e4
LT
103
104#include "gianfar.h"
1da177e4
LT
105
106#define TX_TIMEOUT (1*HZ)
1da177e4 107
7f7f5316 108const char gfar_driver_version[] = "1.3";
1da177e4 109
1da177e4
LT
110static int gfar_enet_open(struct net_device *dev);
111static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
ab939905 112static void gfar_reset_task(struct work_struct *work);
1da177e4
LT
113static void gfar_timeout(struct net_device *dev);
114static int gfar_close(struct net_device *dev);
815b97c6 115struct sk_buff *gfar_new_skb(struct net_device *dev);
a12f801d 116static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
bc4598bc 117 struct sk_buff *skb);
1da177e4
LT
118static int gfar_set_mac_address(struct net_device *dev);
119static int gfar_change_mtu(struct net_device *dev, int new_mtu);
7d12e780
DH
120static irqreturn_t gfar_error(int irq, void *dev_id);
121static irqreturn_t gfar_transmit(int irq, void *dev_id);
122static irqreturn_t gfar_interrupt(int irq, void *dev_id);
1da177e4 123static void adjust_link(struct net_device *dev);
6ce29b0e 124static noinline void gfar_update_link_state(struct gfar_private *priv);
1da177e4 125static int init_phy(struct net_device *dev);
74888760 126static int gfar_probe(struct platform_device *ofdev);
2dc11581 127static int gfar_remove(struct platform_device *ofdev);
bb40dcbb 128static void free_skb_resources(struct gfar_private *priv);
1da177e4
LT
129static void gfar_set_multi(struct net_device *dev);
130static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
d3c12873 131static void gfar_configure_serdes(struct net_device *dev);
aeb12c5e
CM
132static int gfar_poll_rx(struct napi_struct *napi, int budget);
133static int gfar_poll_tx(struct napi_struct *napi, int budget);
134static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
135static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
f2d71c2d
VW
136#ifdef CONFIG_NET_POLL_CONTROLLER
137static void gfar_netpoll(struct net_device *dev);
138#endif
a12f801d 139int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
c233cf40 140static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
61db26c6
CM
141static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
142 int amount_pull, struct napi_struct *napi);
c10650b6 143static void gfar_halt_nodisable(struct gfar_private *priv);
7f7f5316 144static void gfar_clear_exact_match(struct net_device *dev);
b6bc7650
JP
145static void gfar_set_mac_for_addr(struct net_device *dev, int num,
146 const u8 *addr);
26ccfc37 147static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
1da177e4 148
1da177e4
LT
149MODULE_AUTHOR("Freescale Semiconductor, Inc");
150MODULE_DESCRIPTION("Gianfar Ethernet Driver");
151MODULE_LICENSE("GPL");
152
a12f801d 153static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
8a102fe0
AV
154 dma_addr_t buf)
155{
8a102fe0
AV
156 u32 lstatus;
157
158 bdp->bufPtr = buf;
159
160 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
a12f801d 161 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
8a102fe0
AV
162 lstatus |= BD_LFLAG(RXBD_WRAP);
163
164 eieio();
165
166 bdp->lstatus = lstatus;
167}
168
8728327e 169static int gfar_init_bds(struct net_device *ndev)
826aa4a0 170{
8728327e 171 struct gfar_private *priv = netdev_priv(ndev);
a12f801d
SG
172 struct gfar_priv_tx_q *tx_queue = NULL;
173 struct gfar_priv_rx_q *rx_queue = NULL;
826aa4a0
AV
174 struct txbd8 *txbdp;
175 struct rxbd8 *rxbdp;
fba4ed03 176 int i, j;
a12f801d 177
fba4ed03
SG
178 for (i = 0; i < priv->num_tx_queues; i++) {
179 tx_queue = priv->tx_queue[i];
180 /* Initialize some variables in our dev structure */
181 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
182 tx_queue->dirty_tx = tx_queue->tx_bd_base;
183 tx_queue->cur_tx = tx_queue->tx_bd_base;
184 tx_queue->skb_curtx = 0;
185 tx_queue->skb_dirtytx = 0;
186
187 /* Initialize Transmit Descriptor Ring */
188 txbdp = tx_queue->tx_bd_base;
189 for (j = 0; j < tx_queue->tx_ring_size; j++) {
190 txbdp->lstatus = 0;
191 txbdp->bufPtr = 0;
192 txbdp++;
193 }
8728327e 194
fba4ed03
SG
195 /* Set the last descriptor in the ring to indicate wrap */
196 txbdp--;
197 txbdp->status |= TXBD_WRAP;
8728327e
AV
198 }
199
fba4ed03
SG
200 for (i = 0; i < priv->num_rx_queues; i++) {
201 rx_queue = priv->rx_queue[i];
202 rx_queue->cur_rx = rx_queue->rx_bd_base;
203 rx_queue->skb_currx = 0;
204 rxbdp = rx_queue->rx_bd_base;
8728327e 205
fba4ed03
SG
206 for (j = 0; j < rx_queue->rx_ring_size; j++) {
207 struct sk_buff *skb = rx_queue->rx_skbuff[j];
8728327e 208
fba4ed03
SG
209 if (skb) {
210 gfar_init_rxbdp(rx_queue, rxbdp,
211 rxbdp->bufPtr);
212 } else {
213 skb = gfar_new_skb(ndev);
214 if (!skb) {
59deab26 215 netdev_err(ndev, "Can't allocate RX buffers\n");
1eb8f7a7 216 return -ENOMEM;
fba4ed03
SG
217 }
218 rx_queue->rx_skbuff[j] = skb;
219
220 gfar_new_rxbdp(rx_queue, rxbdp, skb);
8728327e 221 }
8728327e 222
fba4ed03 223 rxbdp++;
8728327e
AV
224 }
225
8728327e
AV
226 }
227
228 return 0;
229}
230
231static int gfar_alloc_skb_resources(struct net_device *ndev)
232{
826aa4a0 233 void *vaddr;
fba4ed03
SG
234 dma_addr_t addr;
235 int i, j, k;
826aa4a0 236 struct gfar_private *priv = netdev_priv(ndev);
369ec162 237 struct device *dev = priv->dev;
a12f801d
SG
238 struct gfar_priv_tx_q *tx_queue = NULL;
239 struct gfar_priv_rx_q *rx_queue = NULL;
240
fba4ed03
SG
241 priv->total_tx_ring_size = 0;
242 for (i = 0; i < priv->num_tx_queues; i++)
243 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
244
245 priv->total_rx_ring_size = 0;
246 for (i = 0; i < priv->num_rx_queues; i++)
247 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
826aa4a0
AV
248
249 /* Allocate memory for the buffer descriptors */
8728327e 250 vaddr = dma_alloc_coherent(dev,
d0320f75
JP
251 (priv->total_tx_ring_size *
252 sizeof(struct txbd8)) +
253 (priv->total_rx_ring_size *
254 sizeof(struct rxbd8)),
255 &addr, GFP_KERNEL);
256 if (!vaddr)
826aa4a0 257 return -ENOMEM;
826aa4a0 258
fba4ed03
SG
259 for (i = 0; i < priv->num_tx_queues; i++) {
260 tx_queue = priv->tx_queue[i];
43d620c8 261 tx_queue->tx_bd_base = vaddr;
fba4ed03
SG
262 tx_queue->tx_bd_dma_base = addr;
263 tx_queue->dev = ndev;
264 /* enet DMA only understands physical addresses */
bc4598bc
JC
265 addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
266 vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
fba4ed03 267 }
826aa4a0 268
826aa4a0 269 /* Start the rx descriptor ring where the tx ring leaves off */
fba4ed03
SG
270 for (i = 0; i < priv->num_rx_queues; i++) {
271 rx_queue = priv->rx_queue[i];
43d620c8 272 rx_queue->rx_bd_base = vaddr;
fba4ed03
SG
273 rx_queue->rx_bd_dma_base = addr;
274 rx_queue->dev = ndev;
bc4598bc
JC
275 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
276 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
fba4ed03 277 }
826aa4a0
AV
278
279 /* Setup the skbuff rings */
fba4ed03
SG
280 for (i = 0; i < priv->num_tx_queues; i++) {
281 tx_queue = priv->tx_queue[i];
14f8dc49
JP
282 tx_queue->tx_skbuff =
283 kmalloc_array(tx_queue->tx_ring_size,
284 sizeof(*tx_queue->tx_skbuff),
285 GFP_KERNEL);
286 if (!tx_queue->tx_skbuff)
fba4ed03 287 goto cleanup;
826aa4a0 288
fba4ed03
SG
289 for (k = 0; k < tx_queue->tx_ring_size; k++)
290 tx_queue->tx_skbuff[k] = NULL;
291 }
826aa4a0 292
fba4ed03
SG
293 for (i = 0; i < priv->num_rx_queues; i++) {
294 rx_queue = priv->rx_queue[i];
14f8dc49
JP
295 rx_queue->rx_skbuff =
296 kmalloc_array(rx_queue->rx_ring_size,
297 sizeof(*rx_queue->rx_skbuff),
298 GFP_KERNEL);
299 if (!rx_queue->rx_skbuff)
fba4ed03 300 goto cleanup;
fba4ed03
SG
301
302 for (j = 0; j < rx_queue->rx_ring_size; j++)
303 rx_queue->rx_skbuff[j] = NULL;
304 }
826aa4a0 305
8728327e
AV
306 if (gfar_init_bds(ndev))
307 goto cleanup;
826aa4a0
AV
308
309 return 0;
310
311cleanup:
312 free_skb_resources(priv);
313 return -ENOMEM;
314}
315
fba4ed03
SG
316static void gfar_init_tx_rx_base(struct gfar_private *priv)
317{
46ceb60c 318 struct gfar __iomem *regs = priv->gfargrp[0].regs;
18294ad1 319 u32 __iomem *baddr;
fba4ed03
SG
320 int i;
321
322 baddr = &regs->tbase0;
bc4598bc 323 for (i = 0; i < priv->num_tx_queues; i++) {
fba4ed03 324 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
bc4598bc 325 baddr += 2;
fba4ed03
SG
326 }
327
328 baddr = &regs->rbase0;
bc4598bc 329 for (i = 0; i < priv->num_rx_queues; i++) {
fba4ed03 330 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
bc4598bc 331 baddr += 2;
fba4ed03
SG
332 }
333}
334
88302648 335static void gfar_rx_buff_size_config(struct gfar_private *priv)
826aa4a0 336{
88302648 337 int frame_size = priv->ndev->mtu + ETH_HLEN;
fba4ed03 338
ba779711
CM
339 /* set this when rx hw offload (TOE) functions are being used */
340 priv->uses_rxfcb = 0;
341
88302648
CM
342 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
343 priv->uses_rxfcb = 1;
344
345 if (priv->hwts_rx_en)
346 priv->uses_rxfcb = 1;
347
348 if (priv->uses_rxfcb)
349 frame_size += GMAC_FCB_LEN;
350
351 frame_size += priv->padding;
352
353 frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
354 INCREMENTAL_BUFFER_SIZE;
355
356 priv->rx_buffer_size = frame_size;
357}
358
359static void gfar_mac_rx_config(struct gfar_private *priv)
360{
361 struct gfar __iomem *regs = priv->gfargrp[0].regs;
362 u32 rctrl = 0;
363
1ccb8389 364 if (priv->rx_filer_enable) {
fba4ed03 365 rctrl |= RCTRL_FILREN;
1ccb8389 366 /* Program the RIR0 reg with the required distribution */
71ff9e3d
CM
367 if (priv->poll_mode == GFAR_SQ_POLLING)
368 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
369 else /* GFAR_MQ_POLLING */
370 gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
1ccb8389 371 }
826aa4a0 372
f5ae6279 373 /* Restore PROMISC mode */
a328ac92 374 if (priv->ndev->flags & IFF_PROMISC)
f5ae6279
CM
375 rctrl |= RCTRL_PROM;
376
88302648 377 if (priv->ndev->features & NETIF_F_RXCSUM)
826aa4a0
AV
378 rctrl |= RCTRL_CHECKSUMMING;
379
88302648
CM
380 if (priv->extended_hash)
381 rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
826aa4a0
AV
382
383 if (priv->padding) {
384 rctrl &= ~RCTRL_PAL_MASK;
385 rctrl |= RCTRL_PADDING(priv->padding);
386 }
387
97553f7f 388 /* Enable HW time stamping if requested from user space */
88302648 389 if (priv->hwts_rx_en)
97553f7f
MR
390 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
391
88302648 392 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
b852b720 393 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
826aa4a0
AV
394
395 /* Init rctrl based on our settings */
396 gfar_write(&regs->rctrl, rctrl);
a328ac92 397}
826aa4a0 398
a328ac92
CM
399static void gfar_mac_tx_config(struct gfar_private *priv)
400{
401 struct gfar __iomem *regs = priv->gfargrp[0].regs;
402 u32 tctrl = 0;
403
404 if (priv->ndev->features & NETIF_F_IP_CSUM)
826aa4a0
AV
405 tctrl |= TCTRL_INIT_CSUM;
406
b98b8bab
CM
407 if (priv->prio_sched_en)
408 tctrl |= TCTRL_TXSCHED_PRIO;
409 else {
410 tctrl |= TCTRL_TXSCHED_WRRS;
411 gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
412 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
413 }
fba4ed03 414
88302648
CM
415 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
416 tctrl |= TCTRL_VLINS;
417
826aa4a0 418 gfar_write(&regs->tctrl, tctrl);
826aa4a0
AV
419}
420
f19015ba
CM
421static void gfar_configure_coalescing(struct gfar_private *priv,
422 unsigned long tx_mask, unsigned long rx_mask)
423{
424 struct gfar __iomem *regs = priv->gfargrp[0].regs;
425 u32 __iomem *baddr;
426
427 if (priv->mode == MQ_MG_MODE) {
428 int i = 0;
429
430 baddr = &regs->txic0;
431 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
432 gfar_write(baddr + i, 0);
433 if (likely(priv->tx_queue[i]->txcoalescing))
434 gfar_write(baddr + i, priv->tx_queue[i]->txic);
435 }
436
437 baddr = &regs->rxic0;
438 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
439 gfar_write(baddr + i, 0);
440 if (likely(priv->rx_queue[i]->rxcoalescing))
441 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
442 }
443 } else {
444 /* Backward compatible case -- even if we enable
445 * multiple queues, there's only single reg to program
446 */
447 gfar_write(&regs->txic, 0);
448 if (likely(priv->tx_queue[0]->txcoalescing))
449 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
450
451 gfar_write(&regs->rxic, 0);
452 if (unlikely(priv->rx_queue[0]->rxcoalescing))
453 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
454 }
455}
456
457void gfar_configure_coalescing_all(struct gfar_private *priv)
458{
459 gfar_configure_coalescing(priv, 0xFF, 0xFF);
460}
461
a7f38041
SG
462static struct net_device_stats *gfar_get_stats(struct net_device *dev)
463{
464 struct gfar_private *priv = netdev_priv(dev);
a7f38041
SG
465 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
466 unsigned long tx_packets = 0, tx_bytes = 0;
3a2e16c8 467 int i;
a7f38041
SG
468
469 for (i = 0; i < priv->num_rx_queues; i++) {
470 rx_packets += priv->rx_queue[i]->stats.rx_packets;
bc4598bc 471 rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
a7f38041
SG
472 rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
473 }
474
475 dev->stats.rx_packets = rx_packets;
bc4598bc 476 dev->stats.rx_bytes = rx_bytes;
a7f38041
SG
477 dev->stats.rx_dropped = rx_dropped;
478
479 for (i = 0; i < priv->num_tx_queues; i++) {
1ac9ad13
ED
480 tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
481 tx_packets += priv->tx_queue[i]->stats.tx_packets;
a7f38041
SG
482 }
483
bc4598bc 484 dev->stats.tx_bytes = tx_bytes;
a7f38041
SG
485 dev->stats.tx_packets = tx_packets;
486
487 return &dev->stats;
488}
489
26ccfc37
AF
490static const struct net_device_ops gfar_netdev_ops = {
491 .ndo_open = gfar_enet_open,
492 .ndo_start_xmit = gfar_start_xmit,
493 .ndo_stop = gfar_close,
494 .ndo_change_mtu = gfar_change_mtu,
8b3afe95 495 .ndo_set_features = gfar_set_features,
afc4b13d 496 .ndo_set_rx_mode = gfar_set_multi,
26ccfc37
AF
497 .ndo_tx_timeout = gfar_timeout,
498 .ndo_do_ioctl = gfar_ioctl,
a7f38041 499 .ndo_get_stats = gfar_get_stats,
240c102d
BH
500 .ndo_set_mac_address = eth_mac_addr,
501 .ndo_validate_addr = eth_validate_addr,
26ccfc37
AF
502#ifdef CONFIG_NET_POLL_CONTROLLER
503 .ndo_poll_controller = gfar_netpoll,
504#endif
505};
506
efeddce7
CM
507static void gfar_ints_disable(struct gfar_private *priv)
508{
509 int i;
510 for (i = 0; i < priv->num_grps; i++) {
511 struct gfar __iomem *regs = priv->gfargrp[i].regs;
512 /* Clear IEVENT */
513 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
514
515 /* Initialize IMASK */
516 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
517 }
518}
519
520static void gfar_ints_enable(struct gfar_private *priv)
521{
522 int i;
523 for (i = 0; i < priv->num_grps; i++) {
524 struct gfar __iomem *regs = priv->gfargrp[i].regs;
525 /* Unmask the interrupts we look for */
526 gfar_write(&regs->imask, IMASK_DEFAULT);
527 }
528}
529
fba4ed03
SG
530void lock_tx_qs(struct gfar_private *priv)
531{
3a2e16c8 532 int i;
fba4ed03
SG
533
534 for (i = 0; i < priv->num_tx_queues; i++)
535 spin_lock(&priv->tx_queue[i]->txlock);
536}
537
fba4ed03
SG
538void unlock_tx_qs(struct gfar_private *priv)
539{
3a2e16c8 540 int i;
fba4ed03
SG
541
542 for (i = 0; i < priv->num_tx_queues; i++)
543 spin_unlock(&priv->tx_queue[i]->txlock);
544}
545
20862788
CM
546static int gfar_alloc_tx_queues(struct gfar_private *priv)
547{
548 int i;
549
550 for (i = 0; i < priv->num_tx_queues; i++) {
551 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
552 GFP_KERNEL);
553 if (!priv->tx_queue[i])
554 return -ENOMEM;
555
556 priv->tx_queue[i]->tx_skbuff = NULL;
557 priv->tx_queue[i]->qindex = i;
558 priv->tx_queue[i]->dev = priv->ndev;
559 spin_lock_init(&(priv->tx_queue[i]->txlock));
560 }
561 return 0;
562}
563
564static int gfar_alloc_rx_queues(struct gfar_private *priv)
565{
566 int i;
567
568 for (i = 0; i < priv->num_rx_queues; i++) {
569 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
570 GFP_KERNEL);
571 if (!priv->rx_queue[i])
572 return -ENOMEM;
573
574 priv->rx_queue[i]->rx_skbuff = NULL;
575 priv->rx_queue[i]->qindex = i;
576 priv->rx_queue[i]->dev = priv->ndev;
20862788
CM
577 }
578 return 0;
579}
580
581static void gfar_free_tx_queues(struct gfar_private *priv)
fba4ed03 582{
3a2e16c8 583 int i;
fba4ed03
SG
584
585 for (i = 0; i < priv->num_tx_queues; i++)
586 kfree(priv->tx_queue[i]);
587}
588
20862788 589static void gfar_free_rx_queues(struct gfar_private *priv)
fba4ed03 590{
3a2e16c8 591 int i;
fba4ed03
SG
592
593 for (i = 0; i < priv->num_rx_queues; i++)
594 kfree(priv->rx_queue[i]);
595}
596
46ceb60c
SG
597static void unmap_group_regs(struct gfar_private *priv)
598{
3a2e16c8 599 int i;
46ceb60c
SG
600
601 for (i = 0; i < MAXGROUPS; i++)
602 if (priv->gfargrp[i].regs)
603 iounmap(priv->gfargrp[i].regs);
604}
605
ee873fda
CM
606static void free_gfar_dev(struct gfar_private *priv)
607{
608 int i, j;
609
610 for (i = 0; i < priv->num_grps; i++)
611 for (j = 0; j < GFAR_NUM_IRQS; j++) {
612 kfree(priv->gfargrp[i].irqinfo[j]);
613 priv->gfargrp[i].irqinfo[j] = NULL;
614 }
615
616 free_netdev(priv->ndev);
617}
618
46ceb60c
SG
619static void disable_napi(struct gfar_private *priv)
620{
3a2e16c8 621 int i;
46ceb60c 622
aeb12c5e
CM
623 for (i = 0; i < priv->num_grps; i++) {
624 napi_disable(&priv->gfargrp[i].napi_rx);
625 napi_disable(&priv->gfargrp[i].napi_tx);
626 }
46ceb60c
SG
627}
628
629static void enable_napi(struct gfar_private *priv)
630{
3a2e16c8 631 int i;
46ceb60c 632
aeb12c5e
CM
633 for (i = 0; i < priv->num_grps; i++) {
634 napi_enable(&priv->gfargrp[i].napi_rx);
635 napi_enable(&priv->gfargrp[i].napi_tx);
636 }
46ceb60c
SG
637}
638
639static int gfar_parse_group(struct device_node *np,
bc4598bc 640 struct gfar_private *priv, const char *model)
46ceb60c 641{
5fedcc14 642 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
ee873fda
CM
643 int i;
644
7c1e7e99
PG
645 for (i = 0; i < GFAR_NUM_IRQS; i++) {
646 grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
647 GFP_KERNEL);
648 if (!grp->irqinfo[i])
ee873fda 649 return -ENOMEM;
ee873fda 650 }
46ceb60c 651
5fedcc14
CM
652 grp->regs = of_iomap(np, 0);
653 if (!grp->regs)
46ceb60c
SG
654 return -ENOMEM;
655
ee873fda 656 gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
46ceb60c
SG
657
658 /* If we aren't the FEC we have multiple interrupts */
659 if (model && strcasecmp(model, "FEC")) {
ee873fda
CM
660 gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
661 gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
662 if (gfar_irq(grp, TX)->irq == NO_IRQ ||
663 gfar_irq(grp, RX)->irq == NO_IRQ ||
664 gfar_irq(grp, ER)->irq == NO_IRQ)
46ceb60c 665 return -EINVAL;
46ceb60c
SG
666 }
667
5fedcc14
CM
668 grp->priv = priv;
669 spin_lock_init(&grp->grplock);
bc4598bc 670 if (priv->mode == MQ_MG_MODE) {
71ff9e3d
CM
671 u32 *rxq_mask, *txq_mask;
672 rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
673 txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
674
675 if (priv->poll_mode == GFAR_SQ_POLLING) {
676 /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
677 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
678 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
679 } else { /* GFAR_MQ_POLLING */
680 grp->rx_bit_map = rxq_mask ?
681 *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
682 grp->tx_bit_map = txq_mask ?
683 *txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
684 }
46ceb60c 685 } else {
5fedcc14
CM
686 grp->rx_bit_map = 0xFF;
687 grp->tx_bit_map = 0xFF;
46ceb60c 688 }
20862788
CM
689
690 /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
691 * right to left, so we need to revert the 8 bits to get the q index
692 */
693 grp->rx_bit_map = bitrev8(grp->rx_bit_map);
694 grp->tx_bit_map = bitrev8(grp->tx_bit_map);
695
696 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
697 * also assign queues to groups
698 */
699 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
71ff9e3d
CM
700 if (!grp->rx_queue)
701 grp->rx_queue = priv->rx_queue[i];
20862788
CM
702 grp->num_rx_queues++;
703 grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
704 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
705 priv->rx_queue[i]->grp = grp;
706 }
707
708 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
71ff9e3d
CM
709 if (!grp->tx_queue)
710 grp->tx_queue = priv->tx_queue[i];
20862788
CM
711 grp->num_tx_queues++;
712 grp->tstat |= (TSTAT_CLEAR_THALT >> i);
713 priv->tqueue |= (TQUEUE_EN0 >> i);
714 priv->tx_queue[i]->grp = grp;
715 }
716
46ceb60c
SG
717 priv->num_grps++;
718
719 return 0;
720}
721
2dc11581 722static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
b31a1d8b 723{
b31a1d8b
AF
724 const char *model;
725 const char *ctype;
726 const void *mac_addr;
fba4ed03
SG
727 int err = 0, i;
728 struct net_device *dev = NULL;
729 struct gfar_private *priv = NULL;
61c7a080 730 struct device_node *np = ofdev->dev.of_node;
46ceb60c 731 struct device_node *child = NULL;
4d7902f2
AF
732 const u32 *stash;
733 const u32 *stash_len;
734 const u32 *stash_idx;
fba4ed03
SG
735 unsigned int num_tx_qs, num_rx_qs;
736 u32 *tx_queues, *rx_queues;
b338ce27 737 unsigned short mode, poll_mode;
b31a1d8b
AF
738
739 if (!np || !of_device_is_available(np))
740 return -ENODEV;
741
b338ce27
CM
742 if (of_device_is_compatible(np, "fsl,etsec2")) {
743 mode = MQ_MG_MODE;
744 poll_mode = GFAR_SQ_POLLING;
745 } else {
746 mode = SQ_SG_MODE;
747 poll_mode = GFAR_SQ_POLLING;
748 }
749
71ff9e3d 750 /* parse the num of HW tx and rx queues */
fba4ed03 751 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
71ff9e3d
CM
752 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
753
b338ce27 754 if (mode == SQ_SG_MODE) {
71ff9e3d
CM
755 num_tx_qs = 1;
756 num_rx_qs = 1;
757 } else { /* MQ_MG_MODE */
c65d7533
CM
758 /* get the actual number of supported groups */
759 unsigned int num_grps = of_get_available_child_count(np);
760
761 if (num_grps == 0 || num_grps > MAXGROUPS) {
762 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
763 num_grps);
764 pr_err("Cannot do alloc_etherdev, aborting\n");
765 return -EINVAL;
766 }
767
b338ce27 768 if (poll_mode == GFAR_SQ_POLLING) {
c65d7533
CM
769 num_tx_qs = num_grps; /* one txq per int group */
770 num_rx_qs = num_grps; /* one rxq per int group */
71ff9e3d
CM
771 } else { /* GFAR_MQ_POLLING */
772 num_tx_qs = tx_queues ? *tx_queues : 1;
773 num_rx_qs = rx_queues ? *rx_queues : 1;
774 }
775 }
fba4ed03
SG
776
777 if (num_tx_qs > MAX_TX_QS) {
59deab26
JP
778 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
779 num_tx_qs, MAX_TX_QS);
780 pr_err("Cannot do alloc_etherdev, aborting\n");
fba4ed03
SG
781 return -EINVAL;
782 }
783
fba4ed03 784 if (num_rx_qs > MAX_RX_QS) {
59deab26
JP
785 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
786 num_rx_qs, MAX_RX_QS);
787 pr_err("Cannot do alloc_etherdev, aborting\n");
fba4ed03
SG
788 return -EINVAL;
789 }
790
791 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
792 dev = *pdev;
793 if (NULL == dev)
794 return -ENOMEM;
795
796 priv = netdev_priv(dev);
fba4ed03
SG
797 priv->ndev = dev;
798
b338ce27
CM
799 priv->mode = mode;
800 priv->poll_mode = poll_mode;
801
fba4ed03 802 priv->num_tx_queues = num_tx_qs;
fe069123 803 netif_set_real_num_rx_queues(dev, num_rx_qs);
fba4ed03 804 priv->num_rx_queues = num_rx_qs;
20862788
CM
805
806 err = gfar_alloc_tx_queues(priv);
807 if (err)
808 goto tx_alloc_failed;
809
810 err = gfar_alloc_rx_queues(priv);
811 if (err)
812 goto rx_alloc_failed;
b31a1d8b 813
0977f817 814 /* Init Rx queue filer rule set linked list */
4aa3a715
SP
815 INIT_LIST_HEAD(&priv->rx_list.list);
816 priv->rx_list.count = 0;
817 mutex_init(&priv->rx_queue_access);
818
b31a1d8b
AF
819 model = of_get_property(np, "model", NULL);
820
46ceb60c
SG
821 for (i = 0; i < MAXGROUPS; i++)
822 priv->gfargrp[i].regs = NULL;
b31a1d8b 823
46ceb60c 824 /* Parse and initialize group specific information */
b338ce27 825 if (priv->mode == MQ_MG_MODE) {
46ceb60c
SG
826 for_each_child_of_node(np, child) {
827 err = gfar_parse_group(child, priv, model);
828 if (err)
829 goto err_grp_init;
b31a1d8b 830 }
b338ce27 831 } else { /* SQ_SG_MODE */
46ceb60c 832 err = gfar_parse_group(np, priv, model);
bc4598bc 833 if (err)
46ceb60c 834 goto err_grp_init;
b31a1d8b
AF
835 }
836
4d7902f2
AF
837 stash = of_get_property(np, "bd-stash", NULL);
838
a12f801d 839 if (stash) {
4d7902f2
AF
840 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
841 priv->bd_stash_en = 1;
842 }
843
844 stash_len = of_get_property(np, "rx-stash-len", NULL);
845
846 if (stash_len)
847 priv->rx_stash_size = *stash_len;
848
849 stash_idx = of_get_property(np, "rx-stash-idx", NULL);
850
851 if (stash_idx)
852 priv->rx_stash_index = *stash_idx;
853
854 if (stash_len || stash_idx)
855 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
856
b31a1d8b 857 mac_addr = of_get_mac_address(np);
bc4598bc 858
b31a1d8b 859 if (mac_addr)
6a3c910c 860 memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
b31a1d8b
AF
861
862 if (model && !strcasecmp(model, "TSEC"))
34018fd4 863 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
bc4598bc
JC
864 FSL_GIANFAR_DEV_HAS_COALESCE |
865 FSL_GIANFAR_DEV_HAS_RMON |
866 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
867
b31a1d8b 868 if (model && !strcasecmp(model, "eTSEC"))
34018fd4 869 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
bc4598bc
JC
870 FSL_GIANFAR_DEV_HAS_COALESCE |
871 FSL_GIANFAR_DEV_HAS_RMON |
872 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
bc4598bc
JC
873 FSL_GIANFAR_DEV_HAS_CSUM |
874 FSL_GIANFAR_DEV_HAS_VLAN |
875 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
876 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
877 FSL_GIANFAR_DEV_HAS_TIMER;
b31a1d8b
AF
878
879 ctype = of_get_property(np, "phy-connection-type", NULL);
880
881 /* We only care about rgmii-id. The rest are autodetected */
882 if (ctype && !strcmp(ctype, "rgmii-id"))
883 priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
884 else
885 priv->interface = PHY_INTERFACE_MODE_MII;
886
887 if (of_get_property(np, "fsl,magic-packet", NULL))
888 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
889
fe192a49 890 priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
b31a1d8b 891
be403645
FF
892 /* In the case of a fixed PHY, the DT node associated
893 * to the PHY is the Ethernet MAC DT node.
894 */
6f2c9bd8 895 if (!priv->phy_node && of_phy_is_fixed_link(np)) {
be403645
FF
896 err = of_phy_register_fixed_link(np);
897 if (err)
898 goto err_grp_init;
899
6f2c9bd8 900 priv->phy_node = of_node_get(np);
be403645
FF
901 }
902
b31a1d8b 903 /* Find the TBI PHY. If it's not there, we don't support SGMII */
fe192a49 904 priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
b31a1d8b
AF
905
906 return 0;
907
46ceb60c
SG
908err_grp_init:
909 unmap_group_regs(priv);
20862788
CM
910rx_alloc_failed:
911 gfar_free_rx_queues(priv);
912tx_alloc_failed:
913 gfar_free_tx_queues(priv);
ee873fda 914 free_gfar_dev(priv);
b31a1d8b
AF
915 return err;
916}
917
ca0c88c2 918static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
cc772ab7
MR
919{
920 struct hwtstamp_config config;
921 struct gfar_private *priv = netdev_priv(netdev);
922
923 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
924 return -EFAULT;
925
926 /* reserved for future extensions */
927 if (config.flags)
928 return -EINVAL;
929
f0ee7acf
MR
930 switch (config.tx_type) {
931 case HWTSTAMP_TX_OFF:
932 priv->hwts_tx_en = 0;
933 break;
934 case HWTSTAMP_TX_ON:
935 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
936 return -ERANGE;
937 priv->hwts_tx_en = 1;
938 break;
939 default:
cc772ab7 940 return -ERANGE;
f0ee7acf 941 }
cc772ab7
MR
942
943 switch (config.rx_filter) {
944 case HWTSTAMP_FILTER_NONE:
97553f7f 945 if (priv->hwts_rx_en) {
97553f7f 946 priv->hwts_rx_en = 0;
0851133b 947 reset_gfar(netdev);
97553f7f 948 }
cc772ab7
MR
949 break;
950 default:
951 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
952 return -ERANGE;
97553f7f 953 if (!priv->hwts_rx_en) {
97553f7f 954 priv->hwts_rx_en = 1;
0851133b 955 reset_gfar(netdev);
97553f7f 956 }
cc772ab7
MR
957 config.rx_filter = HWTSTAMP_FILTER_ALL;
958 break;
959 }
960
961 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
962 -EFAULT : 0;
963}
964
ca0c88c2
BH
965static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
966{
967 struct hwtstamp_config config;
968 struct gfar_private *priv = netdev_priv(netdev);
969
970 config.flags = 0;
971 config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
972 config.rx_filter = (priv->hwts_rx_en ?
973 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
974
975 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
976 -EFAULT : 0;
977}
978
0faac9f7
CW
979static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
980{
981 struct gfar_private *priv = netdev_priv(dev);
982
983 if (!netif_running(dev))
984 return -EINVAL;
985
cc772ab7 986 if (cmd == SIOCSHWTSTAMP)
ca0c88c2
BH
987 return gfar_hwtstamp_set(dev, rq);
988 if (cmd == SIOCGHWTSTAMP)
989 return gfar_hwtstamp_get(dev, rq);
cc772ab7 990
0faac9f7
CW
991 if (!priv->phydev)
992 return -ENODEV;
993
28b04113 994 return phy_mii_ioctl(priv->phydev, rq, cmd);
0faac9f7
CW
995}
996
18294ad1
AV
997static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
998 u32 class)
7a8b3372
SG
999{
1000 u32 rqfpr = FPR_FILER_MASK;
1001 u32 rqfcr = 0x0;
1002
1003 rqfar--;
1004 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
6c43e046
WJB
1005 priv->ftp_rqfpr[rqfar] = rqfpr;
1006 priv->ftp_rqfcr[rqfar] = rqfcr;
7a8b3372
SG
1007 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1008
1009 rqfar--;
1010 rqfcr = RQFCR_CMP_NOMATCH;
6c43e046
WJB
1011 priv->ftp_rqfpr[rqfar] = rqfpr;
1012 priv->ftp_rqfcr[rqfar] = rqfcr;
7a8b3372
SG
1013 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1014
1015 rqfar--;
1016 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
1017 rqfpr = class;
6c43e046
WJB
1018 priv->ftp_rqfcr[rqfar] = rqfcr;
1019 priv->ftp_rqfpr[rqfar] = rqfpr;
7a8b3372
SG
1020 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1021
1022 rqfar--;
1023 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
1024 rqfpr = class;
6c43e046
WJB
1025 priv->ftp_rqfcr[rqfar] = rqfcr;
1026 priv->ftp_rqfpr[rqfar] = rqfpr;
7a8b3372
SG
1027 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1028
1029 return rqfar;
1030}
1031
1032static void gfar_init_filer_table(struct gfar_private *priv)
1033{
1034 int i = 0x0;
1035 u32 rqfar = MAX_FILER_IDX;
1036 u32 rqfcr = 0x0;
1037 u32 rqfpr = FPR_FILER_MASK;
1038
1039 /* Default rule */
1040 rqfcr = RQFCR_CMP_MATCH;
6c43e046
WJB
1041 priv->ftp_rqfcr[rqfar] = rqfcr;
1042 priv->ftp_rqfpr[rqfar] = rqfpr;
7a8b3372
SG
1043 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1044
1045 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
1046 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
1047 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
1048 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
1049 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
1050 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
1051
85dd08eb 1052 /* cur_filer_idx indicated the first non-masked rule */
7a8b3372
SG
1053 priv->cur_filer_idx = rqfar;
1054
1055 /* Rest are masked rules */
1056 rqfcr = RQFCR_CMP_NOMATCH;
1057 for (i = 0; i < rqfar; i++) {
6c43e046
WJB
1058 priv->ftp_rqfcr[i] = rqfcr;
1059 priv->ftp_rqfpr[i] = rqfpr;
7a8b3372
SG
1060 gfar_write_filer(priv, i, rqfcr, rqfpr);
1061 }
1062}
1063
2969b1f7 1064static void __gfar_detect_errata_83xx(struct gfar_private *priv)
7d350977 1065{
7d350977
AV
1066 unsigned int pvr = mfspr(SPRN_PVR);
1067 unsigned int svr = mfspr(SPRN_SVR);
1068 unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
1069 unsigned int rev = svr & 0xffff;
1070
1071 /* MPC8313 Rev 2.0 and higher; All MPC837x */
1072 if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
bc4598bc 1073 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
7d350977
AV
1074 priv->errata |= GFAR_ERRATA_74;
1075
deb90eac
AV
1076 /* MPC8313 and MPC837x all rev */
1077 if ((pvr == 0x80850010 && mod == 0x80b0) ||
bc4598bc 1078 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
deb90eac
AV
1079 priv->errata |= GFAR_ERRATA_76;
1080
2969b1f7
CM
1081 /* MPC8313 Rev < 2.0 */
1082 if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
1083 priv->errata |= GFAR_ERRATA_12;
1084}
1085
1086static void __gfar_detect_errata_85xx(struct gfar_private *priv)
1087{
1088 unsigned int svr = mfspr(SPRN_SVR);
1089
1090 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
4363c2fd 1091 priv->errata |= GFAR_ERRATA_12;
53fad773
CM
1092 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
1093 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
1094 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
2969b1f7
CM
1095}
1096
1097static void gfar_detect_errata(struct gfar_private *priv)
1098{
1099 struct device *dev = &priv->ofdev->dev;
1100
1101 /* no plans to fix */
1102 priv->errata |= GFAR_ERRATA_A002;
1103
1104 if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
1105 __gfar_detect_errata_85xx(priv);
1106 else /* non-mpc85xx parts, i.e. e300 core based */
1107 __gfar_detect_errata_83xx(priv);
4363c2fd 1108
7d350977
AV
1109 if (priv->errata)
1110 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
1111 priv->errata);
1112}
1113
0851133b 1114void gfar_mac_reset(struct gfar_private *priv)
20862788
CM
1115{
1116 struct gfar __iomem *regs = priv->gfargrp[0].regs;
a328ac92 1117 u32 tempval;
20862788
CM
1118
1119 /* Reset MAC layer */
1120 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
1121
1122 /* We need to delay at least 3 TX clocks */
a328ac92 1123 udelay(3);
20862788
CM
1124
1125 /* the soft reset bit is not self-resetting, so we need to
1126 * clear it before resuming normal operation
1127 */
1128 gfar_write(&regs->maccfg1, 0);
1129
a328ac92
CM
1130 udelay(3);
1131
88302648
CM
1132 /* Compute rx_buff_size based on config flags */
1133 gfar_rx_buff_size_config(priv);
1134
1135 /* Initialize the max receive frame/buffer lengths */
1136 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
a328ac92
CM
1137 gfar_write(&regs->mrblr, priv->rx_buffer_size);
1138
1139 /* Initialize the Minimum Frame Length Register */
1140 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1141
20862788
CM
1142 /* Initialize MACCFG2. */
1143 tempval = MACCFG2_INIT_SETTINGS;
88302648
CM
1144
1145 /* If the mtu is larger than the max size for standard
1146 * ethernet frames (ie, a jumbo frame), then set maccfg2
1147 * to allow huge frames, and to check the length
1148 */
1149 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
1150 gfar_has_errata(priv, GFAR_ERRATA_74))
20862788 1151 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
88302648 1152
20862788
CM
1153 gfar_write(&regs->maccfg2, tempval);
1154
a328ac92
CM
1155 /* Clear mac addr hash registers */
1156 gfar_write(&regs->igaddr0, 0);
1157 gfar_write(&regs->igaddr1, 0);
1158 gfar_write(&regs->igaddr2, 0);
1159 gfar_write(&regs->igaddr3, 0);
1160 gfar_write(&regs->igaddr4, 0);
1161 gfar_write(&regs->igaddr5, 0);
1162 gfar_write(&regs->igaddr6, 0);
1163 gfar_write(&regs->igaddr7, 0);
1164
1165 gfar_write(&regs->gaddr0, 0);
1166 gfar_write(&regs->gaddr1, 0);
1167 gfar_write(&regs->gaddr2, 0);
1168 gfar_write(&regs->gaddr3, 0);
1169 gfar_write(&regs->gaddr4, 0);
1170 gfar_write(&regs->gaddr5, 0);
1171 gfar_write(&regs->gaddr6, 0);
1172 gfar_write(&regs->gaddr7, 0);
1173
1174 if (priv->extended_hash)
1175 gfar_clear_exact_match(priv->ndev);
1176
1177 gfar_mac_rx_config(priv);
1178
1179 gfar_mac_tx_config(priv);
1180
1181 gfar_set_mac_address(priv->ndev);
1182
1183 gfar_set_multi(priv->ndev);
1184
1185 /* clear ievent and imask before configuring coalescing */
1186 gfar_ints_disable(priv);
1187
1188 /* Configure the coalescing support */
1189 gfar_configure_coalescing_all(priv);
1190}
1191
1192static void gfar_hw_init(struct gfar_private *priv)
1193{
1194 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1195 u32 attrs;
1196
1197 /* Stop the DMA engine now, in case it was running before
1198 * (The firmware could have used it, and left it running).
1199 */
1200 gfar_halt(priv);
1201
1202 gfar_mac_reset(priv);
1203
1204 /* Zero out the rmon mib registers if it has them */
1205 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1206 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1207
1208 /* Mask off the CAM interrupts */
1209 gfar_write(&regs->rmon.cam1, 0xffffffff);
1210 gfar_write(&regs->rmon.cam2, 0xffffffff);
1211 }
1212
20862788
CM
1213 /* Initialize ECNTRL */
1214 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
1215
34018fd4
CM
1216 /* Set the extraction length and index */
1217 attrs = ATTRELI_EL(priv->rx_stash_size) |
1218 ATTRELI_EI(priv->rx_stash_index);
1219
1220 gfar_write(&regs->attreli, attrs);
1221
1222 /* Start with defaults, and add stashing
1223 * depending on driver parameters
1224 */
1225 attrs = ATTR_INIT_SETTINGS;
1226
1227 if (priv->bd_stash_en)
1228 attrs |= ATTR_BDSTASH;
1229
1230 if (priv->rx_stash_size != 0)
1231 attrs |= ATTR_BUFSTASH;
1232
1233 gfar_write(&regs->attr, attrs);
1234
1235 /* FIFO configs */
1236 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1237 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1238 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1239
20862788
CM
1240 /* Program the interrupt steering regs, only for MG devices */
1241 if (priv->num_grps > 1)
1242 gfar_write_isrg(priv);
20862788
CM
1243}
1244
898157ed 1245static void gfar_init_addr_hash_table(struct gfar_private *priv)
20862788
CM
1246{
1247 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1248
1249 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1250 priv->extended_hash = 1;
1251 priv->hash_width = 9;
1252
1253 priv->hash_regs[0] = &regs->igaddr0;
1254 priv->hash_regs[1] = &regs->igaddr1;
1255 priv->hash_regs[2] = &regs->igaddr2;
1256 priv->hash_regs[3] = &regs->igaddr3;
1257 priv->hash_regs[4] = &regs->igaddr4;
1258 priv->hash_regs[5] = &regs->igaddr5;
1259 priv->hash_regs[6] = &regs->igaddr6;
1260 priv->hash_regs[7] = &regs->igaddr7;
1261 priv->hash_regs[8] = &regs->gaddr0;
1262 priv->hash_regs[9] = &regs->gaddr1;
1263 priv->hash_regs[10] = &regs->gaddr2;
1264 priv->hash_regs[11] = &regs->gaddr3;
1265 priv->hash_regs[12] = &regs->gaddr4;
1266 priv->hash_regs[13] = &regs->gaddr5;
1267 priv->hash_regs[14] = &regs->gaddr6;
1268 priv->hash_regs[15] = &regs->gaddr7;
1269
1270 } else {
1271 priv->extended_hash = 0;
1272 priv->hash_width = 8;
1273
1274 priv->hash_regs[0] = &regs->gaddr0;
1275 priv->hash_regs[1] = &regs->gaddr1;
1276 priv->hash_regs[2] = &regs->gaddr2;
1277 priv->hash_regs[3] = &regs->gaddr3;
1278 priv->hash_regs[4] = &regs->gaddr4;
1279 priv->hash_regs[5] = &regs->gaddr5;
1280 priv->hash_regs[6] = &regs->gaddr6;
1281 priv->hash_regs[7] = &regs->gaddr7;
1282 }
1283}
1284
bb40dcbb 1285/* Set up the ethernet device structure, private data,
0977f817
JC
1286 * and anything else we need before we start
1287 */
74888760 1288static int gfar_probe(struct platform_device *ofdev)
1da177e4 1289{
1da177e4
LT
1290 struct net_device *dev = NULL;
1291 struct gfar_private *priv = NULL;
20862788 1292 int err = 0, i;
1da177e4 1293
fba4ed03 1294 err = gfar_of_init(ofdev, &dev);
1da177e4 1295
fba4ed03
SG
1296 if (err)
1297 return err;
1da177e4
LT
1298
1299 priv = netdev_priv(dev);
4826857f
KG
1300 priv->ndev = dev;
1301 priv->ofdev = ofdev;
369ec162 1302 priv->dev = &ofdev->dev;
4826857f 1303 SET_NETDEV_DEV(dev, &ofdev->dev);
1da177e4 1304
d87eb127 1305 spin_lock_init(&priv->bflock);
ab939905 1306 INIT_WORK(&priv->reset_task, gfar_reset_task);
1da177e4 1307
8513fbd8 1308 platform_set_drvdata(ofdev, priv);
1da177e4 1309
7d350977
AV
1310 gfar_detect_errata(priv);
1311
1da177e4 1312 /* Set the dev->base_addr to the gfar reg region */
20862788 1313 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1da177e4 1314
1da177e4 1315 /* Fill in the dev structure */
1da177e4 1316 dev->watchdog_timeo = TX_TIMEOUT;
1da177e4 1317 dev->mtu = 1500;
26ccfc37 1318 dev->netdev_ops = &gfar_netdev_ops;
0bbaf069
KG
1319 dev->ethtool_ops = &gfar_ethtool_ops;
1320
fba4ed03 1321 /* Register for napi ...We are registering NAPI for each grp */
71ff9e3d
CM
1322 for (i = 0; i < priv->num_grps; i++) {
1323 if (priv->poll_mode == GFAR_SQ_POLLING) {
1324 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1325 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
1326 netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1327 gfar_poll_tx_sq, 2);
1328 } else {
aeb12c5e
CM
1329 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1330 gfar_poll_rx, GFAR_DEV_WEIGHT);
1331 netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1332 gfar_poll_tx, 2);
1333 }
1334 }
a12f801d 1335
b31a1d8b 1336 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
8b3afe95 1337 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
bc4598bc 1338 NETIF_F_RXCSUM;
8b3afe95 1339 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
bc4598bc 1340 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
8b3afe95 1341 }
0bbaf069 1342
87c288c6 1343 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
f646968f
PM
1344 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1345 NETIF_F_HW_VLAN_CTAG_RX;
1346 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
87c288c6 1347 }
0bbaf069 1348
20862788 1349 gfar_init_addr_hash_table(priv);
0bbaf069 1350
532c37bc
CM
1351 /* Insert receive time stamps into padding alignment bytes */
1352 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1353 priv->padding = 8;
0bbaf069 1354
cc772ab7 1355 if (dev->features & NETIF_F_IP_CSUM ||
bc4598bc 1356 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
bee9e58c 1357 dev->needed_headroom = GMAC_FCB_LEN;
1da177e4
LT
1358
1359 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
1da177e4 1360
a12f801d 1361 /* Initializing some of the rx/tx queue level parameters */
fba4ed03
SG
1362 for (i = 0; i < priv->num_tx_queues; i++) {
1363 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1364 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1365 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1366 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1367 }
a12f801d 1368
fba4ed03
SG
1369 for (i = 0; i < priv->num_rx_queues; i++) {
1370 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1371 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1372 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1373 }
1da177e4 1374
0977f817 1375 /* always enable rx filer */
4aa3a715 1376 priv->rx_filer_enable = 1;
0bbaf069
KG
1377 /* Enable most messages by default */
1378 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
b98b8bab
CM
1379 /* use pritority h/w tx queue scheduling for single queue devices */
1380 if (priv->num_tx_queues == 1)
1381 priv->prio_sched_en = 1;
0bbaf069 1382
0851133b
CM
1383 set_bit(GFAR_DOWN, &priv->state);
1384
a328ac92 1385 gfar_hw_init(priv);
d3eab82b 1386
d4c642ea
FE
1387 /* Carrier starts down, phylib will bring it up */
1388 netif_carrier_off(dev);
1389
1da177e4
LT
1390 err = register_netdev(dev);
1391
1392 if (err) {
59deab26 1393 pr_err("%s: Cannot register net device, aborting\n", dev->name);
1da177e4
LT
1394 goto register_fail;
1395 }
1396
2884e5cc 1397 device_init_wakeup(&dev->dev,
bc4598bc
JC
1398 priv->device_flags &
1399 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
2884e5cc 1400
c50a5d9a 1401 /* fill out IRQ number and name fields */
46ceb60c 1402 for (i = 0; i < priv->num_grps; i++) {
ee873fda 1403 struct gfar_priv_grp *grp = &priv->gfargrp[i];
46ceb60c 1404 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
ee873fda 1405 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
0015e551 1406 dev->name, "_g", '0' + i, "_tx");
ee873fda 1407 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
0015e551 1408 dev->name, "_g", '0' + i, "_rx");
ee873fda 1409 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
0015e551 1410 dev->name, "_g", '0' + i, "_er");
46ceb60c 1411 } else
ee873fda 1412 strcpy(gfar_irq(grp, TX)->name, dev->name);
46ceb60c 1413 }
c50a5d9a 1414
7a8b3372
SG
1415 /* Initialize the filer table */
1416 gfar_init_filer_table(priv);
1417
1da177e4 1418 /* Print out the device info */
59deab26 1419 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1da177e4 1420
0977f817
JC
1421 /* Even more device info helps when determining which kernel
1422 * provided which set of benchmarks.
1423 */
59deab26 1424 netdev_info(dev, "Running with NAPI enabled\n");
fba4ed03 1425 for (i = 0; i < priv->num_rx_queues; i++)
59deab26
JP
1426 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1427 i, priv->rx_queue[i]->rx_ring_size);
bc4598bc 1428 for (i = 0; i < priv->num_tx_queues; i++)
59deab26
JP
1429 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1430 i, priv->tx_queue[i]->tx_ring_size);
1da177e4
LT
1431
1432 return 0;
1433
1434register_fail:
46ceb60c 1435 unmap_group_regs(priv);
20862788
CM
1436 gfar_free_rx_queues(priv);
1437 gfar_free_tx_queues(priv);
888c88b8
UKK
1438 of_node_put(priv->phy_node);
1439 of_node_put(priv->tbi_node);
ee873fda 1440 free_gfar_dev(priv);
bb40dcbb 1441 return err;
1da177e4
LT
1442}
1443
2dc11581 1444static int gfar_remove(struct platform_device *ofdev)
1da177e4 1445{
8513fbd8 1446 struct gfar_private *priv = platform_get_drvdata(ofdev);
1da177e4 1447
888c88b8
UKK
1448 of_node_put(priv->phy_node);
1449 of_node_put(priv->tbi_node);
fe192a49 1450
d9d8e041 1451 unregister_netdev(priv->ndev);
46ceb60c 1452 unmap_group_regs(priv);
20862788
CM
1453 gfar_free_rx_queues(priv);
1454 gfar_free_tx_queues(priv);
ee873fda 1455 free_gfar_dev(priv);
1da177e4
LT
1456
1457 return 0;
1458}
1459
d87eb127 1460#ifdef CONFIG_PM
be926fc4
AV
1461
1462static int gfar_suspend(struct device *dev)
d87eb127 1463{
be926fc4
AV
1464 struct gfar_private *priv = dev_get_drvdata(dev);
1465 struct net_device *ndev = priv->ndev;
46ceb60c 1466 struct gfar __iomem *regs = priv->gfargrp[0].regs;
d87eb127
SW
1467 unsigned long flags;
1468 u32 tempval;
1469
1470 int magic_packet = priv->wol_en &&
bc4598bc
JC
1471 (priv->device_flags &
1472 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
d87eb127 1473
be926fc4 1474 netif_device_detach(ndev);
d87eb127 1475
be926fc4 1476 if (netif_running(ndev)) {
fba4ed03
SG
1477
1478 local_irq_save(flags);
1479 lock_tx_qs(priv);
d87eb127 1480
c10650b6 1481 gfar_halt_nodisable(priv);
d87eb127
SW
1482
1483 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
f4983704 1484 tempval = gfar_read(&regs->maccfg1);
d87eb127
SW
1485
1486 tempval &= ~MACCFG1_TX_EN;
1487
1488 if (!magic_packet)
1489 tempval &= ~MACCFG1_RX_EN;
1490
f4983704 1491 gfar_write(&regs->maccfg1, tempval);
d87eb127 1492
fba4ed03
SG
1493 unlock_tx_qs(priv);
1494 local_irq_restore(flags);
d87eb127 1495
46ceb60c 1496 disable_napi(priv);
d87eb127
SW
1497
1498 if (magic_packet) {
1499 /* Enable interrupt on Magic Packet */
f4983704 1500 gfar_write(&regs->imask, IMASK_MAG);
d87eb127
SW
1501
1502 /* Enable Magic Packet mode */
f4983704 1503 tempval = gfar_read(&regs->maccfg2);
d87eb127 1504 tempval |= MACCFG2_MPEN;
f4983704 1505 gfar_write(&regs->maccfg2, tempval);
d87eb127
SW
1506 } else {
1507 phy_stop(priv->phydev);
1508 }
1509 }
1510
1511 return 0;
1512}
1513
be926fc4 1514static int gfar_resume(struct device *dev)
d87eb127 1515{
be926fc4
AV
1516 struct gfar_private *priv = dev_get_drvdata(dev);
1517 struct net_device *ndev = priv->ndev;
46ceb60c 1518 struct gfar __iomem *regs = priv->gfargrp[0].regs;
d87eb127
SW
1519 unsigned long flags;
1520 u32 tempval;
1521 int magic_packet = priv->wol_en &&
bc4598bc
JC
1522 (priv->device_flags &
1523 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
d87eb127 1524
be926fc4
AV
1525 if (!netif_running(ndev)) {
1526 netif_device_attach(ndev);
d87eb127
SW
1527 return 0;
1528 }
1529
1530 if (!magic_packet && priv->phydev)
1531 phy_start(priv->phydev);
1532
1533 /* Disable Magic Packet mode, in case something
1534 * else woke us up.
1535 */
fba4ed03
SG
1536 local_irq_save(flags);
1537 lock_tx_qs(priv);
d87eb127 1538
f4983704 1539 tempval = gfar_read(&regs->maccfg2);
d87eb127 1540 tempval &= ~MACCFG2_MPEN;
f4983704 1541 gfar_write(&regs->maccfg2, tempval);
d87eb127 1542
c10650b6 1543 gfar_start(priv);
d87eb127 1544
fba4ed03
SG
1545 unlock_tx_qs(priv);
1546 local_irq_restore(flags);
d87eb127 1547
be926fc4
AV
1548 netif_device_attach(ndev);
1549
46ceb60c 1550 enable_napi(priv);
be926fc4
AV
1551
1552 return 0;
1553}
1554
1555static int gfar_restore(struct device *dev)
1556{
1557 struct gfar_private *priv = dev_get_drvdata(dev);
1558 struct net_device *ndev = priv->ndev;
1559
103cdd1d
WD
1560 if (!netif_running(ndev)) {
1561 netif_device_attach(ndev);
1562
be926fc4 1563 return 0;
103cdd1d 1564 }
be926fc4 1565
1eb8f7a7
CM
1566 if (gfar_init_bds(ndev)) {
1567 free_skb_resources(priv);
1568 return -ENOMEM;
1569 }
1570
a328ac92
CM
1571 gfar_mac_reset(priv);
1572
1573 gfar_init_tx_rx_base(priv);
1574
c10650b6 1575 gfar_start(priv);
be926fc4
AV
1576
1577 priv->oldlink = 0;
1578 priv->oldspeed = 0;
1579 priv->oldduplex = -1;
1580
1581 if (priv->phydev)
1582 phy_start(priv->phydev);
d87eb127 1583
be926fc4 1584 netif_device_attach(ndev);
5ea681d4 1585 enable_napi(priv);
d87eb127
SW
1586
1587 return 0;
1588}
be926fc4
AV
1589
1590static struct dev_pm_ops gfar_pm_ops = {
1591 .suspend = gfar_suspend,
1592 .resume = gfar_resume,
1593 .freeze = gfar_suspend,
1594 .thaw = gfar_resume,
1595 .restore = gfar_restore,
1596};
1597
1598#define GFAR_PM_OPS (&gfar_pm_ops)
1599
d87eb127 1600#else
be926fc4
AV
1601
1602#define GFAR_PM_OPS NULL
be926fc4 1603
d87eb127 1604#endif
1da177e4 1605
e8a2b6a4
AF
1606/* Reads the controller's registers to determine what interface
1607 * connects it to the PHY.
1608 */
1609static phy_interface_t gfar_get_interface(struct net_device *dev)
1610{
1611 struct gfar_private *priv = netdev_priv(dev);
46ceb60c 1612 struct gfar __iomem *regs = priv->gfargrp[0].regs;
f4983704
SG
1613 u32 ecntrl;
1614
f4983704 1615 ecntrl = gfar_read(&regs->ecntrl);
e8a2b6a4
AF
1616
1617 if (ecntrl & ECNTRL_SGMII_MODE)
1618 return PHY_INTERFACE_MODE_SGMII;
1619
1620 if (ecntrl & ECNTRL_TBI_MODE) {
1621 if (ecntrl & ECNTRL_REDUCED_MODE)
1622 return PHY_INTERFACE_MODE_RTBI;
1623 else
1624 return PHY_INTERFACE_MODE_TBI;
1625 }
1626
1627 if (ecntrl & ECNTRL_REDUCED_MODE) {
bc4598bc 1628 if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
e8a2b6a4 1629 return PHY_INTERFACE_MODE_RMII;
bc4598bc 1630 }
7132ab7f 1631 else {
b31a1d8b 1632 phy_interface_t interface = priv->interface;
7132ab7f 1633
0977f817 1634 /* This isn't autodetected right now, so it must
7132ab7f
AF
1635 * be set by the device tree or platform code.
1636 */
1637 if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1638 return PHY_INTERFACE_MODE_RGMII_ID;
1639
e8a2b6a4 1640 return PHY_INTERFACE_MODE_RGMII;
7132ab7f 1641 }
e8a2b6a4
AF
1642 }
1643
b31a1d8b 1644 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
e8a2b6a4
AF
1645 return PHY_INTERFACE_MODE_GMII;
1646
1647 return PHY_INTERFACE_MODE_MII;
1648}
1649
1650
bb40dcbb
AF
1651/* Initializes driver's PHY state, and attaches to the PHY.
1652 * Returns 0 on success.
1da177e4
LT
1653 */
1654static int init_phy(struct net_device *dev)
1655{
1656 struct gfar_private *priv = netdev_priv(dev);
bb40dcbb 1657 uint gigabit_support =
b31a1d8b 1658 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
23402bdd 1659 GFAR_SUPPORTED_GBIT : 0;
e8a2b6a4 1660 phy_interface_t interface;
1da177e4
LT
1661
1662 priv->oldlink = 0;
1663 priv->oldspeed = 0;
1664 priv->oldduplex = -1;
1665
e8a2b6a4
AF
1666 interface = gfar_get_interface(dev);
1667
1db780f8
AV
1668 priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1669 interface);
1db780f8
AV
1670 if (!priv->phydev) {
1671 dev_err(&dev->dev, "could not attach to PHY\n");
1672 return -ENODEV;
fe192a49 1673 }
1da177e4 1674
d3c12873
KJ
1675 if (interface == PHY_INTERFACE_MODE_SGMII)
1676 gfar_configure_serdes(dev);
1677
bb40dcbb 1678 /* Remove any features not supported by the controller */
fe192a49
GL
1679 priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1680 priv->phydev->advertising = priv->phydev->supported;
1da177e4
LT
1681
1682 return 0;
1da177e4
LT
1683}
1684
0977f817 1685/* Initialize TBI PHY interface for communicating with the
d0313587
PG
1686 * SERDES lynx PHY on the chip. We communicate with this PHY
1687 * through the MDIO bus on each controller, treating it as a
1688 * "normal" PHY at the address found in the TBIPA register. We assume
1689 * that the TBIPA register is valid. Either the MDIO bus code will set
1690 * it to a value that doesn't conflict with other PHYs on the bus, or the
1691 * value doesn't matter, as there are no other PHYs on the bus.
1692 */
d3c12873
KJ
1693static void gfar_configure_serdes(struct net_device *dev)
1694{
1695 struct gfar_private *priv = netdev_priv(dev);
fe192a49
GL
1696 struct phy_device *tbiphy;
1697
1698 if (!priv->tbi_node) {
1699 dev_warn(&dev->dev, "error: SGMII mode requires that the "
1700 "device tree specify a tbi-handle\n");
1701 return;
1702 }
c132419e 1703
fe192a49
GL
1704 tbiphy = of_phy_find_device(priv->tbi_node);
1705 if (!tbiphy) {
1706 dev_err(&dev->dev, "error: Could not get TBI device\n");
b31a1d8b
AF
1707 return;
1708 }
d3c12873 1709
0977f817 1710 /* If the link is already up, we must already be ok, and don't need to
bdb59f94
TP
1711 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1712 * everything for us? Resetting it takes the link down and requires
1713 * several seconds for it to come back.
1714 */
fe192a49 1715 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
b31a1d8b 1716 return;
d3c12873 1717
d0313587 1718 /* Single clk mode, mii mode off(for serdes communication) */
fe192a49 1719 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
d3c12873 1720
fe192a49 1721 phy_write(tbiphy, MII_ADVERTISE,
bc4598bc
JC
1722 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1723 ADVERTISE_1000XPSE_ASYM);
d3c12873 1724
bc4598bc
JC
1725 phy_write(tbiphy, MII_BMCR,
1726 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1727 BMCR_SPEED1000);
d3c12873
KJ
1728}
1729
511d934f
AV
1730static int __gfar_is_rx_idle(struct gfar_private *priv)
1731{
1732 u32 res;
1733
0977f817 1734 /* Normaly TSEC should not hang on GRS commands, so we should
511d934f
AV
1735 * actually wait for IEVENT_GRSC flag.
1736 */
ad3660c2 1737 if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
511d934f
AV
1738 return 0;
1739
0977f817 1740 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
511d934f
AV
1741 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1742 * and the Rx can be safely reset.
1743 */
1744 res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1745 res &= 0x7f807f80;
1746 if ((res & 0xffff) == (res >> 16))
1747 return 1;
1748
1749 return 0;
1750}
0bbaf069
KG
1751
1752/* Halt the receive and transmit queues */
c10650b6 1753static void gfar_halt_nodisable(struct gfar_private *priv)
1da177e4 1754{
efeddce7 1755 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1da177e4
LT
1756 u32 tempval;
1757
efeddce7 1758 gfar_ints_disable(priv);
1da177e4 1759
1da177e4 1760 /* Stop the DMA, and wait for it to stop */
f4983704 1761 tempval = gfar_read(&regs->dmactrl);
bc4598bc
JC
1762 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
1763 (DMACTRL_GRS | DMACTRL_GTS)) {
511d934f
AV
1764 int ret;
1765
1da177e4 1766 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
f4983704 1767 gfar_write(&regs->dmactrl, tempval);
1da177e4 1768
511d934f
AV
1769 do {
1770 ret = spin_event_timeout(((gfar_read(&regs->ievent) &
1771 (IEVENT_GRSC | IEVENT_GTSC)) ==
1772 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
1773 if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
1774 ret = __gfar_is_rx_idle(priv);
1775 } while (!ret);
1da177e4 1776 }
d87eb127 1777}
d87eb127
SW
1778
1779/* Halt the receive and transmit queues */
c10650b6 1780void gfar_halt(struct gfar_private *priv)
d87eb127 1781{
46ceb60c 1782 struct gfar __iomem *regs = priv->gfargrp[0].regs;
d87eb127 1783 u32 tempval;
1da177e4 1784
c10650b6
CM
1785 /* Dissable the Rx/Tx hw queues */
1786 gfar_write(&regs->rqueue, 0);
1787 gfar_write(&regs->tqueue, 0);
2a54adc3 1788
c10650b6
CM
1789 mdelay(10);
1790
1791 gfar_halt_nodisable(priv);
1792
1793 /* Disable Rx/Tx DMA */
1da177e4
LT
1794 tempval = gfar_read(&regs->maccfg1);
1795 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1796 gfar_write(&regs->maccfg1, tempval);
0bbaf069
KG
1797}
1798
1799void stop_gfar(struct net_device *dev)
1800{
1801 struct gfar_private *priv = netdev_priv(dev);
0bbaf069 1802
0851133b 1803 netif_tx_stop_all_queues(dev);
bb40dcbb 1804
4e857c58 1805 smp_mb__before_atomic();
0851133b 1806 set_bit(GFAR_DOWN, &priv->state);
4e857c58 1807 smp_mb__after_atomic();
a12f801d 1808
0851133b 1809 disable_napi(priv);
0bbaf069 1810
0851133b 1811 /* disable ints and gracefully shut down Rx/Tx DMA */
c10650b6 1812 gfar_halt(priv);
1da177e4 1813
0851133b 1814 phy_stop(priv->phydev);
1da177e4 1815
1da177e4 1816 free_skb_resources(priv);
1da177e4
LT
1817}
1818
fba4ed03 1819static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1da177e4 1820{
1da177e4 1821 struct txbd8 *txbdp;
fba4ed03 1822 struct gfar_private *priv = netdev_priv(tx_queue->dev);
4669bc90 1823 int i, j;
1da177e4 1824
a12f801d 1825 txbdp = tx_queue->tx_bd_base;
1da177e4 1826
a12f801d
SG
1827 for (i = 0; i < tx_queue->tx_ring_size; i++) {
1828 if (!tx_queue->tx_skbuff[i])
4669bc90 1829 continue;
1da177e4 1830
369ec162 1831 dma_unmap_single(priv->dev, txbdp->bufPtr,
bc4598bc 1832 txbdp->length, DMA_TO_DEVICE);
4669bc90 1833 txbdp->lstatus = 0;
fba4ed03 1834 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
bc4598bc 1835 j++) {
4669bc90 1836 txbdp++;
369ec162 1837 dma_unmap_page(priv->dev, txbdp->bufPtr,
bc4598bc 1838 txbdp->length, DMA_TO_DEVICE);
1da177e4 1839 }
ad5da7ab 1840 txbdp++;
a12f801d
SG
1841 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1842 tx_queue->tx_skbuff[i] = NULL;
1da177e4 1843 }
a12f801d 1844 kfree(tx_queue->tx_skbuff);
1eb8f7a7 1845 tx_queue->tx_skbuff = NULL;
fba4ed03 1846}
1da177e4 1847
fba4ed03
SG
1848static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1849{
1850 struct rxbd8 *rxbdp;
1851 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1852 int i;
1da177e4 1853
fba4ed03 1854 rxbdp = rx_queue->rx_bd_base;
1da177e4 1855
a12f801d
SG
1856 for (i = 0; i < rx_queue->rx_ring_size; i++) {
1857 if (rx_queue->rx_skbuff[i]) {
369ec162
CM
1858 dma_unmap_single(priv->dev, rxbdp->bufPtr,
1859 priv->rx_buffer_size,
bc4598bc 1860 DMA_FROM_DEVICE);
a12f801d
SG
1861 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1862 rx_queue->rx_skbuff[i] = NULL;
1da177e4 1863 }
e69edd21
AV
1864 rxbdp->lstatus = 0;
1865 rxbdp->bufPtr = 0;
1866 rxbdp++;
1da177e4 1867 }
a12f801d 1868 kfree(rx_queue->rx_skbuff);
1eb8f7a7 1869 rx_queue->rx_skbuff = NULL;
fba4ed03 1870}
e69edd21 1871
fba4ed03 1872/* If there are any tx skbs or rx skbs still around, free them.
0977f817
JC
1873 * Then free tx_skbuff and rx_skbuff
1874 */
fba4ed03
SG
1875static void free_skb_resources(struct gfar_private *priv)
1876{
1877 struct gfar_priv_tx_q *tx_queue = NULL;
1878 struct gfar_priv_rx_q *rx_queue = NULL;
1879 int i;
1880
1881 /* Go through all the buffer descriptors and free their data buffers */
1882 for (i = 0; i < priv->num_tx_queues; i++) {
d8a0f1b0 1883 struct netdev_queue *txq;
bc4598bc 1884
fba4ed03 1885 tx_queue = priv->tx_queue[i];
d8a0f1b0 1886 txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
bc4598bc 1887 if (tx_queue->tx_skbuff)
fba4ed03 1888 free_skb_tx_queue(tx_queue);
d8a0f1b0 1889 netdev_tx_reset_queue(txq);
fba4ed03
SG
1890 }
1891
1892 for (i = 0; i < priv->num_rx_queues; i++) {
1893 rx_queue = priv->rx_queue[i];
bc4598bc 1894 if (rx_queue->rx_skbuff)
fba4ed03
SG
1895 free_skb_rx_queue(rx_queue);
1896 }
1897
369ec162 1898 dma_free_coherent(priv->dev,
bc4598bc
JC
1899 sizeof(struct txbd8) * priv->total_tx_ring_size +
1900 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1901 priv->tx_queue[0]->tx_bd_base,
1902 priv->tx_queue[0]->tx_bd_dma_base);
1da177e4
LT
1903}
1904
c10650b6 1905void gfar_start(struct gfar_private *priv)
0bbaf069 1906{
46ceb60c 1907 struct gfar __iomem *regs = priv->gfargrp[0].regs;
0bbaf069 1908 u32 tempval;
46ceb60c 1909 int i = 0;
0bbaf069 1910
c10650b6
CM
1911 /* Enable Rx/Tx hw queues */
1912 gfar_write(&regs->rqueue, priv->rqueue);
1913 gfar_write(&regs->tqueue, priv->tqueue);
0bbaf069
KG
1914
1915 /* Initialize DMACTRL to have WWR and WOP */
f4983704 1916 tempval = gfar_read(&regs->dmactrl);
0bbaf069 1917 tempval |= DMACTRL_INIT_SETTINGS;
f4983704 1918 gfar_write(&regs->dmactrl, tempval);
0bbaf069 1919
0bbaf069 1920 /* Make sure we aren't stopped */
f4983704 1921 tempval = gfar_read(&regs->dmactrl);
0bbaf069 1922 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
f4983704 1923 gfar_write(&regs->dmactrl, tempval);
0bbaf069 1924
46ceb60c
SG
1925 for (i = 0; i < priv->num_grps; i++) {
1926 regs = priv->gfargrp[i].regs;
1927 /* Clear THLT/RHLT, so that the DMA starts polling now */
1928 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1929 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
46ceb60c 1930 }
12dea57b 1931
c10650b6
CM
1932 /* Enable Rx/Tx DMA */
1933 tempval = gfar_read(&regs->maccfg1);
1934 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1935 gfar_write(&regs->maccfg1, tempval);
1936
efeddce7
CM
1937 gfar_ints_enable(priv);
1938
c10650b6 1939 priv->ndev->trans_start = jiffies; /* prevent tx timeout */
0bbaf069
KG
1940}
1941
80ec396c
CM
1942static void free_grp_irqs(struct gfar_priv_grp *grp)
1943{
1944 free_irq(gfar_irq(grp, TX)->irq, grp);
1945 free_irq(gfar_irq(grp, RX)->irq, grp);
1946 free_irq(gfar_irq(grp, ER)->irq, grp);
1947}
1948
46ceb60c
SG
1949static int register_grp_irqs(struct gfar_priv_grp *grp)
1950{
1951 struct gfar_private *priv = grp->priv;
1952 struct net_device *dev = priv->ndev;
1953 int err;
1da177e4 1954
1da177e4 1955 /* If the device has multiple interrupts, register for
0977f817
JC
1956 * them. Otherwise, only register for the one
1957 */
b31a1d8b 1958 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
0bbaf069 1959 /* Install our interrupt handlers for Error,
0977f817
JC
1960 * Transmit, and Receive
1961 */
ee873fda
CM
1962 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
1963 gfar_irq(grp, ER)->name, grp);
1964 if (err < 0) {
59deab26 1965 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
ee873fda 1966 gfar_irq(grp, ER)->irq);
46ceb60c 1967
2145f1af 1968 goto err_irq_fail;
1da177e4 1969 }
ee873fda
CM
1970 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
1971 gfar_irq(grp, TX)->name, grp);
1972 if (err < 0) {
59deab26 1973 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
ee873fda 1974 gfar_irq(grp, TX)->irq);
1da177e4
LT
1975 goto tx_irq_fail;
1976 }
ee873fda
CM
1977 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
1978 gfar_irq(grp, RX)->name, grp);
1979 if (err < 0) {
59deab26 1980 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
ee873fda 1981 gfar_irq(grp, RX)->irq);
1da177e4
LT
1982 goto rx_irq_fail;
1983 }
1984 } else {
ee873fda
CM
1985 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
1986 gfar_irq(grp, TX)->name, grp);
1987 if (err < 0) {
59deab26 1988 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
ee873fda 1989 gfar_irq(grp, TX)->irq);
1da177e4
LT
1990 goto err_irq_fail;
1991 }
1992 }
1993
46ceb60c
SG
1994 return 0;
1995
1996rx_irq_fail:
ee873fda 1997 free_irq(gfar_irq(grp, TX)->irq, grp);
46ceb60c 1998tx_irq_fail:
ee873fda 1999 free_irq(gfar_irq(grp, ER)->irq, grp);
46ceb60c
SG
2000err_irq_fail:
2001 return err;
2002
2003}
2004
80ec396c
CM
2005static void gfar_free_irq(struct gfar_private *priv)
2006{
2007 int i;
2008
2009 /* Free the IRQs */
2010 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2011 for (i = 0; i < priv->num_grps; i++)
2012 free_grp_irqs(&priv->gfargrp[i]);
2013 } else {
2014 for (i = 0; i < priv->num_grps; i++)
2015 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2016 &priv->gfargrp[i]);
2017 }
2018}
2019
2020static int gfar_request_irq(struct gfar_private *priv)
2021{
2022 int err, i, j;
2023
2024 for (i = 0; i < priv->num_grps; i++) {
2025 err = register_grp_irqs(&priv->gfargrp[i]);
2026 if (err) {
2027 for (j = 0; j < i; j++)
2028 free_grp_irqs(&priv->gfargrp[j]);
2029 return err;
2030 }
2031 }
2032
2033 return 0;
2034}
2035
46ceb60c
SG
2036/* Bring the controller up and running */
2037int startup_gfar(struct net_device *ndev)
2038{
2039 struct gfar_private *priv = netdev_priv(ndev);
80ec396c 2040 int err;
46ceb60c 2041
a328ac92 2042 gfar_mac_reset(priv);
46ceb60c 2043
46ceb60c
SG
2044 err = gfar_alloc_skb_resources(ndev);
2045 if (err)
2046 return err;
2047
a328ac92 2048 gfar_init_tx_rx_base(priv);
46ceb60c 2049
4e857c58 2050 smp_mb__before_atomic();
0851133b 2051 clear_bit(GFAR_DOWN, &priv->state);
4e857c58 2052 smp_mb__after_atomic();
0851133b
CM
2053
2054 /* Start Rx/Tx DMA and enable the interrupts */
c10650b6 2055 gfar_start(priv);
1da177e4 2056
826aa4a0
AV
2057 phy_start(priv->phydev);
2058
0851133b
CM
2059 enable_napi(priv);
2060
2061 netif_tx_wake_all_queues(ndev);
2062
1da177e4 2063 return 0;
1da177e4
LT
2064}
2065
0977f817
JC
2066/* Called when something needs to use the ethernet device
2067 * Returns 0 for success.
2068 */
1da177e4
LT
2069static int gfar_enet_open(struct net_device *dev)
2070{
94e8cc35 2071 struct gfar_private *priv = netdev_priv(dev);
1da177e4
LT
2072 int err;
2073
1da177e4 2074 err = init_phy(dev);
0851133b 2075 if (err)
1da177e4
LT
2076 return err;
2077
80ec396c
CM
2078 err = gfar_request_irq(priv);
2079 if (err)
2080 return err;
2081
1da177e4 2082 err = startup_gfar(dev);
0851133b 2083 if (err)
db0e8e3f 2084 return err;
1da177e4 2085
2884e5cc
AV
2086 device_set_wakeup_enable(&dev->dev, priv->wol_en);
2087
1da177e4
LT
2088 return err;
2089}
2090
54dc79fe 2091static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
0bbaf069 2092{
54dc79fe 2093 struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
6c31d55f
KG
2094
2095 memset(fcb, 0, GMAC_FCB_LEN);
0bbaf069 2096
0bbaf069
KG
2097 return fcb;
2098}
2099
9c4886e5 2100static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
bc4598bc 2101 int fcb_length)
0bbaf069 2102{
0bbaf069
KG
2103 /* If we're here, it's a IP packet with a TCP or UDP
2104 * payload. We set it to checksum, using a pseudo-header
2105 * we provide
2106 */
3a2e16c8 2107 u8 flags = TXFCB_DEFAULT;
0bbaf069 2108
0977f817
JC
2109 /* Tell the controller what the protocol is
2110 * And provide the already calculated phcs
2111 */
eddc9ec5 2112 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
7f7f5316 2113 flags |= TXFCB_UDP;
4bedb452 2114 fcb->phcs = udp_hdr(skb)->check;
7f7f5316 2115 } else
8da32de5 2116 fcb->phcs = tcp_hdr(skb)->check;
0bbaf069
KG
2117
2118 /* l3os is the distance between the start of the
2119 * frame (skb->data) and the start of the IP hdr.
2120 * l4os is the distance between the start of the
0977f817
JC
2121 * l3 hdr and the l4 hdr
2122 */
9c4886e5 2123 fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
cfe1fc77 2124 fcb->l4os = skb_network_header_len(skb);
0bbaf069 2125
7f7f5316 2126 fcb->flags = flags;
0bbaf069
KG
2127}
2128
7f7f5316 2129void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
0bbaf069 2130{
7f7f5316 2131 fcb->flags |= TXFCB_VLN;
0bbaf069
KG
2132 fcb->vlctl = vlan_tx_tag_get(skb);
2133}
2134
4669bc90 2135static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
bc4598bc 2136 struct txbd8 *base, int ring_size)
4669bc90
DH
2137{
2138 struct txbd8 *new_bd = bdp + stride;
2139
2140 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2141}
2142
2143static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
bc4598bc 2144 int ring_size)
4669bc90
DH
2145{
2146 return skip_txbd(bdp, 1, base, ring_size);
2147}
2148
02d88fb4
CM
2149/* eTSEC12: csum generation not supported for some fcb offsets */
2150static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2151 unsigned long fcb_addr)
2152{
2153 return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2154 (fcb_addr % 0x20) > 0x18);
2155}
2156
2157/* eTSEC76: csum generation for frames larger than 2500 may
2158 * cause excess delays before start of transmission
2159 */
2160static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2161 unsigned int len)
2162{
2163 return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2164 (len > 2500));
2165}
2166
0977f817
JC
2167/* This is called by the kernel when a frame is ready for transmission.
2168 * It is pointed to by the dev->hard_start_xmit function pointer
2169 */
1da177e4
LT
2170static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2171{
2172 struct gfar_private *priv = netdev_priv(dev);
a12f801d 2173 struct gfar_priv_tx_q *tx_queue = NULL;
fba4ed03 2174 struct netdev_queue *txq;
f4983704 2175 struct gfar __iomem *regs = NULL;
0bbaf069 2176 struct txfcb *fcb = NULL;
f0ee7acf 2177 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
5a5efed4 2178 u32 lstatus;
0d0cffdc
CM
2179 int i, rq = 0;
2180 int do_tstamp, do_csum, do_vlan;
4669bc90 2181 u32 bufaddr;
fef6108d 2182 unsigned long flags;
50ad076b 2183 unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
fba4ed03
SG
2184
2185 rq = skb->queue_mapping;
2186 tx_queue = priv->tx_queue[rq];
2187 txq = netdev_get_tx_queue(dev, rq);
a12f801d 2188 base = tx_queue->tx_bd_base;
46ceb60c 2189 regs = tx_queue->grp->regs;
f0ee7acf 2190
0d0cffdc
CM
2191 do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
2192 do_vlan = vlan_tx_tag_present(skb);
2193 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2194 priv->hwts_tx_en;
2195
2196 if (do_csum || do_vlan)
2197 fcb_len = GMAC_FCB_LEN;
2198
f0ee7acf 2199 /* check if time stamp should be generated */
0d0cffdc
CM
2200 if (unlikely(do_tstamp))
2201 fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
4669bc90 2202
5b28beaf 2203 /* make space for additional header when fcb is needed */
0d0cffdc 2204 if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
54dc79fe
SH
2205 struct sk_buff *skb_new;
2206
0d0cffdc 2207 skb_new = skb_realloc_headroom(skb, fcb_len);
54dc79fe
SH
2208 if (!skb_new) {
2209 dev->stats.tx_errors++;
c9974ad4 2210 dev_kfree_skb_any(skb);
54dc79fe
SH
2211 return NETDEV_TX_OK;
2212 }
db83d136 2213
313b037c
ED
2214 if (skb->sk)
2215 skb_set_owner_w(skb_new, skb->sk);
c9974ad4 2216 dev_consume_skb_any(skb);
54dc79fe
SH
2217 skb = skb_new;
2218 }
2219
4669bc90
DH
2220 /* total number of fragments in the SKB */
2221 nr_frags = skb_shinfo(skb)->nr_frags;
2222
f0ee7acf
MR
2223 /* calculate the required number of TxBDs for this skb */
2224 if (unlikely(do_tstamp))
2225 nr_txbds = nr_frags + 2;
2226 else
2227 nr_txbds = nr_frags + 1;
2228
4669bc90 2229 /* check if there is space to queue this packet */
f0ee7acf 2230 if (nr_txbds > tx_queue->num_txbdfree) {
4669bc90 2231 /* no space, stop the queue */
fba4ed03 2232 netif_tx_stop_queue(txq);
4669bc90 2233 dev->stats.tx_fifo_errors++;
4669bc90
DH
2234 return NETDEV_TX_BUSY;
2235 }
1da177e4
LT
2236
2237 /* Update transmit stats */
50ad076b
CM
2238 bytes_sent = skb->len;
2239 tx_queue->stats.tx_bytes += bytes_sent;
2240 /* keep Tx bytes on wire for BQL accounting */
2241 GFAR_CB(skb)->bytes_sent = bytes_sent;
1ac9ad13 2242 tx_queue->stats.tx_packets++;
1da177e4 2243
a12f801d 2244 txbdp = txbdp_start = tx_queue->cur_tx;
f0ee7acf
MR
2245 lstatus = txbdp->lstatus;
2246
2247 /* Time stamp insertion requires one additional TxBD */
2248 if (unlikely(do_tstamp))
2249 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
bc4598bc 2250 tx_queue->tx_ring_size);
1da177e4 2251
4669bc90 2252 if (nr_frags == 0) {
f0ee7acf
MR
2253 if (unlikely(do_tstamp))
2254 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
bc4598bc 2255 TXBD_INTERRUPT);
f0ee7acf
MR
2256 else
2257 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
4669bc90
DH
2258 } else {
2259 /* Place the fragment addresses and lengths into the TxBDs */
2260 for (i = 0; i < nr_frags; i++) {
50ad076b 2261 unsigned int frag_len;
4669bc90 2262 /* Point at the next BD, wrapping as needed */
a12f801d 2263 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
4669bc90 2264
50ad076b 2265 frag_len = skb_shinfo(skb)->frags[i].size;
4669bc90 2266
50ad076b 2267 lstatus = txbdp->lstatus | frag_len |
bc4598bc 2268 BD_LFLAG(TXBD_READY);
4669bc90
DH
2269
2270 /* Handle the last BD specially */
2271 if (i == nr_frags - 1)
2272 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1da177e4 2273
369ec162 2274 bufaddr = skb_frag_dma_map(priv->dev,
2234a722
IC
2275 &skb_shinfo(skb)->frags[i],
2276 0,
50ad076b 2277 frag_len,
2234a722 2278 DMA_TO_DEVICE);
4669bc90
DH
2279
2280 /* set the TxBD length and buffer pointer */
2281 txbdp->bufPtr = bufaddr;
2282 txbdp->lstatus = lstatus;
2283 }
2284
2285 lstatus = txbdp_start->lstatus;
2286 }
1da177e4 2287
9c4886e5
MR
2288 /* Add TxPAL between FCB and frame if required */
2289 if (unlikely(do_tstamp)) {
2290 skb_push(skb, GMAC_TXPAL_LEN);
2291 memset(skb->data, 0, GMAC_TXPAL_LEN);
2292 }
2293
0d0cffdc
CM
2294 /* Add TxFCB if required */
2295 if (fcb_len) {
54dc79fe 2296 fcb = gfar_add_fcb(skb);
02d88fb4 2297 lstatus |= BD_LFLAG(TXBD_TOE);
0d0cffdc
CM
2298 }
2299
2300 /* Set up checksumming */
2301 if (do_csum) {
2302 gfar_tx_checksum(skb, fcb, fcb_len);
02d88fb4
CM
2303
2304 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2305 unlikely(gfar_csum_errata_76(priv, skb->len))) {
4363c2fd
AD
2306 __skb_pull(skb, GMAC_FCB_LEN);
2307 skb_checksum_help(skb);
0d0cffdc
CM
2308 if (do_vlan || do_tstamp) {
2309 /* put back a new fcb for vlan/tstamp TOE */
2310 fcb = gfar_add_fcb(skb);
2311 } else {
2312 /* Tx TOE not used */
2313 lstatus &= ~(BD_LFLAG(TXBD_TOE));
2314 fcb = NULL;
2315 }
4363c2fd 2316 }
0bbaf069
KG
2317 }
2318
0d0cffdc 2319 if (do_vlan)
54dc79fe 2320 gfar_tx_vlan(skb, fcb);
0bbaf069 2321
f0ee7acf
MR
2322 /* Setup tx hardware time stamping if requested */
2323 if (unlikely(do_tstamp)) {
2244d07b 2324 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
f0ee7acf 2325 fcb->ptp = 1;
f0ee7acf
MR
2326 }
2327
369ec162 2328 txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
bc4598bc 2329 skb_headlen(skb), DMA_TO_DEVICE);
1da177e4 2330
0977f817 2331 /* If time stamping is requested one additional TxBD must be set up. The
f0ee7acf
MR
2332 * first TxBD points to the FCB and must have a data length of
2333 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2334 * the full frame length.
2335 */
2336 if (unlikely(do_tstamp)) {
0d0cffdc 2337 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
f0ee7acf 2338 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
0d0cffdc 2339 (skb_headlen(skb) - fcb_len);
f0ee7acf
MR
2340 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2341 } else {
2342 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2343 }
1da177e4 2344
50ad076b 2345 netdev_tx_sent_queue(txq, bytes_sent);
d8a0f1b0 2346
0977f817 2347 /* We can work in parallel with gfar_clean_tx_ring(), except
a3bc1f11
AV
2348 * when modifying num_txbdfree. Note that we didn't grab the lock
2349 * when we were reading the num_txbdfree and checking for available
2350 * space, that's because outside of this function it can only grow,
2351 * and once we've got needed space, it cannot suddenly disappear.
2352 *
2353 * The lock also protects us from gfar_error(), which can modify
2354 * regs->tstat and thus retrigger the transfers, which is why we
2355 * also must grab the lock before setting ready bit for the first
2356 * to be transmitted BD.
2357 */
2358 spin_lock_irqsave(&tx_queue->txlock, flags);
2359
0977f817 2360 /* The powerpc-specific eieio() is used, as wmb() has too strong
3b6330ce
SW
2361 * semantics (it requires synchronization between cacheable and
2362 * uncacheable mappings, which eieio doesn't provide and which we
2363 * don't need), thus requiring a more expensive sync instruction. At
2364 * some point, the set of architecture-independent barrier functions
2365 * should be expanded to include weaker barriers.
2366 */
3b6330ce 2367 eieio();
7f7f5316 2368
4669bc90
DH
2369 txbdp_start->lstatus = lstatus;
2370
0eddba52
AV
2371 eieio(); /* force lstatus write before tx_skbuff */
2372
2373 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2374
4669bc90 2375 /* Update the current skb pointer to the next entry we will use
0977f817
JC
2376 * (wrapping if necessary)
2377 */
a12f801d 2378 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
bc4598bc 2379 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
4669bc90 2380
a12f801d 2381 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
4669bc90
DH
2382
2383 /* reduce TxBD free count */
f0ee7acf 2384 tx_queue->num_txbdfree -= (nr_txbds);
1da177e4
LT
2385
2386 /* If the next BD still needs to be cleaned up, then the bds
0977f817
JC
2387 * are full. We need to tell the kernel to stop sending us stuff.
2388 */
a12f801d 2389 if (!tx_queue->num_txbdfree) {
fba4ed03 2390 netif_tx_stop_queue(txq);
1da177e4 2391
09f75cd7 2392 dev->stats.tx_fifo_errors++;
1da177e4
LT
2393 }
2394
1da177e4 2395 /* Tell the DMA to go go go */
fba4ed03 2396 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1da177e4
LT
2397
2398 /* Unlock priv */
a12f801d 2399 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1da177e4 2400
54dc79fe 2401 return NETDEV_TX_OK;
1da177e4
LT
2402}
2403
2404/* Stops the kernel queue, and halts the controller */
2405static int gfar_close(struct net_device *dev)
2406{
2407 struct gfar_private *priv = netdev_priv(dev);
bea3348e 2408
ab939905 2409 cancel_work_sync(&priv->reset_task);
1da177e4
LT
2410 stop_gfar(dev);
2411
bb40dcbb
AF
2412 /* Disconnect from the PHY */
2413 phy_disconnect(priv->phydev);
2414 priv->phydev = NULL;
1da177e4 2415
80ec396c
CM
2416 gfar_free_irq(priv);
2417
1da177e4
LT
2418 return 0;
2419}
2420
1da177e4 2421/* Changes the mac address if the controller is not running. */
f162b9d5 2422static int gfar_set_mac_address(struct net_device *dev)
1da177e4 2423{
7f7f5316 2424 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
1da177e4
LT
2425
2426 return 0;
2427}
2428
1da177e4
LT
2429static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2430{
1da177e4 2431 struct gfar_private *priv = netdev_priv(dev);
0bbaf069
KG
2432 int frame_size = new_mtu + ETH_HLEN;
2433
1da177e4 2434 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
59deab26 2435 netif_err(priv, drv, dev, "Invalid MTU setting\n");
1da177e4
LT
2436 return -EINVAL;
2437 }
2438
0851133b
CM
2439 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2440 cpu_relax();
2441
88302648 2442 if (dev->flags & IFF_UP)
1da177e4
LT
2443 stop_gfar(dev);
2444
1da177e4
LT
2445 dev->mtu = new_mtu;
2446
88302648 2447 if (dev->flags & IFF_UP)
1da177e4
LT
2448 startup_gfar(dev);
2449
0851133b
CM
2450 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2451
1da177e4
LT
2452 return 0;
2453}
2454
0851133b
CM
2455void reset_gfar(struct net_device *ndev)
2456{
2457 struct gfar_private *priv = netdev_priv(ndev);
2458
2459 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2460 cpu_relax();
2461
2462 stop_gfar(ndev);
2463 startup_gfar(ndev);
2464
2465 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2466}
2467
ab939905 2468/* gfar_reset_task gets scheduled when a packet has not been
1da177e4
LT
2469 * transmitted after a set amount of time.
2470 * For now, assume that clearing out all the structures, and
ab939905
SS
2471 * starting over will fix the problem.
2472 */
2473static void gfar_reset_task(struct work_struct *work)
1da177e4 2474{
ab939905 2475 struct gfar_private *priv = container_of(work, struct gfar_private,
bc4598bc 2476 reset_task);
0851133b 2477 reset_gfar(priv->ndev);
1da177e4
LT
2478}
2479
ab939905
SS
2480static void gfar_timeout(struct net_device *dev)
2481{
2482 struct gfar_private *priv = netdev_priv(dev);
2483
2484 dev->stats.tx_errors++;
2485 schedule_work(&priv->reset_task);
2486}
2487
acbc0f03
EL
2488static void gfar_align_skb(struct sk_buff *skb)
2489{
2490 /* We need the data buffer to be aligned properly. We will reserve
2491 * as many bytes as needed to align the data properly
2492 */
2493 skb_reserve(skb, RXBUF_ALIGNMENT -
bc4598bc 2494 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
acbc0f03
EL
2495}
2496
1da177e4 2497/* Interrupt Handler for Transmit complete */
c233cf40 2498static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
1da177e4 2499{
a12f801d 2500 struct net_device *dev = tx_queue->dev;
d8a0f1b0 2501 struct netdev_queue *txq;
d080cd63 2502 struct gfar_private *priv = netdev_priv(dev);
f0ee7acf 2503 struct txbd8 *bdp, *next = NULL;
4669bc90 2504 struct txbd8 *lbdp = NULL;
a12f801d 2505 struct txbd8 *base = tx_queue->tx_bd_base;
4669bc90
DH
2506 struct sk_buff *skb;
2507 int skb_dirtytx;
a12f801d 2508 int tx_ring_size = tx_queue->tx_ring_size;
f0ee7acf 2509 int frags = 0, nr_txbds = 0;
4669bc90 2510 int i;
d080cd63 2511 int howmany = 0;
d8a0f1b0
PG
2512 int tqi = tx_queue->qindex;
2513 unsigned int bytes_sent = 0;
4669bc90 2514 u32 lstatus;
f0ee7acf 2515 size_t buflen;
1da177e4 2516
d8a0f1b0 2517 txq = netdev_get_tx_queue(dev, tqi);
a12f801d
SG
2518 bdp = tx_queue->dirty_tx;
2519 skb_dirtytx = tx_queue->skb_dirtytx;
1da177e4 2520
a12f801d 2521 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
a3bc1f11
AV
2522 unsigned long flags;
2523
4669bc90 2524 frags = skb_shinfo(skb)->nr_frags;
f0ee7acf 2525
0977f817 2526 /* When time stamping, one additional TxBD must be freed.
f0ee7acf
MR
2527 * Also, we need to dma_unmap_single() the TxPAL.
2528 */
2244d07b 2529 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
f0ee7acf
MR
2530 nr_txbds = frags + 2;
2531 else
2532 nr_txbds = frags + 1;
2533
2534 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
1da177e4 2535
4669bc90 2536 lstatus = lbdp->lstatus;
1da177e4 2537
4669bc90
DH
2538 /* Only clean completed frames */
2539 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
bc4598bc 2540 (lstatus & BD_LENGTH_MASK))
4669bc90
DH
2541 break;
2542
2244d07b 2543 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
f0ee7acf 2544 next = next_txbd(bdp, base, tx_ring_size);
9c4886e5 2545 buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
f0ee7acf
MR
2546 } else
2547 buflen = bdp->length;
2548
369ec162 2549 dma_unmap_single(priv->dev, bdp->bufPtr,
bc4598bc 2550 buflen, DMA_TO_DEVICE);
f0ee7acf 2551
2244d07b 2552 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
f0ee7acf
MR
2553 struct skb_shared_hwtstamps shhwtstamps;
2554 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
bc4598bc 2555
f0ee7acf
MR
2556 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2557 shhwtstamps.hwtstamp = ns_to_ktime(*ns);
9c4886e5 2558 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
f0ee7acf
MR
2559 skb_tstamp_tx(skb, &shhwtstamps);
2560 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2561 bdp = next;
2562 }
81183059 2563
4669bc90
DH
2564 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2565 bdp = next_txbd(bdp, base, tx_ring_size);
d080cd63 2566
4669bc90 2567 for (i = 0; i < frags; i++) {
369ec162 2568 dma_unmap_page(priv->dev, bdp->bufPtr,
bc4598bc 2569 bdp->length, DMA_TO_DEVICE);
4669bc90
DH
2570 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2571 bdp = next_txbd(bdp, base, tx_ring_size);
2572 }
1da177e4 2573
50ad076b 2574 bytes_sent += GFAR_CB(skb)->bytes_sent;
d8a0f1b0 2575
acb600de 2576 dev_kfree_skb_any(skb);
0fd56bb5 2577
a12f801d 2578 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
d080cd63 2579
4669bc90 2580 skb_dirtytx = (skb_dirtytx + 1) &
bc4598bc 2581 TX_RING_MOD_MASK(tx_ring_size);
4669bc90
DH
2582
2583 howmany++;
a3bc1f11 2584 spin_lock_irqsave(&tx_queue->txlock, flags);
f0ee7acf 2585 tx_queue->num_txbdfree += nr_txbds;
a3bc1f11 2586 spin_unlock_irqrestore(&tx_queue->txlock, flags);
4669bc90 2587 }
1da177e4 2588
4669bc90 2589 /* If we freed a buffer, we can restart transmission, if necessary */
0851133b
CM
2590 if (tx_queue->num_txbdfree &&
2591 netif_tx_queue_stopped(txq) &&
2592 !(test_bit(GFAR_DOWN, &priv->state)))
2593 netif_wake_subqueue(priv->ndev, tqi);
1da177e4 2594
4669bc90 2595 /* Update dirty indicators */
a12f801d
SG
2596 tx_queue->skb_dirtytx = skb_dirtytx;
2597 tx_queue->dirty_tx = bdp;
1da177e4 2598
d8a0f1b0 2599 netdev_tx_completed_queue(txq, howmany, bytes_sent);
d080cd63
DH
2600}
2601
a12f801d 2602static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
bc4598bc 2603 struct sk_buff *skb)
815b97c6 2604{
a12f801d 2605 struct net_device *dev = rx_queue->dev;
815b97c6 2606 struct gfar_private *priv = netdev_priv(dev);
8a102fe0 2607 dma_addr_t buf;
815b97c6 2608
369ec162 2609 buf = dma_map_single(priv->dev, skb->data,
8a102fe0 2610 priv->rx_buffer_size, DMA_FROM_DEVICE);
a12f801d 2611 gfar_init_rxbdp(rx_queue, bdp, buf);
815b97c6
AF
2612}
2613
2281a0f3 2614static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
1da177e4
LT
2615{
2616 struct gfar_private *priv = netdev_priv(dev);
acb600de 2617 struct sk_buff *skb;
1da177e4 2618
acbc0f03 2619 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
815b97c6 2620 if (!skb)
1da177e4
LT
2621 return NULL;
2622
acbc0f03 2623 gfar_align_skb(skb);
7f7f5316 2624
acbc0f03
EL
2625 return skb;
2626}
2627
2281a0f3 2628struct sk_buff *gfar_new_skb(struct net_device *dev)
acbc0f03 2629{
acb600de 2630 return gfar_alloc_skb(dev);
1da177e4
LT
2631}
2632
298e1a9e 2633static inline void count_errors(unsigned short status, struct net_device *dev)
1da177e4 2634{
298e1a9e 2635 struct gfar_private *priv = netdev_priv(dev);
09f75cd7 2636 struct net_device_stats *stats = &dev->stats;
1da177e4
LT
2637 struct gfar_extra_stats *estats = &priv->extra_stats;
2638
0977f817 2639 /* If the packet was truncated, none of the other errors matter */
1da177e4
LT
2640 if (status & RXBD_TRUNCATED) {
2641 stats->rx_length_errors++;
2642
212079df 2643 atomic64_inc(&estats->rx_trunc);
1da177e4
LT
2644
2645 return;
2646 }
2647 /* Count the errors, if there were any */
2648 if (status & (RXBD_LARGE | RXBD_SHORT)) {
2649 stats->rx_length_errors++;
2650
2651 if (status & RXBD_LARGE)
212079df 2652 atomic64_inc(&estats->rx_large);
1da177e4 2653 else
212079df 2654 atomic64_inc(&estats->rx_short);
1da177e4
LT
2655 }
2656 if (status & RXBD_NONOCTET) {
2657 stats->rx_frame_errors++;
212079df 2658 atomic64_inc(&estats->rx_nonoctet);
1da177e4
LT
2659 }
2660 if (status & RXBD_CRCERR) {
212079df 2661 atomic64_inc(&estats->rx_crcerr);
1da177e4
LT
2662 stats->rx_crc_errors++;
2663 }
2664 if (status & RXBD_OVERRUN) {
212079df 2665 atomic64_inc(&estats->rx_overrun);
1da177e4
LT
2666 stats->rx_crc_errors++;
2667 }
2668}
2669
f4983704 2670irqreturn_t gfar_receive(int irq, void *grp_id)
1da177e4 2671{
aeb12c5e
CM
2672 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2673 unsigned long flags;
2674 u32 imask;
2675
2676 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2677 spin_lock_irqsave(&grp->grplock, flags);
2678 imask = gfar_read(&grp->regs->imask);
2679 imask &= IMASK_RX_DISABLED;
2680 gfar_write(&grp->regs->imask, imask);
2681 spin_unlock_irqrestore(&grp->grplock, flags);
2682 __napi_schedule(&grp->napi_rx);
2683 } else {
2684 /* Clear IEVENT, so interrupts aren't called again
2685 * because of the packets that have already arrived.
2686 */
2687 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2688 }
2689
2690 return IRQ_HANDLED;
2691}
2692
2693/* Interrupt Handler for Transmit complete */
2694static irqreturn_t gfar_transmit(int irq, void *grp_id)
2695{
2696 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2697 unsigned long flags;
2698 u32 imask;
2699
2700 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2701 spin_lock_irqsave(&grp->grplock, flags);
2702 imask = gfar_read(&grp->regs->imask);
2703 imask &= IMASK_TX_DISABLED;
2704 gfar_write(&grp->regs->imask, imask);
2705 spin_unlock_irqrestore(&grp->grplock, flags);
2706 __napi_schedule(&grp->napi_tx);
2707 } else {
2708 /* Clear IEVENT, so interrupts aren't called again
2709 * because of the packets that have already arrived.
2710 */
2711 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2712 }
2713
1da177e4
LT
2714 return IRQ_HANDLED;
2715}
2716
0bbaf069
KG
2717static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2718{
2719 /* If valid headers were found, and valid sums
2720 * were verified, then we tell the kernel that no
0977f817
JC
2721 * checksumming is necessary. Otherwise, it is [FIXME]
2722 */
7f7f5316 2723 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
0bbaf069
KG
2724 skb->ip_summed = CHECKSUM_UNNECESSARY;
2725 else
bc8acf2c 2726 skb_checksum_none_assert(skb);
0bbaf069
KG
2727}
2728
2729
0977f817 2730/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
61db26c6
CM
2731static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2732 int amount_pull, struct napi_struct *napi)
1da177e4
LT
2733{
2734 struct gfar_private *priv = netdev_priv(dev);
0bbaf069 2735 struct rxfcb *fcb = NULL;
1da177e4 2736
2c2db48a
DH
2737 /* fcb is at the beginning if exists */
2738 fcb = (struct rxfcb *)skb->data;
0bbaf069 2739
0977f817
JC
2740 /* Remove the FCB from the skb
2741 * Remove the padded bytes, if there are any
2742 */
f74dac08
SG
2743 if (amount_pull) {
2744 skb_record_rx_queue(skb, fcb->rq);
2c2db48a 2745 skb_pull(skb, amount_pull);
f74dac08 2746 }
0bbaf069 2747
cc772ab7
MR
2748 /* Get receive timestamp from the skb */
2749 if (priv->hwts_rx_en) {
2750 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2751 u64 *ns = (u64 *) skb->data;
bc4598bc 2752
cc772ab7
MR
2753 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2754 shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2755 }
2756
2757 if (priv->padding)
2758 skb_pull(skb, priv->padding);
2759
8b3afe95 2760 if (dev->features & NETIF_F_RXCSUM)
2c2db48a 2761 gfar_rx_checksum(skb, fcb);
0bbaf069 2762
2c2db48a
DH
2763 /* Tell the skb what kind of packet this is */
2764 skb->protocol = eth_type_trans(skb, dev);
1da177e4 2765
f646968f 2766 /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
32f7fd44
JP
2767 * Even if vlan rx accel is disabled, on some chips
2768 * RXFCB_VLN is pseudo randomly set.
2769 */
f646968f 2770 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
32f7fd44 2771 fcb->flags & RXFCB_VLN)
e5905c83 2772 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);
87c288c6 2773
2c2db48a 2774 /* Send the packet up the stack */
953d2768 2775 napi_gro_receive(napi, skb);
0bbaf069 2776
1da177e4
LT
2777}
2778
2779/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2281a0f3
JC
2780 * until the budget/quota has been reached. Returns the number
2781 * of frames handled
1da177e4 2782 */
a12f801d 2783int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
1da177e4 2784{
a12f801d 2785 struct net_device *dev = rx_queue->dev;
31de198b 2786 struct rxbd8 *bdp, *base;
1da177e4 2787 struct sk_buff *skb;
2c2db48a
DH
2788 int pkt_len;
2789 int amount_pull;
1da177e4
LT
2790 int howmany = 0;
2791 struct gfar_private *priv = netdev_priv(dev);
2792
2793 /* Get the first full descriptor */
a12f801d
SG
2794 bdp = rx_queue->cur_rx;
2795 base = rx_queue->rx_bd_base;
1da177e4 2796
ba779711 2797 amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
2c2db48a 2798
1da177e4 2799 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
815b97c6 2800 struct sk_buff *newskb;
bc4598bc 2801
3b6330ce 2802 rmb();
815b97c6
AF
2803
2804 /* Add another skb for the future */
2805 newskb = gfar_new_skb(dev);
2806
a12f801d 2807 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
1da177e4 2808
369ec162 2809 dma_unmap_single(priv->dev, bdp->bufPtr,
bc4598bc 2810 priv->rx_buffer_size, DMA_FROM_DEVICE);
81183059 2811
63b88b90 2812 if (unlikely(!(bdp->status & RXBD_ERR) &&
bc4598bc 2813 bdp->length > priv->rx_buffer_size))
63b88b90
AV
2814 bdp->status = RXBD_LARGE;
2815
815b97c6
AF
2816 /* We drop the frame if we failed to allocate a new buffer */
2817 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
bc4598bc 2818 bdp->status & RXBD_ERR)) {
815b97c6
AF
2819 count_errors(bdp->status, dev);
2820
2821 if (unlikely(!newskb))
2822 newskb = skb;
acbc0f03 2823 else if (skb)
acb600de 2824 dev_kfree_skb(skb);
815b97c6 2825 } else {
1da177e4 2826 /* Increment the number of packets */
a7f38041 2827 rx_queue->stats.rx_packets++;
1da177e4
LT
2828 howmany++;
2829
2c2db48a
DH
2830 if (likely(skb)) {
2831 pkt_len = bdp->length - ETH_FCS_LEN;
2832 /* Remove the FCS from the packet length */
2833 skb_put(skb, pkt_len);
a7f38041 2834 rx_queue->stats.rx_bytes += pkt_len;
f74dac08 2835 skb_record_rx_queue(skb, rx_queue->qindex);
cd754a57 2836 gfar_process_frame(dev, skb, amount_pull,
aeb12c5e 2837 &rx_queue->grp->napi_rx);
2c2db48a
DH
2838
2839 } else {
59deab26 2840 netif_warn(priv, rx_err, dev, "Missing skb!\n");
a7f38041 2841 rx_queue->stats.rx_dropped++;
212079df 2842 atomic64_inc(&priv->extra_stats.rx_skbmissing);
2c2db48a 2843 }
1da177e4 2844
1da177e4
LT
2845 }
2846
a12f801d 2847 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
1da177e4 2848
815b97c6 2849 /* Setup the new bdp */
a12f801d 2850 gfar_new_rxbdp(rx_queue, bdp, newskb);
1da177e4
LT
2851
2852 /* Update to the next pointer */
a12f801d 2853 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
1da177e4
LT
2854
2855 /* update to point at the next skb */
bc4598bc
JC
2856 rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2857 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
1da177e4
LT
2858 }
2859
2860 /* Update the current rxbd pointer to be the next one */
a12f801d 2861 rx_queue->cur_rx = bdp;
1da177e4 2862
1da177e4
LT
2863 return howmany;
2864}
2865
aeb12c5e 2866static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
5eaedf31
CM
2867{
2868 struct gfar_priv_grp *gfargrp =
aeb12c5e 2869 container_of(napi, struct gfar_priv_grp, napi_rx);
5eaedf31 2870 struct gfar __iomem *regs = gfargrp->regs;
71ff9e3d 2871 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
5eaedf31
CM
2872 int work_done = 0;
2873
2874 /* Clear IEVENT, so interrupts aren't called again
2875 * because of the packets that have already arrived
2876 */
aeb12c5e 2877 gfar_write(&regs->ievent, IEVENT_RX_MASK);
5eaedf31
CM
2878
2879 work_done = gfar_clean_rx_ring(rx_queue, budget);
2880
2881 if (work_done < budget) {
aeb12c5e 2882 u32 imask;
5eaedf31
CM
2883 napi_complete(napi);
2884 /* Clear the halt bit in RSTAT */
2885 gfar_write(&regs->rstat, gfargrp->rstat);
2886
aeb12c5e
CM
2887 spin_lock_irq(&gfargrp->grplock);
2888 imask = gfar_read(&regs->imask);
2889 imask |= IMASK_RX_DEFAULT;
2890 gfar_write(&regs->imask, imask);
2891 spin_unlock_irq(&gfargrp->grplock);
5eaedf31
CM
2892 }
2893
2894 return work_done;
2895}
2896
aeb12c5e 2897static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
1da177e4 2898{
bc4598bc 2899 struct gfar_priv_grp *gfargrp =
aeb12c5e
CM
2900 container_of(napi, struct gfar_priv_grp, napi_tx);
2901 struct gfar __iomem *regs = gfargrp->regs;
71ff9e3d 2902 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
aeb12c5e
CM
2903 u32 imask;
2904
2905 /* Clear IEVENT, so interrupts aren't called again
2906 * because of the packets that have already arrived
2907 */
2908 gfar_write(&regs->ievent, IEVENT_TX_MASK);
2909
2910 /* run Tx cleanup to completion */
2911 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2912 gfar_clean_tx_ring(tx_queue);
2913
2914 napi_complete(napi);
2915
2916 spin_lock_irq(&gfargrp->grplock);
2917 imask = gfar_read(&regs->imask);
2918 imask |= IMASK_TX_DEFAULT;
2919 gfar_write(&regs->imask, imask);
2920 spin_unlock_irq(&gfargrp->grplock);
2921
2922 return 0;
2923}
2924
2925static int gfar_poll_rx(struct napi_struct *napi, int budget)
2926{
2927 struct gfar_priv_grp *gfargrp =
2928 container_of(napi, struct gfar_priv_grp, napi_rx);
fba4ed03 2929 struct gfar_private *priv = gfargrp->priv;
46ceb60c 2930 struct gfar __iomem *regs = gfargrp->regs;
fba4ed03 2931 struct gfar_priv_rx_q *rx_queue = NULL;
c233cf40 2932 int work_done = 0, work_done_per_q = 0;
39c0a0d5 2933 int i, budget_per_q = 0;
6be5ed3f
CM
2934 unsigned long rstat_rxf;
2935 int num_act_queues;
fba4ed03 2936
8c7396ae 2937 /* Clear IEVENT, so interrupts aren't called again
0977f817
JC
2938 * because of the packets that have already arrived
2939 */
aeb12c5e 2940 gfar_write(&regs->ievent, IEVENT_RX_MASK);
8c7396ae 2941
6be5ed3f
CM
2942 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2943
2944 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
2945 if (num_act_queues)
2946 budget_per_q = budget/num_act_queues;
2947
3ba405db
CM
2948 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2949 /* skip queue if not active */
2950 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2951 continue;
1da177e4 2952
3ba405db
CM
2953 rx_queue = priv->rx_queue[i];
2954 work_done_per_q =
2955 gfar_clean_rx_ring(rx_queue, budget_per_q);
2956 work_done += work_done_per_q;
2957
2958 /* finished processing this queue */
2959 if (work_done_per_q < budget_per_q) {
2960 /* clear active queue hw indication */
2961 gfar_write(&regs->rstat,
2962 RSTAT_CLEAR_RXF0 >> i);
2963 num_act_queues--;
2964
2965 if (!num_act_queues)
2966 break;
2967 }
2968 }
42199884 2969
aeb12c5e
CM
2970 if (!num_act_queues) {
2971 u32 imask;
3ba405db 2972 napi_complete(napi);
1da177e4 2973
3ba405db
CM
2974 /* Clear the halt bit in RSTAT */
2975 gfar_write(&regs->rstat, gfargrp->rstat);
1da177e4 2976
aeb12c5e
CM
2977 spin_lock_irq(&gfargrp->grplock);
2978 imask = gfar_read(&regs->imask);
2979 imask |= IMASK_RX_DEFAULT;
2980 gfar_write(&regs->imask, imask);
2981 spin_unlock_irq(&gfargrp->grplock);
1da177e4
LT
2982 }
2983
c233cf40 2984 return work_done;
1da177e4 2985}
1da177e4 2986
aeb12c5e
CM
2987static int gfar_poll_tx(struct napi_struct *napi, int budget)
2988{
2989 struct gfar_priv_grp *gfargrp =
2990 container_of(napi, struct gfar_priv_grp, napi_tx);
2991 struct gfar_private *priv = gfargrp->priv;
2992 struct gfar __iomem *regs = gfargrp->regs;
2993 struct gfar_priv_tx_q *tx_queue = NULL;
2994 int has_tx_work = 0;
2995 int i;
2996
2997 /* Clear IEVENT, so interrupts aren't called again
2998 * because of the packets that have already arrived
2999 */
3000 gfar_write(&regs->ievent, IEVENT_TX_MASK);
3001
3002 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
3003 tx_queue = priv->tx_queue[i];
3004 /* run Tx cleanup to completion */
3005 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3006 gfar_clean_tx_ring(tx_queue);
3007 has_tx_work = 1;
3008 }
3009 }
3010
3011 if (!has_tx_work) {
3012 u32 imask;
3013 napi_complete(napi);
3014
3015 spin_lock_irq(&gfargrp->grplock);
3016 imask = gfar_read(&regs->imask);
3017 imask |= IMASK_TX_DEFAULT;
3018 gfar_write(&regs->imask, imask);
3019 spin_unlock_irq(&gfargrp->grplock);
3020 }
3021
3022 return 0;
3023}
3024
3025
f2d71c2d 3026#ifdef CONFIG_NET_POLL_CONTROLLER
0977f817 3027/* Polling 'interrupt' - used by things like netconsole to send skbs
f2d71c2d
VW
3028 * without having to re-enable interrupts. It's not called while
3029 * the interrupt routine is executing.
3030 */
3031static void gfar_netpoll(struct net_device *dev)
3032{
3033 struct gfar_private *priv = netdev_priv(dev);
3a2e16c8 3034 int i;
f2d71c2d
VW
3035
3036 /* If the device has multiple interrupts, run tx/rx */
b31a1d8b 3037 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
46ceb60c 3038 for (i = 0; i < priv->num_grps; i++) {
62ed839d
PG
3039 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3040
3041 disable_irq(gfar_irq(grp, TX)->irq);
3042 disable_irq(gfar_irq(grp, RX)->irq);
3043 disable_irq(gfar_irq(grp, ER)->irq);
3044 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3045 enable_irq(gfar_irq(grp, ER)->irq);
3046 enable_irq(gfar_irq(grp, RX)->irq);
3047 enable_irq(gfar_irq(grp, TX)->irq);
46ceb60c 3048 }
f2d71c2d 3049 } else {
46ceb60c 3050 for (i = 0; i < priv->num_grps; i++) {
62ed839d
PG
3051 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3052
3053 disable_irq(gfar_irq(grp, TX)->irq);
3054 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3055 enable_irq(gfar_irq(grp, TX)->irq);
43de004b 3056 }
f2d71c2d
VW
3057 }
3058}
3059#endif
3060
1da177e4 3061/* The interrupt handler for devices with one interrupt */
f4983704 3062static irqreturn_t gfar_interrupt(int irq, void *grp_id)
1da177e4 3063{
f4983704 3064 struct gfar_priv_grp *gfargrp = grp_id;
1da177e4
LT
3065
3066 /* Save ievent for future reference */
f4983704 3067 u32 events = gfar_read(&gfargrp->regs->ievent);
1da177e4 3068
1da177e4 3069 /* Check for reception */
538cc7ee 3070 if (events & IEVENT_RX_MASK)
f4983704 3071 gfar_receive(irq, grp_id);
1da177e4
LT
3072
3073 /* Check for transmit completion */
538cc7ee 3074 if (events & IEVENT_TX_MASK)
f4983704 3075 gfar_transmit(irq, grp_id);
1da177e4 3076
538cc7ee
SS
3077 /* Check for errors */
3078 if (events & IEVENT_ERR_MASK)
f4983704 3079 gfar_error(irq, grp_id);
1da177e4
LT
3080
3081 return IRQ_HANDLED;
3082}
3083
1da177e4
LT
3084/* Called every time the controller might need to be made
3085 * aware of new link state. The PHY code conveys this
bb40dcbb 3086 * information through variables in the phydev structure, and this
1da177e4
LT
3087 * function converts those variables into the appropriate
3088 * register values, and can bring down the device if needed.
3089 */
3090static void adjust_link(struct net_device *dev)
3091{
3092 struct gfar_private *priv = netdev_priv(dev);
bb40dcbb 3093 struct phy_device *phydev = priv->phydev;
bb40dcbb 3094
6ce29b0e
CM
3095 if (unlikely(phydev->link != priv->oldlink ||
3096 phydev->duplex != priv->oldduplex ||
3097 phydev->speed != priv->oldspeed))
3098 gfar_update_link_state(priv);
bb40dcbb 3099}
1da177e4
LT
3100
3101/* Update the hash table based on the current list of multicast
3102 * addresses we subscribe to. Also, change the promiscuity of
3103 * the device based on the flags (this function is called
0977f817
JC
3104 * whenever dev->flags is changed
3105 */
1da177e4
LT
3106static void gfar_set_multi(struct net_device *dev)
3107{
22bedad3 3108 struct netdev_hw_addr *ha;
1da177e4 3109 struct gfar_private *priv = netdev_priv(dev);
46ceb60c 3110 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1da177e4
LT
3111 u32 tempval;
3112
a12f801d 3113 if (dev->flags & IFF_PROMISC) {
1da177e4
LT
3114 /* Set RCTRL to PROM */
3115 tempval = gfar_read(&regs->rctrl);
3116 tempval |= RCTRL_PROM;
3117 gfar_write(&regs->rctrl, tempval);
3118 } else {
3119 /* Set RCTRL to not PROM */
3120 tempval = gfar_read(&regs->rctrl);
3121 tempval &= ~(RCTRL_PROM);
3122 gfar_write(&regs->rctrl, tempval);
3123 }
6aa20a22 3124
a12f801d 3125 if (dev->flags & IFF_ALLMULTI) {
1da177e4 3126 /* Set the hash to rx all multicast frames */
0bbaf069
KG
3127 gfar_write(&regs->igaddr0, 0xffffffff);
3128 gfar_write(&regs->igaddr1, 0xffffffff);
3129 gfar_write(&regs->igaddr2, 0xffffffff);
3130 gfar_write(&regs->igaddr3, 0xffffffff);
3131 gfar_write(&regs->igaddr4, 0xffffffff);
3132 gfar_write(&regs->igaddr5, 0xffffffff);
3133 gfar_write(&regs->igaddr6, 0xffffffff);
3134 gfar_write(&regs->igaddr7, 0xffffffff);
1da177e4
LT
3135 gfar_write(&regs->gaddr0, 0xffffffff);
3136 gfar_write(&regs->gaddr1, 0xffffffff);
3137 gfar_write(&regs->gaddr2, 0xffffffff);
3138 gfar_write(&regs->gaddr3, 0xffffffff);
3139 gfar_write(&regs->gaddr4, 0xffffffff);
3140 gfar_write(&regs->gaddr5, 0xffffffff);
3141 gfar_write(&regs->gaddr6, 0xffffffff);
3142 gfar_write(&regs->gaddr7, 0xffffffff);
3143 } else {
7f7f5316
AF
3144 int em_num;
3145 int idx;
3146
1da177e4 3147 /* zero out the hash */
0bbaf069
KG
3148 gfar_write(&regs->igaddr0, 0x0);
3149 gfar_write(&regs->igaddr1, 0x0);
3150 gfar_write(&regs->igaddr2, 0x0);
3151 gfar_write(&regs->igaddr3, 0x0);
3152 gfar_write(&regs->igaddr4, 0x0);
3153 gfar_write(&regs->igaddr5, 0x0);
3154 gfar_write(&regs->igaddr6, 0x0);
3155 gfar_write(&regs->igaddr7, 0x0);
1da177e4
LT
3156 gfar_write(&regs->gaddr0, 0x0);
3157 gfar_write(&regs->gaddr1, 0x0);
3158 gfar_write(&regs->gaddr2, 0x0);
3159 gfar_write(&regs->gaddr3, 0x0);
3160 gfar_write(&regs->gaddr4, 0x0);
3161 gfar_write(&regs->gaddr5, 0x0);
3162 gfar_write(&regs->gaddr6, 0x0);
3163 gfar_write(&regs->gaddr7, 0x0);
3164
7f7f5316
AF
3165 /* If we have extended hash tables, we need to
3166 * clear the exact match registers to prepare for
0977f817
JC
3167 * setting them
3168 */
7f7f5316
AF
3169 if (priv->extended_hash) {
3170 em_num = GFAR_EM_NUM + 1;
3171 gfar_clear_exact_match(dev);
3172 idx = 1;
3173 } else {
3174 idx = 0;
3175 em_num = 0;
3176 }
3177
4cd24eaf 3178 if (netdev_mc_empty(dev))
1da177e4
LT
3179 return;
3180
3181 /* Parse the list, and set the appropriate bits */
22bedad3 3182 netdev_for_each_mc_addr(ha, dev) {
7f7f5316 3183 if (idx < em_num) {
22bedad3 3184 gfar_set_mac_for_addr(dev, idx, ha->addr);
7f7f5316
AF
3185 idx++;
3186 } else
22bedad3 3187 gfar_set_hash_for_addr(dev, ha->addr);
1da177e4
LT
3188 }
3189 }
1da177e4
LT
3190}
3191
7f7f5316
AF
3192
3193/* Clears each of the exact match registers to zero, so they
0977f817
JC
3194 * don't interfere with normal reception
3195 */
7f7f5316
AF
3196static void gfar_clear_exact_match(struct net_device *dev)
3197{
3198 int idx;
6a3c910c 3199 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
7f7f5316 3200
bc4598bc 3201 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
b6bc7650 3202 gfar_set_mac_for_addr(dev, idx, zero_arr);
7f7f5316
AF
3203}
3204
1da177e4
LT
3205/* Set the appropriate hash bit for the given addr */
3206/* The algorithm works like so:
3207 * 1) Take the Destination Address (ie the multicast address), and
3208 * do a CRC on it (little endian), and reverse the bits of the
3209 * result.
3210 * 2) Use the 8 most significant bits as a hash into a 256-entry
3211 * table. The table is controlled through 8 32-bit registers:
3212 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
3213 * gaddr7. This means that the 3 most significant bits in the
3214 * hash index which gaddr register to use, and the 5 other bits
3215 * indicate which bit (assuming an IBM numbering scheme, which
3216 * for PowerPC (tm) is usually the case) in the register holds
0977f817
JC
3217 * the entry.
3218 */
1da177e4
LT
3219static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3220{
3221 u32 tempval;
3222 struct gfar_private *priv = netdev_priv(dev);
6a3c910c 3223 u32 result = ether_crc(ETH_ALEN, addr);
0bbaf069
KG
3224 int width = priv->hash_width;
3225 u8 whichbit = (result >> (32 - width)) & 0x1f;
3226 u8 whichreg = result >> (32 - width + 5);
1da177e4
LT
3227 u32 value = (1 << (31-whichbit));
3228
0bbaf069 3229 tempval = gfar_read(priv->hash_regs[whichreg]);
1da177e4 3230 tempval |= value;
0bbaf069 3231 gfar_write(priv->hash_regs[whichreg], tempval);
1da177e4
LT
3232}
3233
7f7f5316
AF
3234
3235/* There are multiple MAC Address register pairs on some controllers
3236 * This function sets the numth pair to a given address
3237 */
b6bc7650
JP
3238static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3239 const u8 *addr)
7f7f5316
AF
3240{
3241 struct gfar_private *priv = netdev_priv(dev);
46ceb60c 3242 struct gfar __iomem *regs = priv->gfargrp[0].regs;
7f7f5316 3243 int idx;
6a3c910c 3244 char tmpbuf[ETH_ALEN];
7f7f5316 3245 u32 tempval;
f4983704 3246 u32 __iomem *macptr = &regs->macstnaddr1;
7f7f5316
AF
3247
3248 macptr += num*2;
3249
0977f817
JC
3250 /* Now copy it into the mac registers backwards, cuz
3251 * little endian is silly
3252 */
6a3c910c
JP
3253 for (idx = 0; idx < ETH_ALEN; idx++)
3254 tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
7f7f5316
AF
3255
3256 gfar_write(macptr, *((u32 *) (tmpbuf)));
3257
3258 tempval = *((u32 *) (tmpbuf + 4));
3259
3260 gfar_write(macptr+1, tempval);
3261}
3262
1da177e4 3263/* GFAR error interrupt handler */
f4983704 3264static irqreturn_t gfar_error(int irq, void *grp_id)
1da177e4 3265{
f4983704
SG
3266 struct gfar_priv_grp *gfargrp = grp_id;
3267 struct gfar __iomem *regs = gfargrp->regs;
3268 struct gfar_private *priv= gfargrp->priv;
3269 struct net_device *dev = priv->ndev;
1da177e4
LT
3270
3271 /* Save ievent for future reference */
f4983704 3272 u32 events = gfar_read(&regs->ievent);
1da177e4
LT
3273
3274 /* Clear IEVENT */
f4983704 3275 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
d87eb127
SW
3276
3277 /* Magic Packet is not an error. */
b31a1d8b 3278 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
d87eb127
SW
3279 (events & IEVENT_MAG))
3280 events &= ~IEVENT_MAG;
1da177e4
LT
3281
3282 /* Hmm... */
0bbaf069 3283 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
bc4598bc
JC
3284 netdev_dbg(dev,
3285 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
59deab26 3286 events, gfar_read(&regs->imask));
1da177e4
LT
3287
3288 /* Update the error counters */
3289 if (events & IEVENT_TXE) {
09f75cd7 3290 dev->stats.tx_errors++;
1da177e4
LT
3291
3292 if (events & IEVENT_LC)
09f75cd7 3293 dev->stats.tx_window_errors++;
1da177e4 3294 if (events & IEVENT_CRL)
09f75cd7 3295 dev->stats.tx_aborted_errors++;
1da177e4 3296 if (events & IEVENT_XFUN) {
836cf7fa
AV
3297 unsigned long flags;
3298
59deab26
JP
3299 netif_dbg(priv, tx_err, dev,
3300 "TX FIFO underrun, packet dropped\n");
09f75cd7 3301 dev->stats.tx_dropped++;
212079df 3302 atomic64_inc(&priv->extra_stats.tx_underrun);
1da177e4 3303
836cf7fa
AV
3304 local_irq_save(flags);
3305 lock_tx_qs(priv);
3306
1da177e4 3307 /* Reactivate the Tx Queues */
fba4ed03 3308 gfar_write(&regs->tstat, gfargrp->tstat);
836cf7fa
AV
3309
3310 unlock_tx_qs(priv);
3311 local_irq_restore(flags);
1da177e4 3312 }
59deab26 3313 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
1da177e4
LT
3314 }
3315 if (events & IEVENT_BSY) {
09f75cd7 3316 dev->stats.rx_errors++;
212079df 3317 atomic64_inc(&priv->extra_stats.rx_bsy);
1da177e4 3318
f4983704 3319 gfar_receive(irq, grp_id);
1da177e4 3320
59deab26
JP
3321 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3322 gfar_read(&regs->rstat));
1da177e4
LT
3323 }
3324 if (events & IEVENT_BABR) {
09f75cd7 3325 dev->stats.rx_errors++;
212079df 3326 atomic64_inc(&priv->extra_stats.rx_babr);
1da177e4 3327
59deab26 3328 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
1da177e4
LT
3329 }
3330 if (events & IEVENT_EBERR) {
212079df 3331 atomic64_inc(&priv->extra_stats.eberr);
59deab26 3332 netif_dbg(priv, rx_err, dev, "bus error\n");
1da177e4 3333 }
59deab26
JP
3334 if (events & IEVENT_RXC)
3335 netif_dbg(priv, rx_status, dev, "control frame\n");
1da177e4
LT
3336
3337 if (events & IEVENT_BABT) {
212079df 3338 atomic64_inc(&priv->extra_stats.tx_babt);
59deab26 3339 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
1da177e4
LT
3340 }
3341 return IRQ_HANDLED;
3342}
3343
6ce29b0e
CM
3344static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3345{
3346 struct phy_device *phydev = priv->phydev;
3347 u32 val = 0;
3348
3349 if (!phydev->duplex)
3350 return val;
3351
3352 if (!priv->pause_aneg_en) {
3353 if (priv->tx_pause_en)
3354 val |= MACCFG1_TX_FLOW;
3355 if (priv->rx_pause_en)
3356 val |= MACCFG1_RX_FLOW;
3357 } else {
3358 u16 lcl_adv, rmt_adv;
3359 u8 flowctrl;
3360 /* get link partner capabilities */
3361 rmt_adv = 0;
3362 if (phydev->pause)
3363 rmt_adv = LPA_PAUSE_CAP;
3364 if (phydev->asym_pause)
3365 rmt_adv |= LPA_PAUSE_ASYM;
3366
3367 lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3368
3369 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3370 if (flowctrl & FLOW_CTRL_TX)
3371 val |= MACCFG1_TX_FLOW;
3372 if (flowctrl & FLOW_CTRL_RX)
3373 val |= MACCFG1_RX_FLOW;
3374 }
3375
3376 return val;
3377}
3378
3379static noinline void gfar_update_link_state(struct gfar_private *priv)
3380{
3381 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3382 struct phy_device *phydev = priv->phydev;
3383
3384 if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3385 return;
3386
3387 if (phydev->link) {
3388 u32 tempval1 = gfar_read(&regs->maccfg1);
3389 u32 tempval = gfar_read(&regs->maccfg2);
3390 u32 ecntrl = gfar_read(&regs->ecntrl);
3391
3392 if (phydev->duplex != priv->oldduplex) {
3393 if (!(phydev->duplex))
3394 tempval &= ~(MACCFG2_FULL_DUPLEX);
3395 else
3396 tempval |= MACCFG2_FULL_DUPLEX;
3397
3398 priv->oldduplex = phydev->duplex;
3399 }
3400
3401 if (phydev->speed != priv->oldspeed) {
3402 switch (phydev->speed) {
3403 case 1000:
3404 tempval =
3405 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3406
3407 ecntrl &= ~(ECNTRL_R100);
3408 break;
3409 case 100:
3410 case 10:
3411 tempval =
3412 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3413
3414 /* Reduced mode distinguishes
3415 * between 10 and 100
3416 */
3417 if (phydev->speed == SPEED_100)
3418 ecntrl |= ECNTRL_R100;
3419 else
3420 ecntrl &= ~(ECNTRL_R100);
3421 break;
3422 default:
3423 netif_warn(priv, link, priv->ndev,
3424 "Ack! Speed (%d) is not 10/100/1000!\n",
3425 phydev->speed);
3426 break;
3427 }
3428
3429 priv->oldspeed = phydev->speed;
3430 }
3431
3432 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3433 tempval1 |= gfar_get_flowctrl_cfg(priv);
3434
3435 gfar_write(&regs->maccfg1, tempval1);
3436 gfar_write(&regs->maccfg2, tempval);
3437 gfar_write(&regs->ecntrl, ecntrl);
3438
3439 if (!priv->oldlink)
3440 priv->oldlink = 1;
3441
3442 } else if (priv->oldlink) {
3443 priv->oldlink = 0;
3444 priv->oldspeed = 0;
3445 priv->oldduplex = -1;
3446 }
3447
3448 if (netif_msg_link(priv))
3449 phy_print_status(phydev);
3450}
3451
b31a1d8b
AF
3452static struct of_device_id gfar_match[] =
3453{
3454 {
3455 .type = "network",
3456 .compatible = "gianfar",
3457 },
46ceb60c
SG
3458 {
3459 .compatible = "fsl,etsec2",
3460 },
b31a1d8b
AF
3461 {},
3462};
e72701ac 3463MODULE_DEVICE_TABLE(of, gfar_match);
b31a1d8b 3464
1da177e4 3465/* Structure for a device driver */
74888760 3466static struct platform_driver gfar_driver = {
4018294b
GL
3467 .driver = {
3468 .name = "fsl-gianfar",
3469 .owner = THIS_MODULE,
3470 .pm = GFAR_PM_OPS,
3471 .of_match_table = gfar_match,
3472 },
1da177e4
LT
3473 .probe = gfar_probe,
3474 .remove = gfar_remove,
3475};
3476
db62f684 3477module_platform_driver(gfar_driver);