Commit | Line | Data |
---|---|---|
0977f817 | 1 | /* drivers/net/ethernet/freescale/gianfar.c |
1da177e4 LT |
2 | * |
3 | * Gianfar Ethernet Driver | |
7f7f5316 AF |
4 | * This driver is designed for the non-CPM ethernet controllers |
5 | * on the 85xx and 83xx family of integrated processors | |
1da177e4 LT |
6 | * Based on 8260_io/fcc_enet.c |
7 | * | |
8 | * Author: Andy Fleming | |
4c8d3d99 | 9 | * Maintainer: Kumar Gala |
a12f801d | 10 | * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> |
1da177e4 | 11 | * |
20862788 | 12 | * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc. |
a12f801d | 13 | * Copyright 2007 MontaVista Software, Inc. |
1da177e4 LT |
14 | * |
15 | * This program is free software; you can redistribute it and/or modify it | |
16 | * under the terms of the GNU General Public License as published by the | |
17 | * Free Software Foundation; either version 2 of the License, or (at your | |
18 | * option) any later version. | |
19 | * | |
20 | * Gianfar: AKA Lambda Draconis, "Dragon" | |
21 | * RA 11 31 24.2 | |
22 | * Dec +69 19 52 | |
23 | * V 3.84 | |
24 | * B-V +1.62 | |
25 | * | |
26 | * Theory of operation | |
0bbaf069 | 27 | * |
b31a1d8b AF |
28 | * The driver is initialized through of_device. Configuration information |
29 | * is therefore conveyed through an OF-style device tree. | |
1da177e4 LT |
30 | * |
31 | * The Gianfar Ethernet Controller uses a ring of buffer | |
32 | * descriptors. The beginning is indicated by a register | |
0bbaf069 KG |
33 | * pointing to the physical address of the start of the ring. |
34 | * The end is determined by a "wrap" bit being set in the | |
1da177e4 LT |
35 | * last descriptor of the ring. |
36 | * | |
37 | * When a packet is received, the RXF bit in the | |
0bbaf069 | 38 | * IEVENT register is set, triggering an interrupt when the |
1da177e4 LT |
39 | * corresponding bit in the IMASK register is also set (if |
40 | * interrupt coalescing is active, then the interrupt may not | |
41 | * happen immediately, but will wait until either a set number | |
bb40dcbb | 42 | * of frames or amount of time have passed). In NAPI, the |
1da177e4 | 43 | * interrupt handler will signal there is work to be done, and |
0aa1538f | 44 | * exit. This method will start at the last known empty |
0bbaf069 | 45 | * descriptor, and process every subsequent descriptor until there |
1da177e4 LT |
46 | * are none left with data (NAPI will stop after a set number of |
47 | * packets to give time to other tasks, but will eventually | |
48 | * process all the packets). The data arrives inside a | |
49 | * pre-allocated skb, and so after the skb is passed up to the | |
50 | * stack, a new skb must be allocated, and the address field in | |
51 | * the buffer descriptor must be updated to indicate this new | |
52 | * skb. | |
53 | * | |
54 | * When the kernel requests that a packet be transmitted, the | |
55 | * driver starts where it left off last time, and points the | |
56 | * descriptor at the buffer which was passed in. The driver | |
57 | * then informs the DMA engine that there are packets ready to | |
58 | * be transmitted. Once the controller is finished transmitting | |
59 | * the packet, an interrupt may be triggered (under the same | |
60 | * conditions as for reception, but depending on the TXF bit). | |
61 | * The driver then cleans up the buffer. | |
62 | */ | |
63 | ||
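/* Illustrative sketch only (not part of the driver): a minimal Rx ring walk
 * matching the description above.  rx_bd_base, cur_rx, RXBD_EMPTY and
 * RXBD_WRAP are real driver symbols; "budget" and the process_bd() helper
 * are hypothetical, standing in for the NAPI work limit and the skb handling
 * done in gfar_clean_rx_ring().
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *
 *	while (!(bdp->status & RXBD_EMPTY) && budget-- > 0) {
 *		process_bd(rx_queue, bdp);	// pass skb up, attach a new buffer
 *		bdp->status |= RXBD_EMPTY;	// hand the BD back to hardware
 *		if (bdp->status & RXBD_WRAP)	// wrap bit marks the ring end
 *			bdp = rx_queue->rx_bd_base;
 *		else
 *			bdp++;
 *	}
 *	rx_queue->cur_rx = bdp;			// resume here on the next poll
 */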
59deab26 JP |
64 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
65 | #define DEBUG | |
66 | ||
1da177e4 | 67 | #include <linux/kernel.h> |
1da177e4 LT |
68 | #include <linux/string.h> |
69 | #include <linux/errno.h> | |
bb40dcbb | 70 | #include <linux/unistd.h> |
1da177e4 LT |
71 | #include <linux/slab.h> |
72 | #include <linux/interrupt.h> | |
1da177e4 LT |
73 | #include <linux/delay.h> |
74 | #include <linux/netdevice.h> | |
75 | #include <linux/etherdevice.h> | |
76 | #include <linux/skbuff.h> | |
0bbaf069 | 77 | #include <linux/if_vlan.h> |
1da177e4 LT |
78 | #include <linux/spinlock.h> |
79 | #include <linux/mm.h> | |
5af50730 RH |
80 | #include <linux/of_address.h> |
81 | #include <linux/of_irq.h> | |
fe192a49 | 82 | #include <linux/of_mdio.h> |
b31a1d8b | 83 | #include <linux/of_platform.h> |
0bbaf069 KG |
84 | #include <linux/ip.h> |
85 | #include <linux/tcp.h> | |
86 | #include <linux/udp.h> | |
9c07b884 | 87 | #include <linux/in.h> |
cc772ab7 | 88 | #include <linux/net_tstamp.h> |
1da177e4 LT |
89 | |
90 | #include <asm/io.h> | |
d6ef0bcc | 91 | #ifdef CONFIG_PPC |
7d350977 | 92 | #include <asm/reg.h> |
2969b1f7 | 93 | #include <asm/mpc85xx.h> |
d6ef0bcc | 94 | #endif |
1da177e4 LT |
95 | #include <asm/irq.h> |
96 | #include <asm/uaccess.h> | |
97 | #include <linux/module.h> | |
1da177e4 LT |
98 | #include <linux/dma-mapping.h> |
99 | #include <linux/crc32.h> | |
bb40dcbb AF |
100 | #include <linux/mii.h> |
101 | #include <linux/phy.h> | |
b31a1d8b AF |
102 | #include <linux/phy_fixed.h> |
103 | #include <linux/of.h> | |
4b6ba8aa | 104 | #include <linux/of_net.h> |
fd31a952 CM |
105 | #include <linux/of_address.h> |
106 | #include <linux/of_irq.h> | |
1da177e4 LT |
107 | |
108 | #include "gianfar.h" | |
1da177e4 LT |
109 | |
110 | #define TX_TIMEOUT (1*HZ) | |
1da177e4 | 111 | |
7f7f5316 | 112 | const char gfar_driver_version[] = "1.3"; |
1da177e4 | 113 | |
1da177e4 LT |
114 | static int gfar_enet_open(struct net_device *dev); |
115 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); | |
ab939905 | 116 | static void gfar_reset_task(struct work_struct *work); |
1da177e4 LT |
117 | static void gfar_timeout(struct net_device *dev); |
118 | static int gfar_close(struct net_device *dev); | |
91c53f76 KH |
119 | static struct sk_buff *gfar_new_skb(struct net_device *dev, |
120 | dma_addr_t *bufaddr); | |
1da177e4 LT |
121 | static int gfar_set_mac_address(struct net_device *dev); |
122 | static int gfar_change_mtu(struct net_device *dev, int new_mtu); | |
7d12e780 DH |
123 | static irqreturn_t gfar_error(int irq, void *dev_id); |
124 | static irqreturn_t gfar_transmit(int irq, void *dev_id); | |
125 | static irqreturn_t gfar_interrupt(int irq, void *dev_id); | |
1da177e4 | 126 | static void adjust_link(struct net_device *dev); |
6ce29b0e | 127 | static noinline void gfar_update_link_state(struct gfar_private *priv); |
1da177e4 | 128 | static int init_phy(struct net_device *dev); |
74888760 | 129 | static int gfar_probe(struct platform_device *ofdev); |
2dc11581 | 130 | static int gfar_remove(struct platform_device *ofdev); |
bb40dcbb | 131 | static void free_skb_resources(struct gfar_private *priv); |
1da177e4 LT |
132 | static void gfar_set_multi(struct net_device *dev); |
133 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); | |
d3c12873 | 134 | static void gfar_configure_serdes(struct net_device *dev); |
aeb12c5e CM |
135 | static int gfar_poll_rx(struct napi_struct *napi, int budget); |
136 | static int gfar_poll_tx(struct napi_struct *napi, int budget); | |
137 | static int gfar_poll_rx_sq(struct napi_struct *napi, int budget); | |
138 | static int gfar_poll_tx_sq(struct napi_struct *napi, int budget); | |
f2d71c2d VW |
139 | #ifdef CONFIG_NET_POLL_CONTROLLER |
140 | static void gfar_netpoll(struct net_device *dev); | |
141 | #endif | |
a12f801d | 142 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); |
c233cf40 | 143 | static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); |
61db26c6 CM |
144 | static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, |
145 | int amount_pull, struct napi_struct *napi); | |
c10650b6 | 146 | static void gfar_halt_nodisable(struct gfar_private *priv); |
7f7f5316 | 147 | static void gfar_clear_exact_match(struct net_device *dev); |
b6bc7650 JP |
148 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, |
149 | const u8 *addr); | |
26ccfc37 | 150 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
1da177e4 | 151 | |
1da177e4 LT |
152 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); |
153 | MODULE_DESCRIPTION("Gianfar Ethernet Driver"); | |
154 | MODULE_LICENSE("GPL"); | |
155 | ||
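/* gfar_init_rxbdp - (re)arm a single Rx buffer descriptor.
 * Points the BD at the DMA buffer @buf, marks it empty (hardware-owned) with
 * interrupt-on-completion, and sets the wrap flag on the last BD of the ring.
 * The gfar_wmb() ensures the buffer pointer is visible to the DMA engine
 * before ownership is handed over through the lstatus write.
 */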
a12f801d | 156 | static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, |
8a102fe0 AV |
157 | dma_addr_t buf) |
158 | { | |
8a102fe0 AV |
159 | u32 lstatus; |
160 | ||
161 | bdp->bufPtr = buf; | |
162 | ||
163 | lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); | |
a12f801d | 164 | if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) |
8a102fe0 AV |
165 | lstatus |= BD_LFLAG(RXBD_WRAP); |
166 | ||
d55398ba | 167 | gfar_wmb(); |
8a102fe0 AV |
168 | |
169 | bdp->lstatus = lstatus; | |
170 | } | |
171 | ||
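/* gfar_init_bds - reset all Tx and Rx descriptor rings to their initial state.
 * Tx BDs are zeroed and the queue indices rewound; Rx BDs are (re)armed with
 * their skbs, allocating new ones where none are attached yet.  Each Rx queue
 * also records its free-buffer pointer register (rfbptr), used by the
 * controller's flow control support.
 */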
8728327e | 172 | static int gfar_init_bds(struct net_device *ndev) |
826aa4a0 | 173 | { |
8728327e | 174 | struct gfar_private *priv = netdev_priv(ndev); |
45b679c9 | 175 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
a12f801d SG |
176 | struct gfar_priv_tx_q *tx_queue = NULL; |
177 | struct gfar_priv_rx_q *rx_queue = NULL; | |
826aa4a0 AV |
178 | struct txbd8 *txbdp; |
179 | struct rxbd8 *rxbdp; | |
03366a33 | 180 | u32 __iomem *rfbptr; |
fba4ed03 | 181 | int i, j; |
0a4b5a24 | 182 | dma_addr_t bufaddr; |
a12f801d | 183 | |
fba4ed03 SG |
184 | for (i = 0; i < priv->num_tx_queues; i++) { |
185 | tx_queue = priv->tx_queue[i]; | |
186 | /* Initialize some variables in our dev structure */ | |
187 | tx_queue->num_txbdfree = tx_queue->tx_ring_size; | |
188 | tx_queue->dirty_tx = tx_queue->tx_bd_base; | |
189 | tx_queue->cur_tx = tx_queue->tx_bd_base; | |
190 | tx_queue->skb_curtx = 0; | |
191 | tx_queue->skb_dirtytx = 0; | |
192 | ||
193 | /* Initialize Transmit Descriptor Ring */ | |
194 | txbdp = tx_queue->tx_bd_base; | |
195 | for (j = 0; j < tx_queue->tx_ring_size; j++) { | |
196 | txbdp->lstatus = 0; | |
197 | txbdp->bufPtr = 0; | |
198 | txbdp++; | |
199 | } | |
8728327e | 200 | |
fba4ed03 SG |
201 | /* Set the last descriptor in the ring to indicate wrap */ |
202 | txbdp--; | |
203 | txbdp->status |= TXBD_WRAP; | |
8728327e AV |
204 | } |
205 | ||
45b679c9 | 206 | rfbptr = ®s->rfbptr0; |
fba4ed03 SG |
207 | for (i = 0; i < priv->num_rx_queues; i++) { |
208 | rx_queue = priv->rx_queue[i]; | |
209 | rx_queue->cur_rx = rx_queue->rx_bd_base; | |
210 | rx_queue->skb_currx = 0; | |
211 | rxbdp = rx_queue->rx_bd_base; | |
8728327e | 212 | |
fba4ed03 SG |
213 | for (j = 0; j < rx_queue->rx_ring_size; j++) { |
214 | struct sk_buff *skb = rx_queue->rx_skbuff[j]; | |
8728327e | 215 | |
fba4ed03 | 216 | if (skb) { |
0a4b5a24 | 217 | bufaddr = rxbdp->bufPtr; |
fba4ed03 | 218 | } else { |
0a4b5a24 | 219 | skb = gfar_new_skb(ndev, &bufaddr); |
fba4ed03 | 220 | if (!skb) { |
59deab26 | 221 | netdev_err(ndev, "Can't allocate RX buffers\n"); |
1eb8f7a7 | 222 | return -ENOMEM; |
fba4ed03 SG |
223 | } |
224 | rx_queue->rx_skbuff[j] = skb; | |
8728327e | 225 | } |
8728327e | 226 | |
0a4b5a24 | 227 | gfar_init_rxbdp(rx_queue, rxbdp, bufaddr); |
fba4ed03 | 228 | rxbdp++; |
8728327e AV |
229 | } |
230 | ||
45b679c9 MP |
231 | rx_queue->rfbptr = rfbptr; |
232 | rfbptr += 2; | |
8728327e AV |
233 | } |
234 | ||
235 | return 0; | |
236 | } | |
237 | ||
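/* gfar_alloc_skb_resources - allocate descriptor rings and skb bookkeeping.
 * A single dma_alloc_coherent() region holds every Tx ring followed by every
 * Rx ring; the per-queue base pointers are carved out of it below.  The
 * skbuff pointer arrays are then allocated per queue and gfar_init_bds()
 * populates the rings.  On any failure everything is torn down via
 * free_skb_resources().
 */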
238 | static int gfar_alloc_skb_resources(struct net_device *ndev) | |
239 | { | |
826aa4a0 | 240 | void *vaddr; |
fba4ed03 SG |
241 | dma_addr_t addr; |
242 | int i, j, k; | |
826aa4a0 | 243 | struct gfar_private *priv = netdev_priv(ndev); |
369ec162 | 244 | struct device *dev = priv->dev; |
a12f801d SG |
245 | struct gfar_priv_tx_q *tx_queue = NULL; |
246 | struct gfar_priv_rx_q *rx_queue = NULL; | |
247 | ||
fba4ed03 SG |
248 | priv->total_tx_ring_size = 0; |
249 | for (i = 0; i < priv->num_tx_queues; i++) | |
250 | priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; | |
251 | ||
252 | priv->total_rx_ring_size = 0; | |
253 | for (i = 0; i < priv->num_rx_queues; i++) | |
254 | priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; | |
826aa4a0 AV |
255 | |
256 | /* Allocate memory for the buffer descriptors */ | |
8728327e | 257 | vaddr = dma_alloc_coherent(dev, |
d0320f75 JP |
258 | (priv->total_tx_ring_size * |
259 | sizeof(struct txbd8)) + | |
260 | (priv->total_rx_ring_size * | |
261 | sizeof(struct rxbd8)), | |
262 | &addr, GFP_KERNEL); | |
263 | if (!vaddr) | |
826aa4a0 | 264 | return -ENOMEM; |
826aa4a0 | 265 | |
fba4ed03 SG |
266 | for (i = 0; i < priv->num_tx_queues; i++) { |
267 | tx_queue = priv->tx_queue[i]; | |
43d620c8 | 268 | tx_queue->tx_bd_base = vaddr; |
fba4ed03 SG |
269 | tx_queue->tx_bd_dma_base = addr; |
270 | tx_queue->dev = ndev; | |
271 | /* enet DMA only understands physical addresses */ | |
bc4598bc JC |
272 | addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; |
273 | vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; | |
fba4ed03 | 274 | } |
826aa4a0 | 275 | |
826aa4a0 | 276 | /* Start the rx descriptor ring where the tx ring leaves off */ |
fba4ed03 SG |
277 | for (i = 0; i < priv->num_rx_queues; i++) { |
278 | rx_queue = priv->rx_queue[i]; | |
43d620c8 | 279 | rx_queue->rx_bd_base = vaddr; |
fba4ed03 SG |
280 | rx_queue->rx_bd_dma_base = addr; |
281 | rx_queue->dev = ndev; | |
bc4598bc JC |
282 | addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; |
283 | vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; | |
fba4ed03 | 284 | } |
826aa4a0 AV |
285 | |
286 | /* Setup the skbuff rings */ | |
fba4ed03 SG |
287 | for (i = 0; i < priv->num_tx_queues; i++) { |
288 | tx_queue = priv->tx_queue[i]; | |
14f8dc49 JP |
289 | tx_queue->tx_skbuff = |
290 | kmalloc_array(tx_queue->tx_ring_size, | |
291 | sizeof(*tx_queue->tx_skbuff), | |
292 | GFP_KERNEL); | |
293 | if (!tx_queue->tx_skbuff) | |
fba4ed03 | 294 | goto cleanup; |
826aa4a0 | 295 | |
fba4ed03 SG |
296 | for (k = 0; k < tx_queue->tx_ring_size; k++) |
297 | tx_queue->tx_skbuff[k] = NULL; | |
298 | } | |
826aa4a0 | 299 | |
fba4ed03 SG |
300 | for (i = 0; i < priv->num_rx_queues; i++) { |
301 | rx_queue = priv->rx_queue[i]; | |
14f8dc49 JP |
302 | rx_queue->rx_skbuff = |
303 | kmalloc_array(rx_queue->rx_ring_size, | |
304 | sizeof(*rx_queue->rx_skbuff), | |
305 | GFP_KERNEL); | |
306 | if (!rx_queue->rx_skbuff) | |
fba4ed03 | 307 | goto cleanup; |
fba4ed03 SG |
308 | |
309 | for (j = 0; j < rx_queue->rx_ring_size; j++) | |
310 | rx_queue->rx_skbuff[j] = NULL; | |
311 | } | |
826aa4a0 | 312 | |
8728327e AV |
313 | if (gfar_init_bds(ndev)) |
314 | goto cleanup; | |
826aa4a0 AV |
315 | |
316 | return 0; | |
317 | ||
318 | cleanup: | |
319 | free_skb_resources(priv); | |
320 | return -ENOMEM; | |
321 | } | |
322 | ||
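/* Program each queue's BD ring physical base address into the controller.
 * Consecutive TBASEn/RBASEn registers are 8 bytes apart in the register map,
 * hence the "baddr += 2" stride over a u32 __iomem pointer.
 */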
fba4ed03 SG |
323 | static void gfar_init_tx_rx_base(struct gfar_private *priv) |
324 | { | |
46ceb60c | 325 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
18294ad1 | 326 | u32 __iomem *baddr; |
fba4ed03 SG |
327 | int i; |
328 | ||
329 | baddr = ®s->tbase0; | |
bc4598bc | 330 | for (i = 0; i < priv->num_tx_queues; i++) { |
fba4ed03 | 331 | gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); |
bc4598bc | 332 | baddr += 2; |
fba4ed03 SG |
333 | } |
334 | ||
335 | baddr = ®s->rbase0; | |
bc4598bc | 336 | for (i = 0; i < priv->num_rx_queues; i++) { |
fba4ed03 | 337 | gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); |
bc4598bc | 338 | baddr += 2; |
fba4ed03 SG |
339 | } |
340 | } | |
341 | ||
45b679c9 MP |
342 | static void gfar_init_rqprm(struct gfar_private *priv) |
343 | { | |
344 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | |
345 | u32 __iomem *baddr; | |
346 | int i; | |
347 | ||
348 | baddr = ®s->rqprm0; | |
349 | for (i = 0; i < priv->num_rx_queues; i++) { | |
350 | gfar_write(baddr, priv->rx_queue[i]->rx_ring_size | | |
351 | (DEFAULT_RX_LFC_THR << FBTHR_SHIFT)); | |
352 | baddr++; | |
353 | } | |
354 | } | |
355 | ||
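/* Derive the Rx buffer size from the MTU plus whatever the hardware prepends
 * (frame control block, padding), rounded up to the next multiple of
 * INCREMENTAL_BUFFER_SIZE.  Worked example, assuming the 512-byte increment
 * defined in gianfar.h: MTU 1500 + ETH_HLEN 14 + ETH_FCS_LEN 4 = 1518, and
 * (1518 & ~511) + 512 = 1536, i.e. DEFAULT_RX_BUFFER_SIZE.
 */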
88302648 | 356 | static void gfar_rx_buff_size_config(struct gfar_private *priv) |
826aa4a0 | 357 | { |
f5b720b8 | 358 | int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN; |
fba4ed03 | 359 | |
ba779711 CM |
360 | /* set this when rx hw offload (TOE) functions are being used */ |
361 | priv->uses_rxfcb = 0; | |
362 | ||
88302648 CM |
363 | if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) |
364 | priv->uses_rxfcb = 1; | |
365 | ||
366 | if (priv->hwts_rx_en) | |
367 | priv->uses_rxfcb = 1; | |
368 | ||
369 | if (priv->uses_rxfcb) | |
370 | frame_size += GMAC_FCB_LEN; | |
371 | ||
372 | frame_size += priv->padding; | |
373 | ||
374 | frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + | |
375 | INCREMENTAL_BUFFER_SIZE; | |
376 | ||
377 | priv->rx_buffer_size = frame_size; | |
378 | } | |
379 | ||
380 | static void gfar_mac_rx_config(struct gfar_private *priv) | |
381 | { | |
382 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | |
383 | u32 rctrl = 0; | |
384 | ||
1ccb8389 | 385 | if (priv->rx_filer_enable) { |
fba4ed03 | 386 | rctrl |= RCTRL_FILREN; |
1ccb8389 | 387 | /* Program the RIR0 reg with the required distribution */ |
71ff9e3d CM |
388 | if (priv->poll_mode == GFAR_SQ_POLLING) |
389 | gfar_write(®s->rir0, DEFAULT_2RXQ_RIR0); | |
390 | else /* GFAR_MQ_POLLING */ | |
391 | gfar_write(®s->rir0, DEFAULT_8RXQ_RIR0); | |
1ccb8389 | 392 | } |
826aa4a0 | 393 | |
f5ae6279 | 394 | /* Restore PROMISC mode */ |
a328ac92 | 395 | if (priv->ndev->flags & IFF_PROMISC) |
f5ae6279 CM |
396 | rctrl |= RCTRL_PROM; |
397 | ||
88302648 | 398 | if (priv->ndev->features & NETIF_F_RXCSUM) |
826aa4a0 AV |
399 | rctrl |= RCTRL_CHECKSUMMING; |
400 | ||
88302648 CM |
401 | if (priv->extended_hash) |
402 | rctrl |= RCTRL_EXTHASH | RCTRL_EMEN; | |
826aa4a0 AV |
403 | |
404 | if (priv->padding) { | |
405 | rctrl &= ~RCTRL_PAL_MASK; | |
406 | rctrl |= RCTRL_PADDING(priv->padding); | |
407 | } | |
408 | ||
97553f7f | 409 | /* Enable HW time stamping if requested from user space */ |
88302648 | 410 | if (priv->hwts_rx_en) |
97553f7f MR |
411 | rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; |
412 | ||
88302648 | 413 | if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) |
b852b720 | 414 | rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; |
826aa4a0 | 415 | |
45b679c9 MP |
416 | /* Clear the LFC bit */ |
417 | gfar_write(®s->rctrl, rctrl); | |
418 | /* Init flow control threshold values */ | |
419 | gfar_init_rqprm(priv); | |
420 | gfar_write(®s->ptv, DEFAULT_LFC_PTVVAL); | |
421 | rctrl |= RCTRL_LFC; | |
422 | ||
826aa4a0 AV |
423 | /* Init rctrl based on our settings */ |
424 | gfar_write(®s->rctrl, rctrl); | |
a328ac92 | 425 | } |
826aa4a0 | 426 | |
a328ac92 CM |
427 | static void gfar_mac_tx_config(struct gfar_private *priv) |
428 | { | |
429 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | |
430 | u32 tctrl = 0; | |
431 | ||
432 | if (priv->ndev->features & NETIF_F_IP_CSUM) | |
826aa4a0 AV |
433 | tctrl |= TCTRL_INIT_CSUM; |
434 | ||
b98b8bab CM |
435 | if (priv->prio_sched_en) |
436 | tctrl |= TCTRL_TXSCHED_PRIO; | |
437 | else { | |
438 | tctrl |= TCTRL_TXSCHED_WRRS; | |
439 | gfar_write(®s->tr03wt, DEFAULT_WRRS_WEIGHT); | |
440 | gfar_write(®s->tr47wt, DEFAULT_WRRS_WEIGHT); | |
441 | } | |
fba4ed03 | 442 | |
88302648 CM |
443 | if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) |
444 | tctrl |= TCTRL_VLINS; | |
445 | ||
826aa4a0 | 446 | gfar_write(®s->tctrl, tctrl); |
826aa4a0 AV |
447 | } |
448 | ||
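/* Program interrupt coalescing for the queues selected by tx_mask/rx_mask.
 * Multi-group (MQ_MG_MODE) devices have per-queue txic/rxic registers;
 * otherwise only the single txic/rxic pair exists, so queue 0's settings
 * apply to the whole device.
 */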
f19015ba CM |
449 | static void gfar_configure_coalescing(struct gfar_private *priv, |
450 | unsigned long tx_mask, unsigned long rx_mask) | |
451 | { | |
452 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | |
453 | u32 __iomem *baddr; | |
454 | ||
455 | if (priv->mode == MQ_MG_MODE) { | |
456 | int i = 0; | |
457 | ||
458 | baddr = ®s->txic0; | |
459 | for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { | |
460 | gfar_write(baddr + i, 0); | |
461 | if (likely(priv->tx_queue[i]->txcoalescing)) | |
462 | gfar_write(baddr + i, priv->tx_queue[i]->txic); | |
463 | } | |
464 | ||
465 | baddr = ®s->rxic0; | |
466 | for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { | |
467 | gfar_write(baddr + i, 0); | |
468 | if (likely(priv->rx_queue[i]->rxcoalescing)) | |
469 | gfar_write(baddr + i, priv->rx_queue[i]->rxic); | |
470 | } | |
471 | } else { | |
472 | /* Backward compatible case -- even if we enable | |
473 | * multiple queues, there's only a single reg to program
474 | */ | |
475 | gfar_write(®s->txic, 0); | |
476 | if (likely(priv->tx_queue[0]->txcoalescing)) | |
477 | gfar_write(®s->txic, priv->tx_queue[0]->txic); | |
478 | ||
479 | gfar_write(®s->rxic, 0); | |
480 | if (unlikely(priv->rx_queue[0]->rxcoalescing)) | |
481 | gfar_write(®s->rxic, priv->rx_queue[0]->rxic); | |
482 | } | |
483 | } | |
484 | ||
485 | void gfar_configure_coalescing_all(struct gfar_private *priv) | |
486 | { | |
487 | gfar_configure_coalescing(priv, 0xFF, 0xFF); | |
488 | } | |
489 | ||
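/* Aggregate the per-queue software counters into the netdev stats structure.
 * Rx packet/byte/dropped and Tx packet/byte counts are summed over all
 * configured queues each time the stats are read.
 */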
a7f38041 SG |
490 | static struct net_device_stats *gfar_get_stats(struct net_device *dev) |
491 | { | |
492 | struct gfar_private *priv = netdev_priv(dev); | |
a7f38041 SG |
493 | unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; |
494 | unsigned long tx_packets = 0, tx_bytes = 0; | |
3a2e16c8 | 495 | int i; |
a7f38041 SG |
496 | |
497 | for (i = 0; i < priv->num_rx_queues; i++) { | |
498 | rx_packets += priv->rx_queue[i]->stats.rx_packets; | |
bc4598bc | 499 | rx_bytes += priv->rx_queue[i]->stats.rx_bytes; |
a7f38041 SG |
500 | rx_dropped += priv->rx_queue[i]->stats.rx_dropped; |
501 | } | |
502 | ||
503 | dev->stats.rx_packets = rx_packets; | |
bc4598bc | 504 | dev->stats.rx_bytes = rx_bytes; |
a7f38041 SG |
505 | dev->stats.rx_dropped = rx_dropped; |
506 | ||
507 | for (i = 0; i < priv->num_tx_queues; i++) { | |
1ac9ad13 ED |
508 | tx_bytes += priv->tx_queue[i]->stats.tx_bytes; |
509 | tx_packets += priv->tx_queue[i]->stats.tx_packets; | |
a7f38041 SG |
510 | } |
511 | ||
bc4598bc | 512 | dev->stats.tx_bytes = tx_bytes; |
a7f38041 SG |
513 | dev->stats.tx_packets = tx_packets; |
514 | ||
515 | return &dev->stats; | |
516 | } | |
517 | ||
26ccfc37 AF |
518 | static const struct net_device_ops gfar_netdev_ops = { |
519 | .ndo_open = gfar_enet_open, | |
520 | .ndo_start_xmit = gfar_start_xmit, | |
521 | .ndo_stop = gfar_close, | |
522 | .ndo_change_mtu = gfar_change_mtu, | |
8b3afe95 | 523 | .ndo_set_features = gfar_set_features, |
afc4b13d | 524 | .ndo_set_rx_mode = gfar_set_multi, |
26ccfc37 AF |
525 | .ndo_tx_timeout = gfar_timeout, |
526 | .ndo_do_ioctl = gfar_ioctl, | |
a7f38041 | 527 | .ndo_get_stats = gfar_get_stats, |
240c102d BH |
528 | .ndo_set_mac_address = eth_mac_addr, |
529 | .ndo_validate_addr = eth_validate_addr, | |
26ccfc37 AF |
530 | #ifdef CONFIG_NET_POLL_CONTROLLER |
531 | .ndo_poll_controller = gfar_netpoll, | |
532 | #endif | |
533 | }; | |
534 | ||
efeddce7 CM |
535 | static void gfar_ints_disable(struct gfar_private *priv) |
536 | { | |
537 | int i; | |
538 | for (i = 0; i < priv->num_grps; i++) { | |
539 | struct gfar __iomem *regs = priv->gfargrp[i].regs; | |
540 | /* Clear IEVENT */ | |
541 | gfar_write(®s->ievent, IEVENT_INIT_CLEAR); | |
542 | ||
543 | /* Initialize IMASK */ | |
544 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | |
545 | } | |
546 | } | |
547 | ||
548 | static void gfar_ints_enable(struct gfar_private *priv) | |
549 | { | |
550 | int i; | |
551 | for (i = 0; i < priv->num_grps; i++) { | |
552 | struct gfar __iomem *regs = priv->gfargrp[i].regs; | |
553 | /* Unmask the interrupts we look for */ | |
554 | gfar_write(®s->imask, IMASK_DEFAULT); | |
555 | } | |
556 | } | |
557 | ||
91c53f76 | 558 | static void lock_tx_qs(struct gfar_private *priv) |
fba4ed03 | 559 | { |
3a2e16c8 | 560 | int i; |
fba4ed03 SG |
561 | |
562 | for (i = 0; i < priv->num_tx_queues; i++) | |
563 | spin_lock(&priv->tx_queue[i]->txlock); | |
564 | } | |
565 | ||
91c53f76 | 566 | static void unlock_tx_qs(struct gfar_private *priv) |
fba4ed03 | 567 | { |
3a2e16c8 | 568 | int i; |
fba4ed03 SG |
569 | |
570 | for (i = 0; i < priv->num_tx_queues; i++) | |
571 | spin_unlock(&priv->tx_queue[i]->txlock); | |
572 | } | |
573 | ||
20862788 CM |
574 | static int gfar_alloc_tx_queues(struct gfar_private *priv) |
575 | { | |
576 | int i; | |
577 | ||
578 | for (i = 0; i < priv->num_tx_queues; i++) { | |
579 | priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), | |
580 | GFP_KERNEL); | |
581 | if (!priv->tx_queue[i]) | |
582 | return -ENOMEM; | |
583 | ||
584 | priv->tx_queue[i]->tx_skbuff = NULL; | |
585 | priv->tx_queue[i]->qindex = i; | |
586 | priv->tx_queue[i]->dev = priv->ndev; | |
587 | spin_lock_init(&(priv->tx_queue[i]->txlock)); | |
588 | } | |
589 | return 0; | |
590 | } | |
591 | ||
592 | static int gfar_alloc_rx_queues(struct gfar_private *priv) | |
593 | { | |
594 | int i; | |
595 | ||
596 | for (i = 0; i < priv->num_rx_queues; i++) { | |
597 | priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), | |
598 | GFP_KERNEL); | |
599 | if (!priv->rx_queue[i]) | |
600 | return -ENOMEM; | |
601 | ||
602 | priv->rx_queue[i]->rx_skbuff = NULL; | |
603 | priv->rx_queue[i]->qindex = i; | |
604 | priv->rx_queue[i]->dev = priv->ndev; | |
20862788 CM |
605 | } |
606 | return 0; | |
607 | } | |
608 | ||
609 | static void gfar_free_tx_queues(struct gfar_private *priv) | |
fba4ed03 | 610 | { |
3a2e16c8 | 611 | int i; |
fba4ed03 SG |
612 | |
613 | for (i = 0; i < priv->num_tx_queues; i++) | |
614 | kfree(priv->tx_queue[i]); | |
615 | } | |
616 | ||
20862788 | 617 | static void gfar_free_rx_queues(struct gfar_private *priv) |
fba4ed03 | 618 | { |
3a2e16c8 | 619 | int i; |
fba4ed03 SG |
620 | |
621 | for (i = 0; i < priv->num_rx_queues; i++) | |
622 | kfree(priv->rx_queue[i]); | |
623 | } | |
624 | ||
46ceb60c SG |
625 | static void unmap_group_regs(struct gfar_private *priv) |
626 | { | |
3a2e16c8 | 627 | int i; |
46ceb60c SG |
628 | |
629 | for (i = 0; i < MAXGROUPS; i++) | |
630 | if (priv->gfargrp[i].regs) | |
631 | iounmap(priv->gfargrp[i].regs); | |
632 | } | |
633 | ||
ee873fda CM |
634 | static void free_gfar_dev(struct gfar_private *priv) |
635 | { | |
636 | int i, j; | |
637 | ||
638 | for (i = 0; i < priv->num_grps; i++) | |
639 | for (j = 0; j < GFAR_NUM_IRQS; j++) { | |
640 | kfree(priv->gfargrp[i].irqinfo[j]); | |
641 | priv->gfargrp[i].irqinfo[j] = NULL; | |
642 | } | |
643 | ||
644 | free_netdev(priv->ndev); | |
645 | } | |
646 | ||
46ceb60c SG |
647 | static void disable_napi(struct gfar_private *priv) |
648 | { | |
3a2e16c8 | 649 | int i; |
46ceb60c | 650 | |
aeb12c5e CM |
651 | for (i = 0; i < priv->num_grps; i++) { |
652 | napi_disable(&priv->gfargrp[i].napi_rx); | |
653 | napi_disable(&priv->gfargrp[i].napi_tx); | |
654 | } | |
46ceb60c SG |
655 | } |
656 | ||
657 | static void enable_napi(struct gfar_private *priv) | |
658 | { | |
3a2e16c8 | 659 | int i; |
46ceb60c | 660 | |
aeb12c5e CM |
661 | for (i = 0; i < priv->num_grps; i++) { |
662 | napi_enable(&priv->gfargrp[i].napi_rx); | |
663 | napi_enable(&priv->gfargrp[i].napi_tx); | |
664 | } | |
46ceb60c SG |
665 | } |
666 | ||
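/* gfar_parse_group - set up one interrupt group from its device tree node.
 * Maps the group's register block, resolves its TX/RX/error interrupts (a
 * single interrupt for FEC-model devices), and derives the rx/tx bit maps
 * that assign hardware queues to this group, either from the
 * fsl,rx-bit-map / fsl,tx-bit-map properties or from a default
 * one-queue-per-group layout.
 */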
667 | static int gfar_parse_group(struct device_node *np, | |
bc4598bc | 668 | struct gfar_private *priv, const char *model) |
46ceb60c | 669 | { |
5fedcc14 | 670 | struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; |
ee873fda CM |
671 | int i; |
672 | ||
7c1e7e99 PG |
673 | for (i = 0; i < GFAR_NUM_IRQS; i++) { |
674 | grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo), | |
675 | GFP_KERNEL); | |
676 | if (!grp->irqinfo[i]) | |
ee873fda | 677 | return -ENOMEM; |
ee873fda | 678 | } |
46ceb60c | 679 | |
5fedcc14 CM |
680 | grp->regs = of_iomap(np, 0); |
681 | if (!grp->regs) | |
46ceb60c SG |
682 | return -ENOMEM; |
683 | ||
ee873fda | 684 | gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0); |
46ceb60c SG |
685 | |
686 | /* If we aren't the FEC we have multiple interrupts */ | |
687 | if (model && strcasecmp(model, "FEC")) { | |
ee873fda CM |
688 | gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); |
689 | gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); | |
690 | if (gfar_irq(grp, TX)->irq == NO_IRQ || | |
691 | gfar_irq(grp, RX)->irq == NO_IRQ || | |
692 | gfar_irq(grp, ER)->irq == NO_IRQ) | |
46ceb60c | 693 | return -EINVAL; |
46ceb60c SG |
694 | } |
695 | ||
5fedcc14 CM |
696 | grp->priv = priv; |
697 | spin_lock_init(&grp->grplock); | |
bc4598bc | 698 | if (priv->mode == MQ_MG_MODE) { |
71ff9e3d CM |
699 | u32 *rxq_mask, *txq_mask; |
700 | rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL); | |
701 | txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL); | |
702 | ||
703 | if (priv->poll_mode == GFAR_SQ_POLLING) { | |
704 | /* One Q per interrupt group: Q0 to G0, Q1 to G1 */ | |
705 | grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); | |
706 | grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); | |
707 | } else { /* GFAR_MQ_POLLING */ | |
708 | grp->rx_bit_map = rxq_mask ? | |
709 | *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps); | |
710 | grp->tx_bit_map = txq_mask ? | |
711 | *txq_mask : (DEFAULT_MAPPING >> priv->num_grps); | |
712 | } | |
46ceb60c | 713 | } else { |
5fedcc14 CM |
714 | grp->rx_bit_map = 0xFF; |
715 | grp->tx_bit_map = 0xFF; | |
46ceb60c | 716 | } |
20862788 CM |
717 | |
718 | /* bit_map's MSB is q0 (from q0 to q7) but for_each_set_bit parses
719 | * right to left, so we need to reverse the 8 bits to get the q index
720 | */ | |
721 | grp->rx_bit_map = bitrev8(grp->rx_bit_map); | |
722 | grp->tx_bit_map = bitrev8(grp->tx_bit_map); | |
723 | ||
724 | /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, | |
725 | * also assign queues to groups | |
726 | */ | |
727 | for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { | |
71ff9e3d CM |
728 | if (!grp->rx_queue) |
729 | grp->rx_queue = priv->rx_queue[i]; | |
20862788 CM |
730 | grp->num_rx_queues++; |
731 | grp->rstat |= (RSTAT_CLEAR_RHALT >> i); | |
732 | priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i); | |
733 | priv->rx_queue[i]->grp = grp; | |
734 | } | |
735 | ||
736 | for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { | |
71ff9e3d CM |
737 | if (!grp->tx_queue) |
738 | grp->tx_queue = priv->tx_queue[i]; | |
20862788 CM |
739 | grp->num_tx_queues++; |
740 | grp->tstat |= (TSTAT_CLEAR_THALT >> i); | |
741 | priv->tqueue |= (TQUEUE_EN0 >> i); | |
742 | priv->tx_queue[i]->grp = grp; | |
743 | } | |
744 | ||
46ceb60c SG |
745 | priv->num_grps++; |
746 | ||
747 | return 0; | |
748 | } | |
749 | ||
f50724cd TW |
750 | static int gfar_of_group_count(struct device_node *np) |
751 | { | |
752 | struct device_node *child; | |
753 | int num = 0; | |
754 | ||
755 | for_each_available_child_of_node(np, child) | |
756 | if (!of_node_cmp(child->name, "queue-group")) | |
757 | num++; | |
758 | ||
759 | return num; | |
760 | } | |
761 | ||
2dc11581 | 762 | static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) |
b31a1d8b | 763 | { |
b31a1d8b AF |
764 | const char *model; |
765 | const char *ctype; | |
766 | const void *mac_addr; | |
fba4ed03 SG |
767 | int err = 0, i; |
768 | struct net_device *dev = NULL; | |
769 | struct gfar_private *priv = NULL; | |
61c7a080 | 770 | struct device_node *np = ofdev->dev.of_node; |
46ceb60c | 771 | struct device_node *child = NULL; |
4d7902f2 AF |
772 | const u32 *stash; |
773 | const u32 *stash_len; | |
774 | const u32 *stash_idx; | |
fba4ed03 SG |
775 | unsigned int num_tx_qs, num_rx_qs; |
776 | u32 *tx_queues, *rx_queues; | |
b338ce27 | 777 | unsigned short mode, poll_mode; |
b31a1d8b | 778 | |
4b222ca6 | 779 | if (!np) |
b31a1d8b AF |
780 | return -ENODEV; |
781 | ||
b338ce27 CM |
782 | if (of_device_is_compatible(np, "fsl,etsec2")) { |
783 | mode = MQ_MG_MODE; | |
784 | poll_mode = GFAR_SQ_POLLING; | |
785 | } else { | |
786 | mode = SQ_SG_MODE; | |
787 | poll_mode = GFAR_SQ_POLLING; | |
788 | } | |
789 | ||
71ff9e3d | 790 | /* parse the num of HW tx and rx queues */ |
fba4ed03 | 791 | tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); |
71ff9e3d CM |
792 | rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL); |
793 | ||
b338ce27 | 794 | if (mode == SQ_SG_MODE) { |
71ff9e3d CM |
795 | num_tx_qs = 1; |
796 | num_rx_qs = 1; | |
797 | } else { /* MQ_MG_MODE */ | |
c65d7533 | 798 | /* get the actual number of supported groups */ |
f50724cd | 799 | unsigned int num_grps = gfar_of_group_count(np); |
c65d7533 CM |
800 | |
801 | if (num_grps == 0 || num_grps > MAXGROUPS) { | |
802 | dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", | |
803 | num_grps); | |
804 | pr_err("Cannot do alloc_etherdev, aborting\n"); | |
805 | return -EINVAL; | |
806 | } | |
807 | ||
b338ce27 | 808 | if (poll_mode == GFAR_SQ_POLLING) { |
c65d7533 CM |
809 | num_tx_qs = num_grps; /* one txq per int group */ |
810 | num_rx_qs = num_grps; /* one rxq per int group */ | |
71ff9e3d CM |
811 | } else { /* GFAR_MQ_POLLING */ |
812 | num_tx_qs = tx_queues ? *tx_queues : 1; | |
813 | num_rx_qs = rx_queues ? *rx_queues : 1; | |
814 | } | |
815 | } | |
fba4ed03 SG |
816 | |
817 | if (num_tx_qs > MAX_TX_QS) { | |
59deab26 JP |
818 | pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", |
819 | num_tx_qs, MAX_TX_QS); | |
820 | pr_err("Cannot do alloc_etherdev, aborting\n"); | |
fba4ed03 SG |
821 | return -EINVAL; |
822 | } | |
823 | ||
fba4ed03 | 824 | if (num_rx_qs > MAX_RX_QS) { |
59deab26 JP |
825 | pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", |
826 | num_rx_qs, MAX_RX_QS); | |
827 | pr_err("Cannot do alloc_etherdev, aborting\n"); | |
fba4ed03 SG |
828 | return -EINVAL; |
829 | } | |
830 | ||
831 | *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); | |
832 | dev = *pdev; | |
833 | if (NULL == dev) | |
834 | return -ENOMEM; | |
835 | ||
836 | priv = netdev_priv(dev); | |
fba4ed03 SG |
837 | priv->ndev = dev; |
838 | ||
b338ce27 CM |
839 | priv->mode = mode; |
840 | priv->poll_mode = poll_mode; | |
841 | ||
fba4ed03 | 842 | priv->num_tx_queues = num_tx_qs; |
fe069123 | 843 | netif_set_real_num_rx_queues(dev, num_rx_qs); |
fba4ed03 | 844 | priv->num_rx_queues = num_rx_qs; |
20862788 CM |
845 | |
846 | err = gfar_alloc_tx_queues(priv); | |
847 | if (err) | |
848 | goto tx_alloc_failed; | |
849 | ||
850 | err = gfar_alloc_rx_queues(priv); | |
851 | if (err) | |
852 | goto rx_alloc_failed; | |
b31a1d8b | 853 | |
0977f817 | 854 | /* Init Rx queue filer rule set linked list */ |
4aa3a715 SP |
855 | INIT_LIST_HEAD(&priv->rx_list.list); |
856 | priv->rx_list.count = 0; | |
857 | mutex_init(&priv->rx_queue_access); | |
858 | ||
b31a1d8b AF |
859 | model = of_get_property(np, "model", NULL); |
860 | ||
46ceb60c SG |
861 | for (i = 0; i < MAXGROUPS; i++) |
862 | priv->gfargrp[i].regs = NULL; | |
b31a1d8b | 863 | |
46ceb60c | 864 | /* Parse and initialize group specific information */ |
b338ce27 | 865 | if (priv->mode == MQ_MG_MODE) { |
f50724cd TW |
866 | for_each_available_child_of_node(np, child) { |
867 | if (of_node_cmp(child->name, "queue-group")) | |
868 | continue; | |
869 | ||
46ceb60c SG |
870 | err = gfar_parse_group(child, priv, model); |
871 | if (err) | |
872 | goto err_grp_init; | |
b31a1d8b | 873 | } |
b338ce27 | 874 | } else { /* SQ_SG_MODE */ |
46ceb60c | 875 | err = gfar_parse_group(np, priv, model); |
bc4598bc | 876 | if (err) |
46ceb60c | 877 | goto err_grp_init; |
b31a1d8b AF |
878 | } |
879 | ||
4d7902f2 AF |
880 | stash = of_get_property(np, "bd-stash", NULL); |
881 | ||
a12f801d | 882 | if (stash) { |
4d7902f2 AF |
883 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; |
884 | priv->bd_stash_en = 1; | |
885 | } | |
886 | ||
887 | stash_len = of_get_property(np, "rx-stash-len", NULL); | |
888 | ||
889 | if (stash_len) | |
890 | priv->rx_stash_size = *stash_len; | |
891 | ||
892 | stash_idx = of_get_property(np, "rx-stash-idx", NULL); | |
893 | ||
894 | if (stash_idx) | |
895 | priv->rx_stash_index = *stash_idx; | |
896 | ||
897 | if (stash_len || stash_idx) | |
898 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; | |
899 | ||
b31a1d8b | 900 | mac_addr = of_get_mac_address(np); |
bc4598bc | 901 | |
b31a1d8b | 902 | if (mac_addr) |
6a3c910c | 903 | memcpy(dev->dev_addr, mac_addr, ETH_ALEN); |
b31a1d8b AF |
904 | |
905 | if (model && !strcasecmp(model, "TSEC")) | |
34018fd4 | 906 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | |
bc4598bc JC |
907 | FSL_GIANFAR_DEV_HAS_COALESCE | |
908 | FSL_GIANFAR_DEV_HAS_RMON | | |
909 | FSL_GIANFAR_DEV_HAS_MULTI_INTR; | |
910 | ||
b31a1d8b | 911 | if (model && !strcasecmp(model, "eTSEC")) |
34018fd4 | 912 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | |
bc4598bc JC |
913 | FSL_GIANFAR_DEV_HAS_COALESCE | |
914 | FSL_GIANFAR_DEV_HAS_RMON | | |
915 | FSL_GIANFAR_DEV_HAS_MULTI_INTR | | |
bc4598bc JC |
916 | FSL_GIANFAR_DEV_HAS_CSUM | |
917 | FSL_GIANFAR_DEV_HAS_VLAN | | |
918 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | | |
919 | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | | |
920 | FSL_GIANFAR_DEV_HAS_TIMER; | |
b31a1d8b AF |
921 | |
922 | ctype = of_get_property(np, "phy-connection-type", NULL); | |
923 | ||
924 | /* We only care about rgmii-id. The rest are autodetected */ | |
925 | if (ctype && !strcmp(ctype, "rgmii-id")) | |
926 | priv->interface = PHY_INTERFACE_MODE_RGMII_ID; | |
927 | else | |
928 | priv->interface = PHY_INTERFACE_MODE_MII; | |
929 | ||
930 | if (of_get_property(np, "fsl,magic-packet", NULL)) | |
931 | priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; | |
932 | ||
fe192a49 | 933 | priv->phy_node = of_parse_phandle(np, "phy-handle", 0); |
b31a1d8b | 934 | |
be403645 FF |
935 | /* In the case of a fixed PHY, the DT node associated |
936 | * to the PHY is the Ethernet MAC DT node. | |
937 | */ | |
6f2c9bd8 | 938 | if (!priv->phy_node && of_phy_is_fixed_link(np)) { |
be403645 FF |
939 | err = of_phy_register_fixed_link(np); |
940 | if (err) | |
941 | goto err_grp_init; | |
942 | ||
6f2c9bd8 | 943 | priv->phy_node = of_node_get(np); |
be403645 FF |
944 | } |
945 | ||
b31a1d8b | 946 | /* Find the TBI PHY. If it's not there, we don't support SGMII */ |
fe192a49 | 947 | priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); |
b31a1d8b AF |
948 | |
949 | return 0; | |
950 | ||
46ceb60c SG |
951 | err_grp_init: |
952 | unmap_group_regs(priv); | |
20862788 CM |
953 | rx_alloc_failed: |
954 | gfar_free_rx_queues(priv); | |
955 | tx_alloc_failed: | |
956 | gfar_free_tx_queues(priv); | |
ee873fda | 957 | free_gfar_dev(priv); |
b31a1d8b AF |
958 | return err; |
959 | } | |
960 | ||
ca0c88c2 | 961 | static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) |
cc772ab7 MR |
962 | { |
963 | struct hwtstamp_config config; | |
964 | struct gfar_private *priv = netdev_priv(netdev); | |
965 | ||
966 | if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) | |
967 | return -EFAULT; | |
968 | ||
969 | /* reserved for future extensions */ | |
970 | if (config.flags) | |
971 | return -EINVAL; | |
972 | ||
f0ee7acf MR |
973 | switch (config.tx_type) { |
974 | case HWTSTAMP_TX_OFF: | |
975 | priv->hwts_tx_en = 0; | |
976 | break; | |
977 | case HWTSTAMP_TX_ON: | |
978 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) | |
979 | return -ERANGE; | |
980 | priv->hwts_tx_en = 1; | |
981 | break; | |
982 | default: | |
cc772ab7 | 983 | return -ERANGE; |
f0ee7acf | 984 | } |
cc772ab7 MR |
985 | |
986 | switch (config.rx_filter) { | |
987 | case HWTSTAMP_FILTER_NONE: | |
97553f7f | 988 | if (priv->hwts_rx_en) { |
97553f7f | 989 | priv->hwts_rx_en = 0; |
0851133b | 990 | reset_gfar(netdev); |
97553f7f | 991 | } |
cc772ab7 MR |
992 | break; |
993 | default: | |
994 | if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) | |
995 | return -ERANGE; | |
97553f7f | 996 | if (!priv->hwts_rx_en) { |
97553f7f | 997 | priv->hwts_rx_en = 1; |
0851133b | 998 | reset_gfar(netdev); |
97553f7f | 999 | } |
cc772ab7 MR |
1000 | config.rx_filter = HWTSTAMP_FILTER_ALL; |
1001 | break; | |
1002 | } | |
1003 | ||
1004 | return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? | |
1005 | -EFAULT : 0; | |
1006 | } | |
1007 | ||
ca0c88c2 BH |
1008 | static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) |
1009 | { | |
1010 | struct hwtstamp_config config; | |
1011 | struct gfar_private *priv = netdev_priv(netdev); | |
1012 | ||
1013 | config.flags = 0; | |
1014 | config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; | |
1015 | config.rx_filter = (priv->hwts_rx_en ? | |
1016 | HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); | |
1017 | ||
1018 | return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? | |
1019 | -EFAULT : 0; | |
1020 | } | |
1021 | ||
0faac9f7 CW |
1022 | static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
1023 | { | |
1024 | struct gfar_private *priv = netdev_priv(dev); | |
1025 | ||
1026 | if (!netif_running(dev)) | |
1027 | return -EINVAL; | |
1028 | ||
cc772ab7 | 1029 | if (cmd == SIOCSHWTSTAMP) |
ca0c88c2 BH |
1030 | return gfar_hwtstamp_set(dev, rq); |
1031 | if (cmd == SIOCGHWTSTAMP) | |
1032 | return gfar_hwtstamp_get(dev, rq); | |
cc772ab7 | 1033 | |
0faac9f7 CW |
1034 | if (!priv->phydev) |
1035 | return -ENODEV; | |
1036 | ||
28b04113 | 1037 | return phy_mii_ioctl(priv->phydev, rq, cmd); |
0faac9f7 CW |
1038 | } |
1039 | ||
18294ad1 AV |
1040 | static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, |
1041 | u32 class) | |
7a8b3372 SG |
1042 | { |
1043 | u32 rqfpr = FPR_FILER_MASK; | |
1044 | u32 rqfcr = 0x0; | |
1045 | ||
1046 | rqfar--; | |
1047 | rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; | |
6c43e046 WJB |
1048 | priv->ftp_rqfpr[rqfar] = rqfpr; |
1049 | priv->ftp_rqfcr[rqfar] = rqfcr; | |
7a8b3372 SG |
1050 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); |
1051 | ||
1052 | rqfar--; | |
1053 | rqfcr = RQFCR_CMP_NOMATCH; | |
6c43e046 WJB |
1054 | priv->ftp_rqfpr[rqfar] = rqfpr; |
1055 | priv->ftp_rqfcr[rqfar] = rqfcr; | |
7a8b3372 SG |
1056 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); |
1057 | ||
1058 | rqfar--; | |
1059 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; | |
1060 | rqfpr = class; | |
6c43e046 WJB |
1061 | priv->ftp_rqfcr[rqfar] = rqfcr; |
1062 | priv->ftp_rqfpr[rqfar] = rqfpr; | |
7a8b3372 SG |
1063 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); |
1064 | ||
1065 | rqfar--; | |
1066 | rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; | |
1067 | rqfpr = class; | |
6c43e046 WJB |
1068 | priv->ftp_rqfcr[rqfar] = rqfcr; |
1069 | priv->ftp_rqfpr[rqfar] = rqfpr; | |
7a8b3372 SG |
1070 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); |
1071 | ||
1072 | return rqfar; | |
1073 | } | |
1074 | ||
1075 | static void gfar_init_filer_table(struct gfar_private *priv) | |
1076 | { | |
1077 | int i = 0x0; | |
1078 | u32 rqfar = MAX_FILER_IDX; | |
1079 | u32 rqfcr = 0x0; | |
1080 | u32 rqfpr = FPR_FILER_MASK; | |
1081 | ||
1082 | /* Default rule */ | |
1083 | rqfcr = RQFCR_CMP_MATCH; | |
6c43e046 WJB |
1084 | priv->ftp_rqfcr[rqfar] = rqfcr; |
1085 | priv->ftp_rqfpr[rqfar] = rqfpr; | |
7a8b3372 SG |
1086 | gfar_write_filer(priv, rqfar, rqfcr, rqfpr); |
1087 | ||
1088 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); | |
1089 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); | |
1090 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); | |
1091 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); | |
1092 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); | |
1093 | rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); | |
1094 | ||
85dd08eb | 1095 | /* cur_filer_idx indicates the first non-masked rule */
7a8b3372 SG |
1096 | priv->cur_filer_idx = rqfar; |
1097 | ||
1098 | /* Rest are masked rules */ | |
1099 | rqfcr = RQFCR_CMP_NOMATCH; | |
1100 | for (i = 0; i < rqfar; i++) { | |
6c43e046 WJB |
1101 | priv->ftp_rqfcr[i] = rqfcr; |
1102 | priv->ftp_rqfpr[i] = rqfpr; | |
7a8b3372 SG |
1103 | gfar_write_filer(priv, i, rqfcr, rqfpr); |
1104 | } | |
1105 | } | |
1106 | ||
d6ef0bcc | 1107 | #ifdef CONFIG_PPC |
2969b1f7 | 1108 | static void __gfar_detect_errata_83xx(struct gfar_private *priv) |
7d350977 | 1109 | { |
7d350977 AV |
1110 | unsigned int pvr = mfspr(SPRN_PVR); |
1111 | unsigned int svr = mfspr(SPRN_SVR); | |
1112 | unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ | |
1113 | unsigned int rev = svr & 0xffff; | |
1114 | ||
1115 | /* MPC8313 Rev 2.0 and higher; All MPC837x */ | |
1116 | if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || | |
bc4598bc | 1117 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) |
7d350977 AV |
1118 | priv->errata |= GFAR_ERRATA_74; |
1119 | ||
deb90eac AV |
1120 | /* MPC8313 and MPC837x all rev */ |
1121 | if ((pvr == 0x80850010 && mod == 0x80b0) || | |
bc4598bc | 1122 | (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) |
deb90eac AV |
1123 | priv->errata |= GFAR_ERRATA_76; |
1124 | ||
2969b1f7 CM |
1125 | /* MPC8313 Rev < 2.0 */ |
1126 | if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) | |
1127 | priv->errata |= GFAR_ERRATA_12; | |
1128 | } | |
1129 | ||
1130 | static void __gfar_detect_errata_85xx(struct gfar_private *priv) | |
1131 | { | |
1132 | unsigned int svr = mfspr(SPRN_SVR); | |
1133 | ||
1134 | if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) | |
4363c2fd | 1135 | priv->errata |= GFAR_ERRATA_12; |
53fad773 CM |
1136 | if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || |
1137 | ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20))) | |
1138 | priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ | |
2969b1f7 | 1139 | } |
d6ef0bcc | 1140 | #endif |
2969b1f7 CM |
1141 | |
1142 | static void gfar_detect_errata(struct gfar_private *priv) | |
1143 | { | |
1144 | struct device *dev = &priv->ofdev->dev; | |
1145 | ||
1146 | /* no plans to fix */ | |
1147 | priv->errata |= GFAR_ERRATA_A002; | |
1148 | ||
d6ef0bcc | 1149 | #ifdef CONFIG_PPC |
2969b1f7 CM |
1150 | if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2)) |
1151 | __gfar_detect_errata_85xx(priv); | |
1152 | else /* non-mpc85xx parts, i.e. e300 core based */ | |
1153 | __gfar_detect_errata_83xx(priv); | |
d6ef0bcc | 1154 | #endif |
4363c2fd | 1155 | |
7d350977 AV |
1156 | if (priv->errata) |
1157 | dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", | |
1158 | priv->errata); | |
1159 | } | |
1160 | ||
0851133b | 1161 | void gfar_mac_reset(struct gfar_private *priv) |
20862788 CM |
1162 | { |
1163 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | |
a328ac92 | 1164 | u32 tempval; |
20862788 CM |
1165 | |
1166 | /* Reset MAC layer */ | |
1167 | gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET); | |
1168 | ||
1169 | /* We need to delay at least 3 TX clocks */ | |
a328ac92 | 1170 | udelay(3); |
20862788 CM |
1171 | |
1172 | /* the soft reset bit is not self-resetting, so we need to | |
1173 | * clear it before resuming normal operation | |
1174 | */ | |
1175 | gfar_write(®s->maccfg1, 0); | |
1176 | ||
a328ac92 CM |
1177 | udelay(3); |
1178 | ||
88302648 CM |
1179 | /* Compute rx_buff_size based on config flags */ |
1180 | gfar_rx_buff_size_config(priv); | |
1181 | ||
1182 | /* Initialize the max receive frame/buffer lengths */ | |
1183 | gfar_write(®s->maxfrm, priv->rx_buffer_size); | |
a328ac92 CM |
1184 | gfar_write(®s->mrblr, priv->rx_buffer_size); |
1185 | ||
1186 | /* Initialize the Minimum Frame Length Register */ | |
1187 | gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); | |
1188 | ||
20862788 CM |
1189 | /* Initialize MACCFG2. */ |
1190 | tempval = MACCFG2_INIT_SETTINGS; | |
88302648 CM |
1191 | |
1192 | /* If the mtu is larger than the max size for standard | |
1193 | * ethernet frames (i.e., a jumbo frame), then set maccfg2
1194 | * to allow huge frames, and to check the length | |
1195 | */ | |
1196 | if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || | |
1197 | gfar_has_errata(priv, GFAR_ERRATA_74)) | |
20862788 | 1198 | tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; |
88302648 | 1199 | |
20862788 CM |
1200 | gfar_write(®s->maccfg2, tempval); |
1201 | ||
a328ac92 CM |
1202 | /* Clear mac addr hash registers */ |
1203 | gfar_write(®s->igaddr0, 0); | |
1204 | gfar_write(®s->igaddr1, 0); | |
1205 | gfar_write(®s->igaddr2, 0); | |
1206 | gfar_write(®s->igaddr3, 0); | |
1207 | gfar_write(®s->igaddr4, 0); | |
1208 | gfar_write(®s->igaddr5, 0); | |
1209 | gfar_write(®s->igaddr6, 0); | |
1210 | gfar_write(®s->igaddr7, 0); | |
1211 | ||
1212 | gfar_write(®s->gaddr0, 0); | |
1213 | gfar_write(®s->gaddr1, 0); | |
1214 | gfar_write(®s->gaddr2, 0); | |
1215 | gfar_write(®s->gaddr3, 0); | |
1216 | gfar_write(®s->gaddr4, 0); | |
1217 | gfar_write(®s->gaddr5, 0); | |
1218 | gfar_write(®s->gaddr6, 0); | |
1219 | gfar_write(®s->gaddr7, 0); | |
1220 | ||
1221 | if (priv->extended_hash) | |
1222 | gfar_clear_exact_match(priv->ndev); | |
1223 | ||
1224 | gfar_mac_rx_config(priv); | |
1225 | ||
1226 | gfar_mac_tx_config(priv); | |
1227 | ||
1228 | gfar_set_mac_address(priv->ndev); | |
1229 | ||
1230 | gfar_set_multi(priv->ndev); | |
1231 | ||
1232 | /* clear ievent and imask before configuring coalescing */ | |
1233 | gfar_ints_disable(priv); | |
1234 | ||
1235 | /* Configure the coalescing support */ | |
1236 | gfar_configure_coalescing_all(priv); | |
1237 | } | |
1238 | ||
1239 | static void gfar_hw_init(struct gfar_private *priv) | |
1240 | { | |
1241 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | |
1242 | u32 attrs; | |
1243 | ||
1244 | /* Stop the DMA engine now, in case it was running before | |
1245 | * (The firmware could have used it, and left it running). | |
1246 | */ | |
1247 | gfar_halt(priv); | |
1248 | ||
1249 | gfar_mac_reset(priv); | |
1250 | ||
1251 | /* Zero out the rmon mib registers if it has them */ | |
1252 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { | |
1253 | memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib)); | |
1254 | ||
1255 | /* Mask off the CAM interrupts */ | |
1256 | gfar_write(®s->rmon.cam1, 0xffffffff); | |
1257 | gfar_write(®s->rmon.cam2, 0xffffffff); | |
1258 | } | |
1259 | ||
20862788 CM |
1260 | /* Initialize ECNTRL */ |
1261 | gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); | |
1262 | ||
34018fd4 CM |
1263 | /* Set the extraction length and index */ |
1264 | attrs = ATTRELI_EL(priv->rx_stash_size) | | |
1265 | ATTRELI_EI(priv->rx_stash_index); | |
1266 | ||
1267 | gfar_write(®s->attreli, attrs); | |
1268 | ||
1269 | /* Start with defaults, and add stashing | |
1270 | * depending on driver parameters | |
1271 | */ | |
1272 | attrs = ATTR_INIT_SETTINGS; | |
1273 | ||
1274 | if (priv->bd_stash_en) | |
1275 | attrs |= ATTR_BDSTASH; | |
1276 | ||
1277 | if (priv->rx_stash_size != 0) | |
1278 | attrs |= ATTR_BUFSTASH; | |
1279 | ||
1280 | gfar_write(®s->attr, attrs); | |
1281 | ||
1282 | /* FIFO configs */ | |
1283 | gfar_write(®s->fifo_tx_thr, DEFAULT_FIFO_TX_THR); | |
1284 | gfar_write(®s->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE); | |
1285 | gfar_write(®s->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF); | |
1286 | ||
20862788 CM |
1287 | /* Program the interrupt steering regs, only for MG devices */ |
1288 | if (priv->num_grps > 1) | |
1289 | gfar_write_isrg(priv); | |
20862788 CM |
1290 | } |
1291 | ||
898157ed | 1292 | static void gfar_init_addr_hash_table(struct gfar_private *priv) |
20862788 CM |
1293 | { |
1294 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | |
1295 | ||
1296 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { | |
1297 | priv->extended_hash = 1; | |
1298 | priv->hash_width = 9; | |
1299 | ||
1300 | priv->hash_regs[0] = ®s->igaddr0; | |
1301 | priv->hash_regs[1] = ®s->igaddr1; | |
1302 | priv->hash_regs[2] = ®s->igaddr2; | |
1303 | priv->hash_regs[3] = ®s->igaddr3; | |
1304 | priv->hash_regs[4] = ®s->igaddr4; | |
1305 | priv->hash_regs[5] = ®s->igaddr5; | |
1306 | priv->hash_regs[6] = ®s->igaddr6; | |
1307 | priv->hash_regs[7] = ®s->igaddr7; | |
1308 | priv->hash_regs[8] = ®s->gaddr0; | |
1309 | priv->hash_regs[9] = ®s->gaddr1; | |
1310 | priv->hash_regs[10] = ®s->gaddr2; | |
1311 | priv->hash_regs[11] = ®s->gaddr3; | |
1312 | priv->hash_regs[12] = ®s->gaddr4; | |
1313 | priv->hash_regs[13] = ®s->gaddr5; | |
1314 | priv->hash_regs[14] = ®s->gaddr6; | |
1315 | priv->hash_regs[15] = ®s->gaddr7; | |
1316 | ||
1317 | } else { | |
1318 | priv->extended_hash = 0; | |
1319 | priv->hash_width = 8; | |
1320 | ||
1321 | priv->hash_regs[0] = ®s->gaddr0; | |
1322 | priv->hash_regs[1] = ®s->gaddr1; | |
1323 | priv->hash_regs[2] = ®s->gaddr2; | |
1324 | priv->hash_regs[3] = ®s->gaddr3; | |
1325 | priv->hash_regs[4] = ®s->gaddr4; | |
1326 | priv->hash_regs[5] = ®s->gaddr5; | |
1327 | priv->hash_regs[6] = ®s->gaddr6; | |
1328 | priv->hash_regs[7] = ®s->gaddr7; | |
1329 | } | |
1330 | } | |
1331 | ||
bb40dcbb | 1332 | /* Set up the ethernet device structure, private data, |
0977f817 JC |
1333 | * and anything else we need before we start |
1334 | */ | |
74888760 | 1335 | static int gfar_probe(struct platform_device *ofdev) |
1da177e4 | 1336 | { |
1da177e4 LT |
1337 | struct net_device *dev = NULL; |
1338 | struct gfar_private *priv = NULL; | |
20862788 | 1339 | int err = 0, i; |
1da177e4 | 1340 | |
fba4ed03 | 1341 | err = gfar_of_init(ofdev, &dev); |
1da177e4 | 1342 | |
fba4ed03 SG |
1343 | if (err) |
1344 | return err; | |
1da177e4 LT |
1345 | |
1346 | priv = netdev_priv(dev); | |
4826857f KG |
1347 | priv->ndev = dev; |
1348 | priv->ofdev = ofdev; | |
369ec162 | 1349 | priv->dev = &ofdev->dev; |
4826857f | 1350 | SET_NETDEV_DEV(dev, &ofdev->dev); |
1da177e4 | 1351 | |
d87eb127 | 1352 | spin_lock_init(&priv->bflock); |
ab939905 | 1353 | INIT_WORK(&priv->reset_task, gfar_reset_task); |
1da177e4 | 1354 | |
8513fbd8 | 1355 | platform_set_drvdata(ofdev, priv); |
1da177e4 | 1356 | |
7d350977 AV |
1357 | gfar_detect_errata(priv); |
1358 | ||
1da177e4 | 1359 | /* Set the dev->base_addr to the gfar reg region */ |
20862788 | 1360 | dev->base_addr = (unsigned long) priv->gfargrp[0].regs; |
1da177e4 | 1361 | |
1da177e4 | 1362 | /* Fill in the dev structure */ |
1da177e4 | 1363 | dev->watchdog_timeo = TX_TIMEOUT; |
1da177e4 | 1364 | dev->mtu = 1500; |
26ccfc37 | 1365 | dev->netdev_ops = &gfar_netdev_ops; |
0bbaf069 KG |
1366 | dev->ethtool_ops = &gfar_ethtool_ops; |
1367 | ||
fba4ed03 | 1368 | /* Register NAPI: one rx and one tx NAPI context per interrupt group */
71ff9e3d CM |
1369 | for (i = 0; i < priv->num_grps; i++) { |
1370 | if (priv->poll_mode == GFAR_SQ_POLLING) { | |
1371 | netif_napi_add(dev, &priv->gfargrp[i].napi_rx, | |
1372 | gfar_poll_rx_sq, GFAR_DEV_WEIGHT); | |
1373 | netif_napi_add(dev, &priv->gfargrp[i].napi_tx, | |
1374 | gfar_poll_tx_sq, 2); | |
1375 | } else { | |
aeb12c5e CM |
1376 | netif_napi_add(dev, &priv->gfargrp[i].napi_rx, |
1377 | gfar_poll_rx, GFAR_DEV_WEIGHT); | |
1378 | netif_napi_add(dev, &priv->gfargrp[i].napi_tx, | |
1379 | gfar_poll_tx, 2); | |
1380 | } | |
1381 | } | |
a12f801d | 1382 | |
b31a1d8b | 1383 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { |
8b3afe95 | 1384 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | |
bc4598bc | 1385 | NETIF_F_RXCSUM; |
8b3afe95 | 1386 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | |
bc4598bc | 1387 | NETIF_F_RXCSUM | NETIF_F_HIGHDMA; |
8b3afe95 | 1388 | } |
0bbaf069 | 1389 | |
87c288c6 | 1390 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { |
f646968f PM |
1391 | dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | |
1392 | NETIF_F_HW_VLAN_CTAG_RX; | |
1393 | dev->features |= NETIF_F_HW_VLAN_CTAG_RX; | |
87c288c6 | 1394 | } |
0bbaf069 | 1395 | |
20862788 | 1396 | gfar_init_addr_hash_table(priv); |
0bbaf069 | 1397 | |
532c37bc CM |
1398 | /* Insert receive time stamps into padding alignment bytes */ |
1399 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) | |
1400 | priv->padding = 8; | |
0bbaf069 | 1401 | |
cc772ab7 | 1402 | if (dev->features & NETIF_F_IP_CSUM || |
bc4598bc | 1403 | priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) |
bee9e58c | 1404 | dev->needed_headroom = GMAC_FCB_LEN; |
1da177e4 LT |
1405 | |
1406 | priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; | |
1da177e4 | 1407 | |
a12f801d | 1408 | /* Initializing some of the rx/tx queue level parameters */ |
fba4ed03 SG |
1409 | for (i = 0; i < priv->num_tx_queues; i++) { |
1410 | priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; | |
1411 | priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; | |
1412 | priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; | |
1413 | priv->tx_queue[i]->txic = DEFAULT_TXIC; | |
1414 | } | |
a12f801d | 1415 | |
fba4ed03 SG |
1416 | for (i = 0; i < priv->num_rx_queues; i++) { |
1417 | priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; | |
1418 | priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; | |
1419 | priv->rx_queue[i]->rxic = DEFAULT_RXIC; | |
1420 | } | |
1da177e4 | 1421 | |
0977f817 | 1422 | /* always enable rx filer */ |
4aa3a715 | 1423 | priv->rx_filer_enable = 1; |
0bbaf069 KG |
1424 | /* Enable most messages by default */ |
1425 | priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; | |
b98b8bab CM |
1426 | /* use pritority h/w tx queue scheduling for single queue devices */ |
1427 | if (priv->num_tx_queues == 1) | |
1428 | priv->prio_sched_en = 1; | |
0bbaf069 | 1429 | |
0851133b CM |
1430 | set_bit(GFAR_DOWN, &priv->state); |
1431 | ||
a328ac92 | 1432 | gfar_hw_init(priv); |
d3eab82b | 1433 | |
d4c642ea FE |
1434 | /* Carrier starts down, phylib will bring it up */ |
1435 | netif_carrier_off(dev); | |
1436 | ||
1da177e4 LT |
1437 | err = register_netdev(dev); |
1438 | ||
1439 | if (err) { | |
59deab26 | 1440 | pr_err("%s: Cannot register net device, aborting\n", dev->name); |
1da177e4 LT |
1441 | goto register_fail; |
1442 | } | |
1443 | ||
2884e5cc | 1444 | device_init_wakeup(&dev->dev, |
bc4598bc JC |
1445 | priv->device_flags & |
1446 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | |
2884e5cc | 1447 | |
c50a5d9a | 1448 | /* fill out IRQ number and name fields */ |
46ceb60c | 1449 | for (i = 0; i < priv->num_grps; i++) { |
ee873fda | 1450 | struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
46ceb60c | 1451 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
ee873fda | 1452 | sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s", |
0015e551 | 1453 | dev->name, "_g", '0' + i, "_tx"); |
ee873fda | 1454 | sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s", |
0015e551 | 1455 | dev->name, "_g", '0' + i, "_rx"); |
ee873fda | 1456 | sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s", |
0015e551 | 1457 | dev->name, "_g", '0' + i, "_er"); |
46ceb60c | 1458 | } else |
ee873fda | 1459 | strcpy(gfar_irq(grp, TX)->name, dev->name); |
46ceb60c | 1460 | } |
c50a5d9a | 1461 | |
7a8b3372 SG |
1462 | /* Initialize the filer table */ |
1463 | gfar_init_filer_table(priv); | |
1464 | ||
1da177e4 | 1465 | /* Print out the device info */ |
59deab26 | 1466 | netdev_info(dev, "mac: %pM\n", dev->dev_addr); |
1da177e4 | 1467 | |
0977f817 JC |
1468 | /* Even more device info helps when determining which kernel |
1469 | * provided which set of benchmarks. | |
1470 | */ | |
59deab26 | 1471 | netdev_info(dev, "Running with NAPI enabled\n"); |
fba4ed03 | 1472 | for (i = 0; i < priv->num_rx_queues; i++) |
59deab26 JP |
1473 | netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", |
1474 | i, priv->rx_queue[i]->rx_ring_size); | |
bc4598bc | 1475 | for (i = 0; i < priv->num_tx_queues; i++) |
59deab26 JP |
1476 | netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", |
1477 | i, priv->tx_queue[i]->tx_ring_size); | |
1da177e4 LT |
1478 | |
1479 | return 0; | |
1480 | ||
1481 | register_fail: | |
46ceb60c | 1482 | unmap_group_regs(priv); |
20862788 CM |
1483 | gfar_free_rx_queues(priv); |
1484 | gfar_free_tx_queues(priv); | |
888c88b8 UKK |
1485 | of_node_put(priv->phy_node); |
1486 | of_node_put(priv->tbi_node); | |
ee873fda | 1487 | free_gfar_dev(priv); |
bb40dcbb | 1488 | return err; |
1da177e4 LT |
1489 | } |
1490 | ||
2dc11581 | 1491 | static int gfar_remove(struct platform_device *ofdev) |
1da177e4 | 1492 | { |
8513fbd8 | 1493 | struct gfar_private *priv = platform_get_drvdata(ofdev); |
1da177e4 | 1494 | |
888c88b8 UKK |
1495 | of_node_put(priv->phy_node); |
1496 | of_node_put(priv->tbi_node); | |
fe192a49 | 1497 | |
d9d8e041 | 1498 | unregister_netdev(priv->ndev); |
46ceb60c | 1499 | unmap_group_regs(priv); |
20862788 CM |
1500 | gfar_free_rx_queues(priv); |
1501 | gfar_free_tx_queues(priv); | |
ee873fda | 1502 | free_gfar_dev(priv); |
1da177e4 LT |
1503 | |
1504 | return 0; | |
1505 | } | |
1506 | ||
d87eb127 | 1507 | #ifdef CONFIG_PM |
be926fc4 AV |
1508 | |
1509 | static int gfar_suspend(struct device *dev) | |
d87eb127 | 1510 | { |
be926fc4 AV |
1511 | struct gfar_private *priv = dev_get_drvdata(dev); |
1512 | struct net_device *ndev = priv->ndev; | |
46ceb60c | 1513 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 SW |
1514 | unsigned long flags; |
1515 | u32 tempval; | |
1516 | ||
1517 | int magic_packet = priv->wol_en && | |
bc4598bc JC |
1518 | (priv->device_flags & |
1519 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | |
d87eb127 | 1520 | |
be926fc4 | 1521 | netif_device_detach(ndev); |
d87eb127 | 1522 | |
be926fc4 | 1523 | if (netif_running(ndev)) { |
fba4ed03 SG |
1524 | |
1525 | local_irq_save(flags); | |
1526 | lock_tx_qs(priv); | |
d87eb127 | 1527 | |
c10650b6 | 1528 | gfar_halt_nodisable(priv); |
d87eb127 SW |
1529 | |
1530 | /* Disable Tx, and Rx if wake-on-LAN is disabled. */ | |
f4983704 | 1531 | tempval = gfar_read(®s->maccfg1); |
d87eb127 SW |
1532 | |
1533 | tempval &= ~MACCFG1_TX_EN; | |
1534 | ||
1535 | if (!magic_packet) | |
1536 | tempval &= ~MACCFG1_RX_EN; | |
1537 | ||
f4983704 | 1538 | gfar_write(®s->maccfg1, tempval); |
d87eb127 | 1539 | |
fba4ed03 SG |
1540 | unlock_tx_qs(priv); |
1541 | local_irq_restore(flags); | |
d87eb127 | 1542 | |
46ceb60c | 1543 | disable_napi(priv); |
d87eb127 SW |
1544 | |
1545 | if (magic_packet) { | |
1546 | /* Enable interrupt on Magic Packet */ | |
f4983704 | 1547 | gfar_write(®s->imask, IMASK_MAG); |
d87eb127 SW |
1548 | |
1549 | /* Enable Magic Packet mode */ | |
f4983704 | 1550 | tempval = gfar_read(®s->maccfg2); |
d87eb127 | 1551 | tempval |= MACCFG2_MPEN; |
f4983704 | 1552 | gfar_write(®s->maccfg2, tempval); |
d87eb127 SW |
1553 | } else { |
1554 | phy_stop(priv->phydev); | |
1555 | } | |
1556 | } | |
1557 | ||
1558 | return 0; | |
1559 | } | |
1560 | ||
be926fc4 | 1561 | static int gfar_resume(struct device *dev) |
d87eb127 | 1562 | { |
be926fc4 AV |
1563 | struct gfar_private *priv = dev_get_drvdata(dev); |
1564 | struct net_device *ndev = priv->ndev; | |
46ceb60c | 1565 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 SW |
1566 | unsigned long flags; |
1567 | u32 tempval; | |
1568 | int magic_packet = priv->wol_en && | |
bc4598bc JC |
1569 | (priv->device_flags & |
1570 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); | |
d87eb127 | 1571 | |
be926fc4 AV |
1572 | if (!netif_running(ndev)) { |
1573 | netif_device_attach(ndev); | |
d87eb127 SW |
1574 | return 0; |
1575 | } | |
1576 | ||
1577 | if (!magic_packet && priv->phydev) | |
1578 | phy_start(priv->phydev); | |
1579 | ||
1580 | /* Disable Magic Packet mode, in case something | |
1581 | * else woke us up. | |
1582 | */ | |
fba4ed03 SG |
1583 | local_irq_save(flags); |
1584 | lock_tx_qs(priv); | |
d87eb127 | 1585 | |
f4983704 | 1586 | tempval = gfar_read(®s->maccfg2); |
d87eb127 | 1587 | tempval &= ~MACCFG2_MPEN; |
f4983704 | 1588 | gfar_write(®s->maccfg2, tempval); |
d87eb127 | 1589 | |
c10650b6 | 1590 | gfar_start(priv); |
d87eb127 | 1591 | |
fba4ed03 SG |
1592 | unlock_tx_qs(priv); |
1593 | local_irq_restore(flags); | |
d87eb127 | 1594 | |
be926fc4 AV |
1595 | netif_device_attach(ndev); |
1596 | ||
46ceb60c | 1597 | enable_napi(priv); |
be926fc4 AV |
1598 | |
1599 | return 0; | |
1600 | } | |
1601 | ||
1602 | static int gfar_restore(struct device *dev) | |
1603 | { | |
1604 | struct gfar_private *priv = dev_get_drvdata(dev); | |
1605 | struct net_device *ndev = priv->ndev; | |
1606 | ||
103cdd1d WD |
1607 | if (!netif_running(ndev)) { |
1608 | netif_device_attach(ndev); | |
1609 | ||
be926fc4 | 1610 | return 0; |
103cdd1d | 1611 | } |
be926fc4 | 1612 | |
1eb8f7a7 CM |
1613 | if (gfar_init_bds(ndev)) { |
1614 | free_skb_resources(priv); | |
1615 | return -ENOMEM; | |
1616 | } | |
1617 | ||
a328ac92 CM |
1618 | gfar_mac_reset(priv); |
1619 | ||
1620 | gfar_init_tx_rx_base(priv); | |
1621 | ||
c10650b6 | 1622 | gfar_start(priv); |
be926fc4 AV |
1623 | |
1624 | priv->oldlink = 0; | |
1625 | priv->oldspeed = 0; | |
1626 | priv->oldduplex = -1; | |
1627 | ||
1628 | if (priv->phydev) | |
1629 | phy_start(priv->phydev); | |
d87eb127 | 1630 | |
be926fc4 | 1631 | netif_device_attach(ndev); |
5ea681d4 | 1632 | enable_napi(priv); |
d87eb127 SW |
1633 | |
1634 | return 0; | |
1635 | } | |
be926fc4 AV |
1636 | |
1637 | static struct dev_pm_ops gfar_pm_ops = { | |
1638 | .suspend = gfar_suspend, | |
1639 | .resume = gfar_resume, | |
1640 | .freeze = gfar_suspend, | |
1641 | .thaw = gfar_resume, | |
1642 | .restore = gfar_restore, | |
1643 | }; | |
1644 | ||
1645 | #define GFAR_PM_OPS (&gfar_pm_ops) | |
1646 | ||
d87eb127 | 1647 | #else |
be926fc4 AV |
1648 | |
1649 | #define GFAR_PM_OPS NULL | |
be926fc4 | 1650 | |
d87eb127 | 1651 | #endif |
1da177e4 | 1652 | |
e8a2b6a4 AF |
1653 | /* Reads the controller's registers to determine what interface |
1654 | * connects it to the PHY. | |
1655 | */ | |
1656 | static phy_interface_t gfar_get_interface(struct net_device *dev) | |
1657 | { | |
1658 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 1659 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
f4983704 SG |
1660 | u32 ecntrl; |
1661 | ||
f4983704 | 1662 | ecntrl = gfar_read(®s->ecntrl); |
e8a2b6a4 AF |
1663 | |
1664 | if (ecntrl & ECNTRL_SGMII_MODE) | |
1665 | return PHY_INTERFACE_MODE_SGMII; | |
1666 | ||
1667 | if (ecntrl & ECNTRL_TBI_MODE) { | |
1668 | if (ecntrl & ECNTRL_REDUCED_MODE) | |
1669 | return PHY_INTERFACE_MODE_RTBI; | |
1670 | else | |
1671 | return PHY_INTERFACE_MODE_TBI; | |
1672 | } | |
1673 | ||
1674 | if (ecntrl & ECNTRL_REDUCED_MODE) { | |
bc4598bc | 1675 | if (ecntrl & ECNTRL_REDUCED_MII_MODE) { |
e8a2b6a4 | 1676 | return PHY_INTERFACE_MODE_RMII; |
bc4598bc | 1677 | } |
7132ab7f | 1678 | else { |
b31a1d8b | 1679 | phy_interface_t interface = priv->interface; |
7132ab7f | 1680 | |
0977f817 | 1681 | /* This isn't autodetected right now, so it must |
7132ab7f AF |
1682 | * be set by the device tree or platform code. |
1683 | */ | |
1684 | if (interface == PHY_INTERFACE_MODE_RGMII_ID) | |
1685 | return PHY_INTERFACE_MODE_RGMII_ID; | |
1686 | ||
e8a2b6a4 | 1687 | return PHY_INTERFACE_MODE_RGMII; |
7132ab7f | 1688 | } |
e8a2b6a4 AF |
1689 | } |
1690 | ||
b31a1d8b | 1691 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) |
e8a2b6a4 AF |
1692 | return PHY_INTERFACE_MODE_GMII; |
1693 | ||
1694 | return PHY_INTERFACE_MODE_MII; | |
1695 | } | |
1696 | ||
1697 | ||
bb40dcbb AF |
1698 | /* Initializes driver's PHY state, and attaches to the PHY. |
1699 | * Returns 0 on success. | |
1da177e4 LT |
1700 | */ |
1701 | static int init_phy(struct net_device *dev) | |
1702 | { | |
1703 | struct gfar_private *priv = netdev_priv(dev); | |
bb40dcbb | 1704 | uint gigabit_support = |
b31a1d8b | 1705 | priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? |
23402bdd | 1706 | GFAR_SUPPORTED_GBIT : 0; |
e8a2b6a4 | 1707 | phy_interface_t interface; |
1da177e4 LT |
1708 | |
1709 | priv->oldlink = 0; | |
1710 | priv->oldspeed = 0; | |
1711 | priv->oldduplex = -1; | |
1712 | ||
e8a2b6a4 AF |
1713 | interface = gfar_get_interface(dev); |
1714 | ||
1db780f8 AV |
1715 | priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, |
1716 | interface); | |
1db780f8 AV |
1717 | if (!priv->phydev) { |
1718 | dev_err(&dev->dev, "could not attach to PHY\n"); | |
1719 | return -ENODEV; | |
fe192a49 | 1720 | } |
1da177e4 | 1721 | |
d3c12873 KJ |
1722 | if (interface == PHY_INTERFACE_MODE_SGMII) |
1723 | gfar_configure_serdes(dev); | |
1724 | ||
bb40dcbb | 1725 | /* Remove any features not supported by the controller */ |
fe192a49 GL |
1726 | priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); |
1727 | priv->phydev->advertising = priv->phydev->supported; | |
1da177e4 | 1728 | |
cf987afc PMB |
1729 | /* Add support for flow control, but don't advertise it by default */ |
1730 | priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); | |
1731 | ||
1da177e4 | 1732 | return 0; |
1da177e4 LT |
1733 | } |
1734 | ||
0977f817 | 1735 | /* Initialize TBI PHY interface for communicating with the |
d0313587 PG |
1736 | * SERDES lynx PHY on the chip. We communicate with this PHY |
1737 | * through the MDIO bus on each controller, treating it as a | |
1738 | * "normal" PHY at the address found in the TBIPA register. We assume | |
1739 | * that the TBIPA register is valid. Either the MDIO bus code will set | |
1740 | * it to a value that doesn't conflict with other PHYs on the bus, or the | |
1741 | * value doesn't matter, as there are no other PHYs on the bus. | |
1742 | */ | |
d3c12873 KJ |
1743 | static void gfar_configure_serdes(struct net_device *dev) |
1744 | { | |
1745 | struct gfar_private *priv = netdev_priv(dev); | |
fe192a49 GL |
1746 | struct phy_device *tbiphy; |
1747 | ||
1748 | if (!priv->tbi_node) { | |
1749 | dev_warn(&dev->dev, "error: SGMII mode requires that the " | |
1750 | "device tree specify a tbi-handle\n"); | |
1751 | return; | |
1752 | } | |
c132419e | 1753 | |
fe192a49 GL |
1754 | tbiphy = of_phy_find_device(priv->tbi_node); |
1755 | if (!tbiphy) { | |
1756 | dev_err(&dev->dev, "error: Could not get TBI device\n"); | |
b31a1d8b AF |
1757 | return; |
1758 | } | |
d3c12873 | 1759 | |
0977f817 | 1760 | /* If the link is already up, we must already be ok, and don't need to |
bdb59f94 TP |
1761 | * configure and reset the TBI<->SerDes link. Maybe U-Boot configured |
1762 | * everything for us? Resetting it takes the link down and requires | |
1763 | * several seconds for it to come back. | |
1764 | */ | |
fe192a49 | 1765 | if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) |
b31a1d8b | 1766 | return; |
d3c12873 | 1767 | |
d0313587 | 1768 | /* Single clk mode, mii mode off(for serdes communication) */ |
fe192a49 | 1769 | phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); |
d3c12873 | 1770 | |
fe192a49 | 1771 | phy_write(tbiphy, MII_ADVERTISE, |
bc4598bc JC |
1772 | ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | |
1773 | ADVERTISE_1000XPSE_ASYM); | |
d3c12873 | 1774 | |
bc4598bc JC |
1775 | phy_write(tbiphy, MII_BMCR, |
1776 | BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | | |
1777 | BMCR_SPEED1000); | |
d3c12873 KJ |
1778 | } |
1779 | ||
511d934f AV |
1780 | static int __gfar_is_rx_idle(struct gfar_private *priv) |
1781 | { | |
1782 | u32 res; | |
1783 | ||
0977f817 | 1784 | /* Normally TSEC should not hang on GRS commands, so we should
511d934f AV |
1785 | * actually wait for IEVENT_GRSC flag. |
1786 | */ | |
ad3660c2 | 1787 | if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) |
511d934f AV |
1788 | return 0; |
1789 | ||
0977f817 | 1790 | /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are |
511d934f AV |
1791 | * the same as bits 23-30, the eTSEC Rx is assumed to be idle |
1792 | * and the Rx can be safely reset. | |
1793 | */ | |
1794 | res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); | |
1795 | res &= 0x7f807f80; | |
1796 | if ((res & 0xffff) == (res >> 16)) | |
1797 | return 1; | |
1798 | ||
1799 | return 0; | |
1800 | } | |
0bbaf069 KG |
1801 | |
1802 | /* Halt the receive and transmit queues */ | |
c10650b6 | 1803 | static void gfar_halt_nodisable(struct gfar_private *priv) |
1da177e4 | 1804 | { |
efeddce7 | 1805 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1da177e4 | 1806 | u32 tempval; |
a4feee89 CM |
1807 | unsigned int timeout; |
1808 | int stopped; | |
1da177e4 | 1809 | |
efeddce7 | 1810 | gfar_ints_disable(priv); |
1da177e4 | 1811 | |
a4feee89 CM |
1812 | if (gfar_is_dma_stopped(priv)) |
1813 | return; | |
1814 | ||
1da177e4 | 1815 | /* Stop the DMA, and wait for it to stop */ |
f4983704 | 1816 | tempval = gfar_read(®s->dmactrl); |
a4feee89 CM |
1817 | tempval |= (DMACTRL_GRS | DMACTRL_GTS); |
1818 | gfar_write(®s->dmactrl, tempval); | |
1819 | ||
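/* Poll for the graceful stop to complete. On controllers hit by the
 * A002 erratum the Rx stop may not be signalled until the Rx side is
 * idle (see __gfar_is_rx_idle() above), hence the retry below instead
 * of giving up after a single timeout.
 */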
1820 | retry: | |
1821 | timeout = 1000; | |
1822 | while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) { | |
1823 | cpu_relax(); | |
1824 | timeout--; | |
1da177e4 | 1825 | } |
a4feee89 CM |
1826 | |
1827 | if (!timeout) | |
1828 | stopped = gfar_is_dma_stopped(priv); | |
1829 | ||
1830 | if (!stopped && !gfar_is_rx_dma_stopped(priv) && | |
1831 | !__gfar_is_rx_idle(priv)) | |
1832 | goto retry; | |
d87eb127 | 1833 | } |
d87eb127 SW |
1834 | |
1835 | /* Halt the receive and transmit queues */ | |
c10650b6 | 1836 | void gfar_halt(struct gfar_private *priv) |
d87eb127 | 1837 | { |
46ceb60c | 1838 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
d87eb127 | 1839 | u32 tempval; |
1da177e4 | 1840 | |
c10650b6 CM |
1841 | /* Disable the Rx/Tx hw queues */
1842 | gfar_write(®s->rqueue, 0); | |
1843 | gfar_write(®s->tqueue, 0); | |
2a54adc3 | 1844 | |
c10650b6 CM |
1845 | mdelay(10); |
1846 | ||
1847 | gfar_halt_nodisable(priv); | |
1848 | ||
1849 | /* Disable Rx/Tx DMA */ | |
1da177e4 LT |
1850 | tempval = gfar_read(®s->maccfg1); |
1851 | tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); | |
1852 | gfar_write(®s->maccfg1, tempval); | |
0bbaf069 KG |
1853 | } |
1854 | ||
1855 | void stop_gfar(struct net_device *dev) | |
1856 | { | |
1857 | struct gfar_private *priv = netdev_priv(dev); | |
0bbaf069 | 1858 | |
0851133b | 1859 | netif_tx_stop_all_queues(dev); |
bb40dcbb | 1860 | |
4e857c58 | 1861 | smp_mb__before_atomic(); |
0851133b | 1862 | set_bit(GFAR_DOWN, &priv->state); |
4e857c58 | 1863 | smp_mb__after_atomic(); |
a12f801d | 1864 | |
0851133b | 1865 | disable_napi(priv); |
0bbaf069 | 1866 | |
0851133b | 1867 | /* disable ints and gracefully shut down Rx/Tx DMA */ |
c10650b6 | 1868 | gfar_halt(priv); |
1da177e4 | 1869 | |
0851133b | 1870 | phy_stop(priv->phydev); |
1da177e4 | 1871 | |
1da177e4 | 1872 | free_skb_resources(priv); |
1da177e4 LT |
1873 | } |
1874 | ||
fba4ed03 | 1875 | static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) |
1da177e4 | 1876 | { |
1da177e4 | 1877 | struct txbd8 *txbdp; |
fba4ed03 | 1878 | struct gfar_private *priv = netdev_priv(tx_queue->dev); |
4669bc90 | 1879 | int i, j; |
1da177e4 | 1880 | |
a12f801d | 1881 | txbdp = tx_queue->tx_bd_base; |
1da177e4 | 1882 | |
a12f801d SG |
1883 | for (i = 0; i < tx_queue->tx_ring_size; i++) { |
1884 | if (!tx_queue->tx_skbuff[i]) | |
4669bc90 | 1885 | continue; |
1da177e4 | 1886 | |
369ec162 | 1887 | dma_unmap_single(priv->dev, txbdp->bufPtr, |
bc4598bc | 1888 | txbdp->length, DMA_TO_DEVICE); |
4669bc90 | 1889 | txbdp->lstatus = 0; |
fba4ed03 | 1890 | for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; |
bc4598bc | 1891 | j++) { |
4669bc90 | 1892 | txbdp++; |
369ec162 | 1893 | dma_unmap_page(priv->dev, txbdp->bufPtr, |
bc4598bc | 1894 | txbdp->length, DMA_TO_DEVICE); |
1da177e4 | 1895 | } |
ad5da7ab | 1896 | txbdp++; |
a12f801d SG |
1897 | dev_kfree_skb_any(tx_queue->tx_skbuff[i]); |
1898 | tx_queue->tx_skbuff[i] = NULL; | |
1da177e4 | 1899 | } |
a12f801d | 1900 | kfree(tx_queue->tx_skbuff); |
1eb8f7a7 | 1901 | tx_queue->tx_skbuff = NULL; |
fba4ed03 | 1902 | } |
1da177e4 | 1903 | |
fba4ed03 SG |
1904 | static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) |
1905 | { | |
1906 | struct rxbd8 *rxbdp; | |
1907 | struct gfar_private *priv = netdev_priv(rx_queue->dev); | |
1908 | int i; | |
1da177e4 | 1909 | |
fba4ed03 | 1910 | rxbdp = rx_queue->rx_bd_base; |
1da177e4 | 1911 | |
a12f801d SG |
1912 | for (i = 0; i < rx_queue->rx_ring_size; i++) { |
1913 | if (rx_queue->rx_skbuff[i]) { | |
369ec162 CM |
1914 | dma_unmap_single(priv->dev, rxbdp->bufPtr, |
1915 | priv->rx_buffer_size, | |
bc4598bc | 1916 | DMA_FROM_DEVICE); |
a12f801d SG |
1917 | dev_kfree_skb_any(rx_queue->rx_skbuff[i]); |
1918 | rx_queue->rx_skbuff[i] = NULL; | |
1da177e4 | 1919 | } |
e69edd21 AV |
1920 | rxbdp->lstatus = 0; |
1921 | rxbdp->bufPtr = 0; | |
1922 | rxbdp++; | |
1da177e4 | 1923 | } |
a12f801d | 1924 | kfree(rx_queue->rx_skbuff); |
1eb8f7a7 | 1925 | rx_queue->rx_skbuff = NULL; |
fba4ed03 | 1926 | } |
e69edd21 | 1927 | |
fba4ed03 | 1928 | /* If there are any tx skbs or rx skbs still around, free them. |
0977f817 JC |
1929 | * Then free tx_skbuff and rx_skbuff |
1930 | */ | |
fba4ed03 SG |
1931 | static void free_skb_resources(struct gfar_private *priv) |
1932 | { | |
1933 | struct gfar_priv_tx_q *tx_queue = NULL; | |
1934 | struct gfar_priv_rx_q *rx_queue = NULL; | |
1935 | int i; | |
1936 | ||
1937 | /* Go through all the buffer descriptors and free their data buffers */ | |
1938 | for (i = 0; i < priv->num_tx_queues; i++) { | |
d8a0f1b0 | 1939 | struct netdev_queue *txq; |
bc4598bc | 1940 | |
fba4ed03 | 1941 | tx_queue = priv->tx_queue[i]; |
d8a0f1b0 | 1942 | txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); |
bc4598bc | 1943 | if (tx_queue->tx_skbuff) |
fba4ed03 | 1944 | free_skb_tx_queue(tx_queue); |
d8a0f1b0 | 1945 | netdev_tx_reset_queue(txq); |
fba4ed03 SG |
1946 | } |
1947 | ||
1948 | for (i = 0; i < priv->num_rx_queues; i++) { | |
1949 | rx_queue = priv->rx_queue[i]; | |
bc4598bc | 1950 | if (rx_queue->rx_skbuff) |
fba4ed03 SG |
1951 | free_skb_rx_queue(rx_queue); |
1952 | } | |
1953 | ||
369ec162 | 1954 | dma_free_coherent(priv->dev, |
bc4598bc JC |
1955 | sizeof(struct txbd8) * priv->total_tx_ring_size + |
1956 | sizeof(struct rxbd8) * priv->total_rx_ring_size, | |
1957 | priv->tx_queue[0]->tx_bd_base, | |
1958 | priv->tx_queue[0]->tx_bd_dma_base); | |
1da177e4 LT |
1959 | } |
1960 | ||
c10650b6 | 1961 | void gfar_start(struct gfar_private *priv) |
0bbaf069 | 1962 | { |
46ceb60c | 1963 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
0bbaf069 | 1964 | u32 tempval; |
46ceb60c | 1965 | int i = 0; |
0bbaf069 | 1966 | |
c10650b6 CM |
1967 | /* Enable Rx/Tx hw queues */ |
1968 | gfar_write(®s->rqueue, priv->rqueue); | |
1969 | gfar_write(®s->tqueue, priv->tqueue); | |
0bbaf069 KG |
1970 | |
1971 | /* Initialize DMACTRL to have WWR and WOP */ | |
f4983704 | 1972 | tempval = gfar_read(®s->dmactrl); |
0bbaf069 | 1973 | tempval |= DMACTRL_INIT_SETTINGS; |
f4983704 | 1974 | gfar_write(®s->dmactrl, tempval); |
0bbaf069 | 1975 | |
0bbaf069 | 1976 | /* Make sure we aren't stopped */ |
f4983704 | 1977 | tempval = gfar_read(®s->dmactrl); |
0bbaf069 | 1978 | tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); |
f4983704 | 1979 | gfar_write(®s->dmactrl, tempval); |
0bbaf069 | 1980 | |
46ceb60c SG |
1981 | for (i = 0; i < priv->num_grps; i++) { |
1982 | regs = priv->gfargrp[i].regs; | |
1983 | /* Clear THLT/RHLT, so that the DMA starts polling now */ | |
1984 | gfar_write(®s->tstat, priv->gfargrp[i].tstat); | |
1985 | gfar_write(®s->rstat, priv->gfargrp[i].rstat); | |
46ceb60c | 1986 | } |
12dea57b | 1987 | |
c10650b6 CM |
1988 | /* Enable Rx/Tx DMA */ |
1989 | tempval = gfar_read(®s->maccfg1); | |
1990 | tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); | |
1991 | gfar_write(®s->maccfg1, tempval); | |
1992 | ||
efeddce7 CM |
1993 | gfar_ints_enable(priv); |
1994 | ||
c10650b6 | 1995 | priv->ndev->trans_start = jiffies; /* prevent tx timeout */ |
0bbaf069 KG |
1996 | } |
1997 | ||
80ec396c CM |
1998 | static void free_grp_irqs(struct gfar_priv_grp *grp) |
1999 | { | |
2000 | free_irq(gfar_irq(grp, TX)->irq, grp); | |
2001 | free_irq(gfar_irq(grp, RX)->irq, grp); | |
2002 | free_irq(gfar_irq(grp, ER)->irq, grp); | |
2003 | } | |
2004 | ||
46ceb60c SG |
2005 | static int register_grp_irqs(struct gfar_priv_grp *grp) |
2006 | { | |
2007 | struct gfar_private *priv = grp->priv; | |
2008 | struct net_device *dev = priv->ndev; | |
2009 | int err; | |
1da177e4 | 2010 | |
1da177e4 | 2011 | /* If the device has multiple interrupts, register for |
0977f817 JC |
2012 | * them. Otherwise, only register for the one |
2013 | */ | |
b31a1d8b | 2014 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
0bbaf069 | 2015 | /* Install our interrupt handlers for Error, |
0977f817 JC |
2016 | * Transmit, and Receive |
2017 | */ | |
ee873fda CM |
2018 | err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, |
2019 | gfar_irq(grp, ER)->name, grp); | |
2020 | if (err < 0) { | |
59deab26 | 2021 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
ee873fda | 2022 | gfar_irq(grp, ER)->irq); |
46ceb60c | 2023 | |
2145f1af | 2024 | goto err_irq_fail; |
1da177e4 | 2025 | } |
ee873fda CM |
2026 | err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, |
2027 | gfar_irq(grp, TX)->name, grp); | |
2028 | if (err < 0) { | |
59deab26 | 2029 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
ee873fda | 2030 | gfar_irq(grp, TX)->irq); |
1da177e4 LT |
2031 | goto tx_irq_fail; |
2032 | } | |
ee873fda CM |
2033 | err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, |
2034 | gfar_irq(grp, RX)->name, grp); | |
2035 | if (err < 0) { | |
59deab26 | 2036 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
ee873fda | 2037 | gfar_irq(grp, RX)->irq); |
1da177e4 LT |
2038 | goto rx_irq_fail; |
2039 | } | |
2040 | } else { | |
ee873fda CM |
2041 | err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, |
2042 | gfar_irq(grp, TX)->name, grp); | |
2043 | if (err < 0) { | |
59deab26 | 2044 | netif_err(priv, intr, dev, "Can't get IRQ %d\n", |
ee873fda | 2045 | gfar_irq(grp, TX)->irq); |
1da177e4 LT |
2046 | goto err_irq_fail; |
2047 | } | |
2048 | } | |
2049 | ||
46ceb60c SG |
2050 | return 0; |
2051 | ||
2052 | rx_irq_fail: | |
ee873fda | 2053 | free_irq(gfar_irq(grp, TX)->irq, grp); |
46ceb60c | 2054 | tx_irq_fail: |
ee873fda | 2055 | free_irq(gfar_irq(grp, ER)->irq, grp); |
46ceb60c SG |
2056 | err_irq_fail: |
2057 | return err; | |
2058 | ||
2059 | } | |
2060 | ||
80ec396c CM |
2061 | static void gfar_free_irq(struct gfar_private *priv) |
2062 | { | |
2063 | int i; | |
2064 | ||
2065 | /* Free the IRQs */ | |
2066 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { | |
2067 | for (i = 0; i < priv->num_grps; i++) | |
2068 | free_grp_irqs(&priv->gfargrp[i]); | |
2069 | } else { | |
2070 | for (i = 0; i < priv->num_grps; i++) | |
2071 | free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, | |
2072 | &priv->gfargrp[i]); | |
2073 | } | |
2074 | } | |
2075 | ||
2076 | static int gfar_request_irq(struct gfar_private *priv) | |
2077 | { | |
2078 | int err, i, j; | |
2079 | ||
2080 | for (i = 0; i < priv->num_grps; i++) { | |
2081 | err = register_grp_irqs(&priv->gfargrp[i]); | |
2082 | if (err) { | |
2083 | for (j = 0; j < i; j++) | |
2084 | free_grp_irqs(&priv->gfargrp[j]); | |
2085 | return err; | |
2086 | } | |
2087 | } | |
2088 | ||
2089 | return 0; | |
2090 | } | |
2091 | ||
46ceb60c SG |
2092 | /* Bring the controller up and running */ |
2093 | int startup_gfar(struct net_device *ndev) | |
2094 | { | |
2095 | struct gfar_private *priv = netdev_priv(ndev); | |
80ec396c | 2096 | int err; |
46ceb60c | 2097 | |
a328ac92 | 2098 | gfar_mac_reset(priv); |
46ceb60c | 2099 | |
46ceb60c SG |
2100 | err = gfar_alloc_skb_resources(ndev); |
2101 | if (err) | |
2102 | return err; | |
2103 | ||
a328ac92 | 2104 | gfar_init_tx_rx_base(priv); |
46ceb60c | 2105 | |
4e857c58 | 2106 | smp_mb__before_atomic(); |
0851133b | 2107 | clear_bit(GFAR_DOWN, &priv->state); |
4e857c58 | 2108 | smp_mb__after_atomic(); |
0851133b CM |
2109 | |
2110 | /* Start Rx/Tx DMA and enable the interrupts */ | |
c10650b6 | 2111 | gfar_start(priv); |
1da177e4 | 2112 | |
826aa4a0 AV |
2113 | phy_start(priv->phydev); |
2114 | ||
0851133b CM |
2115 | enable_napi(priv); |
2116 | ||
2117 | netif_tx_wake_all_queues(ndev); | |
2118 | ||
1da177e4 | 2119 | return 0; |
1da177e4 LT |
2120 | } |
2121 | ||
0977f817 JC |
2122 | /* Called when something needs to use the ethernet device |
2123 | * Returns 0 for success. | |
2124 | */ | |
1da177e4 LT |
2125 | static int gfar_enet_open(struct net_device *dev) |
2126 | { | |
94e8cc35 | 2127 | struct gfar_private *priv = netdev_priv(dev); |
1da177e4 LT |
2128 | int err; |
2129 | ||
1da177e4 | 2130 | err = init_phy(dev); |
0851133b | 2131 | if (err) |
1da177e4 LT |
2132 | return err; |
2133 | ||
80ec396c CM |
2134 | err = gfar_request_irq(priv); |
2135 | if (err) | |
2136 | return err; | |
2137 | ||
1da177e4 | 2138 | err = startup_gfar(dev); |
0851133b | 2139 | if (err) |
db0e8e3f | 2140 | return err; |
1da177e4 | 2141 | |
2884e5cc AV |
2142 | device_set_wakeup_enable(&dev->dev, priv->wol_en); |
2143 | ||
1da177e4 LT |
2144 | return err; |
2145 | } | |
2146 | ||
54dc79fe | 2147 | static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) |
0bbaf069 | 2148 | { |
54dc79fe | 2149 | struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); |
6c31d55f KG |
2150 | |
2151 | memset(fcb, 0, GMAC_FCB_LEN); | |
0bbaf069 | 2152 | |
0bbaf069 KG |
2153 | return fcb; |
2154 | } | |
2155 | ||
9c4886e5 | 2156 | static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, |
bc4598bc | 2157 | int fcb_length) |
0bbaf069 | 2158 | { |
0bbaf069 KG |
2159 | /* If we're here, it's an IP packet with a TCP or UDP
2160 | * payload. We set it to checksum, using a pseudo-header | |
2161 | * we provide | |
2162 | */ | |
3a2e16c8 | 2163 | u8 flags = TXFCB_DEFAULT; |
0bbaf069 | 2164 | |
0977f817 JC |
2165 | /* Tell the controller what the protocol is |
2166 | * And provide the already calculated phcs | |
2167 | */ | |
eddc9ec5 | 2168 | if (ip_hdr(skb)->protocol == IPPROTO_UDP) { |
7f7f5316 | 2169 | flags |= TXFCB_UDP; |
4bedb452 | 2170 | fcb->phcs = udp_hdr(skb)->check; |
7f7f5316 | 2171 | } else |
8da32de5 | 2172 | fcb->phcs = tcp_hdr(skb)->check; |
0bbaf069 KG |
2173 | |
2174 | /* l3os is the distance between the start of the | |
2175 | * frame (skb->data) and the start of the IP hdr. | |
2176 | * l4os is the distance between the start of the | |
0977f817 JC |
2177 | * l3 hdr and the l4 hdr |
2178 | */ | |
9c4886e5 | 2179 | fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length); |
cfe1fc77 | 2180 | fcb->l4os = skb_network_header_len(skb); |
0bbaf069 | 2181 | |
7f7f5316 | 2182 | fcb->flags = flags; |
0bbaf069 KG |
2183 | } |
2184 | ||
7f7f5316 | 2185 | inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
0bbaf069 | 2186 | { |
7f7f5316 | 2187 | fcb->flags |= TXFCB_VLN; |
df8a39de | 2188 | fcb->vlctl = skb_vlan_tag_get(skb); |
0bbaf069 KG |
2189 | } |
2190 | ||
4669bc90 | 2191 | static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, |
bc4598bc | 2192 | struct txbd8 *base, int ring_size) |
4669bc90 DH |
2193 | { |
2194 | struct txbd8 *new_bd = bdp + stride; | |
2195 | ||
2196 | return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; | |
2197 | } | |
2198 | ||
2199 | static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, | |
bc4598bc | 2200 | int ring_size) |
4669bc90 DH |
2201 | { |
2202 | return skip_txbd(bdp, 1, base, ring_size); | |
2203 | } | |
2204 | ||
02d88fb4 CM |
2205 | /* eTSEC12: csum generation not supported for some fcb offsets */ |
2206 | static inline bool gfar_csum_errata_12(struct gfar_private *priv, | |
2207 | unsigned long fcb_addr) | |
2208 | { | |
2209 | return (gfar_has_errata(priv, GFAR_ERRATA_12) && | |
2210 | (fcb_addr % 0x20) > 0x18); | |
2211 | } | |
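/* 0x20 is a 32-byte block: an offset above 0x18 puts the FCB in the
 * last few bytes of that block, which is the case the eTSEC12 erratum
 * flags as unsafe for hardware checksum generation.
 */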
2212 | ||
2213 | /* eTSEC76: csum generation for frames larger than 2500 may | |
2214 | * cause excess delays before start of transmission | |
2215 | */ | |
2216 | static inline bool gfar_csum_errata_76(struct gfar_private *priv, | |
2217 | unsigned int len) | |
2218 | { | |
2219 | return (gfar_has_errata(priv, GFAR_ERRATA_76) && | |
2220 | (len > 2500)); | |
2221 | } | |
2222 | ||
0977f817 JC |
2223 | /* This is called by the kernel when a frame is ready for transmission. |
2224 | * It is pointed to by the dev->hard_start_xmit function pointer | |
2225 | */ | |
1da177e4 LT |
2226 | static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) |
2227 | { | |
2228 | struct gfar_private *priv = netdev_priv(dev); | |
a12f801d | 2229 | struct gfar_priv_tx_q *tx_queue = NULL; |
fba4ed03 | 2230 | struct netdev_queue *txq; |
f4983704 | 2231 | struct gfar __iomem *regs = NULL; |
0bbaf069 | 2232 | struct txfcb *fcb = NULL; |
f0ee7acf | 2233 | struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; |
5a5efed4 | 2234 | u32 lstatus; |
0d0cffdc CM |
2235 | int i, rq = 0; |
2236 | int do_tstamp, do_csum, do_vlan; | |
4669bc90 | 2237 | u32 bufaddr; |
fef6108d | 2238 | unsigned long flags; |
50ad076b | 2239 | unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; |
fba4ed03 SG |
2240 | |
2241 | rq = skb->queue_mapping; | |
2242 | tx_queue = priv->tx_queue[rq]; | |
2243 | txq = netdev_get_tx_queue(dev, rq); | |
a12f801d | 2244 | base = tx_queue->tx_bd_base; |
46ceb60c | 2245 | regs = tx_queue->grp->regs; |
f0ee7acf | 2246 | |
0d0cffdc | 2247 | do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); |
df8a39de | 2248 | do_vlan = skb_vlan_tag_present(skb); |
0d0cffdc CM |
2249 | do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && |
2250 | priv->hwts_tx_en; | |
2251 | ||
2252 | if (do_csum || do_vlan) | |
2253 | fcb_len = GMAC_FCB_LEN; | |
2254 | ||
f0ee7acf | 2255 | /* check if time stamp should be generated */ |
0d0cffdc CM |
2256 | if (unlikely(do_tstamp)) |
2257 | fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; | |
4669bc90 | 2258 | |
5b28beaf | 2259 | /* make space for additional header when fcb is needed */ |
0d0cffdc | 2260 | if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { |
54dc79fe SH |
2261 | struct sk_buff *skb_new; |
2262 | ||
0d0cffdc | 2263 | skb_new = skb_realloc_headroom(skb, fcb_len); |
54dc79fe SH |
2264 | if (!skb_new) { |
2265 | dev->stats.tx_errors++; | |
c9974ad4 | 2266 | dev_kfree_skb_any(skb); |
54dc79fe SH |
2267 | return NETDEV_TX_OK; |
2268 | } | |
db83d136 | 2269 | |
313b037c ED |
2270 | if (skb->sk) |
2271 | skb_set_owner_w(skb_new, skb->sk); | |
c9974ad4 | 2272 | dev_consume_skb_any(skb); |
54dc79fe SH |
2273 | skb = skb_new; |
2274 | } | |
2275 | ||
4669bc90 DH |
2276 | /* total number of fragments in the SKB */ |
2277 | nr_frags = skb_shinfo(skb)->nr_frags; | |
2278 | ||
f0ee7acf MR |
2279 | /* calculate the required number of TxBDs for this skb */ |
2280 | if (unlikely(do_tstamp)) | |
2281 | nr_txbds = nr_frags + 2; | |
2282 | else | |
2283 | nr_txbds = nr_frags + 1; | |
2284 | ||
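/* e.g. a linear skb (nr_frags == 0) needs a single TxBD, while hardware
 * time stamping adds one more BD so the FCB can be carried separately
 * from the frame data (see the tstamp setup further down).
 */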
4669bc90 | 2285 | /* check if there is space to queue this packet */ |
f0ee7acf | 2286 | if (nr_txbds > tx_queue->num_txbdfree) { |
4669bc90 | 2287 | /* no space, stop the queue */ |
fba4ed03 | 2288 | netif_tx_stop_queue(txq); |
4669bc90 | 2289 | dev->stats.tx_fifo_errors++; |
4669bc90 DH |
2290 | return NETDEV_TX_BUSY; |
2291 | } | |
1da177e4 LT |
2292 | |
2293 | /* Update transmit stats */ | |
50ad076b CM |
2294 | bytes_sent = skb->len; |
2295 | tx_queue->stats.tx_bytes += bytes_sent; | |
2296 | /* keep Tx bytes on wire for BQL accounting */ | |
2297 | GFAR_CB(skb)->bytes_sent = bytes_sent; | |
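/* Stashed in the skb control block so gfar_clean_tx_ring() can report
 * the same byte count to netdev_tx_completed_queue() that is charged to
 * netdev_tx_sent_queue() below, keeping BQL symmetric even though the
 * FCB/TxPAL headroom pushed later changes skb->len.
 */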
1ac9ad13 | 2298 | tx_queue->stats.tx_packets++; |
1da177e4 | 2299 | |
a12f801d | 2300 | txbdp = txbdp_start = tx_queue->cur_tx; |
f0ee7acf MR |
2301 | lstatus = txbdp->lstatus; |
2302 | ||
2303 | /* Time stamp insertion requires one additional TxBD */ | |
2304 | if (unlikely(do_tstamp)) | |
2305 | txbdp_tstamp = txbdp = next_txbd(txbdp, base, | |
bc4598bc | 2306 | tx_queue->tx_ring_size); |
1da177e4 | 2307 | |
4669bc90 | 2308 | if (nr_frags == 0) { |
f0ee7acf MR |
2309 | if (unlikely(do_tstamp)) |
2310 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST | | |
bc4598bc | 2311 | TXBD_INTERRUPT); |
f0ee7acf MR |
2312 | else |
2313 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | |
4669bc90 DH |
2314 | } else { |
2315 | /* Place the fragment addresses and lengths into the TxBDs */ | |
2316 | for (i = 0; i < nr_frags; i++) { | |
50ad076b | 2317 | unsigned int frag_len; |
4669bc90 | 2318 | /* Point at the next BD, wrapping as needed */ |
a12f801d | 2319 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
4669bc90 | 2320 | |
50ad076b | 2321 | frag_len = skb_shinfo(skb)->frags[i].size; |
4669bc90 | 2322 | |
50ad076b | 2323 | lstatus = txbdp->lstatus | frag_len | |
bc4598bc | 2324 | BD_LFLAG(TXBD_READY); |
4669bc90 DH |
2325 | |
2326 | /* Handle the last BD specially */ | |
2327 | if (i == nr_frags - 1) | |
2328 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | |
1da177e4 | 2329 | |
369ec162 | 2330 | bufaddr = skb_frag_dma_map(priv->dev, |
2234a722 IC |
2331 | &skb_shinfo(skb)->frags[i], |
2332 | 0, | |
50ad076b | 2333 | frag_len, |
2234a722 | 2334 | DMA_TO_DEVICE); |
0a4b5a24 KH |
2335 | if (unlikely(dma_mapping_error(priv->dev, bufaddr))) |
2336 | goto dma_map_err; | |
4669bc90 DH |
2337 | |
2338 | /* set the TxBD length and buffer pointer */ | |
2339 | txbdp->bufPtr = bufaddr; | |
2340 | txbdp->lstatus = lstatus; | |
2341 | } | |
2342 | ||
2343 | lstatus = txbdp_start->lstatus; | |
2344 | } | |
1da177e4 | 2345 | |
9c4886e5 MR |
2346 | /* Add TxPAL between FCB and frame if required */ |
2347 | if (unlikely(do_tstamp)) { | |
2348 | skb_push(skb, GMAC_TXPAL_LEN); | |
2349 | memset(skb->data, 0, GMAC_TXPAL_LEN); | |
2350 | } | |
2351 | ||
0d0cffdc CM |
2352 | /* Add TxFCB if required */ |
2353 | if (fcb_len) { | |
54dc79fe | 2354 | fcb = gfar_add_fcb(skb); |
02d88fb4 | 2355 | lstatus |= BD_LFLAG(TXBD_TOE); |
0d0cffdc CM |
2356 | } |
2357 | ||
2358 | /* Set up checksumming */ | |
2359 | if (do_csum) { | |
2360 | gfar_tx_checksum(skb, fcb, fcb_len); | |
02d88fb4 CM |
2361 | |
2362 | if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || | |
2363 | unlikely(gfar_csum_errata_76(priv, skb->len))) { | |
4363c2fd AD |
2364 | __skb_pull(skb, GMAC_FCB_LEN); |
2365 | skb_checksum_help(skb); | |
0d0cffdc CM |
2366 | if (do_vlan || do_tstamp) { |
2367 | /* put back a new fcb for vlan/tstamp TOE */ | |
2368 | fcb = gfar_add_fcb(skb); | |
2369 | } else { | |
2370 | /* Tx TOE not used */ | |
2371 | lstatus &= ~(BD_LFLAG(TXBD_TOE)); | |
2372 | fcb = NULL; | |
2373 | } | |
4363c2fd | 2374 | } |
0bbaf069 KG |
2375 | } |
2376 | ||
0d0cffdc | 2377 | if (do_vlan) |
54dc79fe | 2378 | gfar_tx_vlan(skb, fcb); |
0bbaf069 | 2379 | |
f0ee7acf MR |
2380 | /* Setup tx hardware time stamping if requested */ |
2381 | if (unlikely(do_tstamp)) { | |
2244d07b | 2382 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
f0ee7acf | 2383 | fcb->ptp = 1; |
f0ee7acf MR |
2384 | } |
2385 | ||
0a4b5a24 KH |
2386 | bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), |
2387 | DMA_TO_DEVICE); | |
2388 | if (unlikely(dma_mapping_error(priv->dev, bufaddr))) | |
2389 | goto dma_map_err; | |
2390 | ||
2391 | txbdp_start->bufPtr = bufaddr; | |
1da177e4 | 2392 | |
0977f817 | 2393 | /* If time stamping is requested, one additional TxBD must be set up. The
f0ee7acf MR |
2394 | * first TxBD points to the FCB and must have a data length of |
2395 | * GMAC_FCB_LEN. The second TxBD points to the actual frame data with | |
2396 | * the full frame length. | |
2397 | */ | |
2398 | if (unlikely(do_tstamp)) { | |
0d0cffdc | 2399 | txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len; |
f0ee7acf | 2400 | txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) | |
0d0cffdc | 2401 | (skb_headlen(skb) - fcb_len); |
f0ee7acf MR |
2402 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; |
2403 | } else { | |
2404 | lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); | |
2405 | } | |
1da177e4 | 2406 | |
50ad076b | 2407 | netdev_tx_sent_queue(txq, bytes_sent); |
d8a0f1b0 | 2408 | |
0977f817 | 2409 | /* We can work in parallel with gfar_clean_tx_ring(), except |
a3bc1f11 AV |
2410 | * when modifying num_txbdfree. Note that we didn't grab the lock |
2411 | * when we were reading the num_txbdfree and checking for available | |
2412 | * space; that is because outside of this function it can only grow, | |
2413 | * and once we've got needed space, it cannot suddenly disappear. | |
2414 | * | |
2415 | * The lock also protects us from gfar_error(), which can modify | |
2416 | * regs->tstat and thus retrigger the transfers, which is why we | |
2417 | * also must grab the lock before setting ready bit for the first | |
2418 | * to be transmitted BD. | |
2419 | */ | |
2420 | spin_lock_irqsave(&tx_queue->txlock, flags); | |
2421 | ||
d55398ba | 2422 | gfar_wmb(); |
7f7f5316 | 2423 | |
4669bc90 DH |
2424 | txbdp_start->lstatus = lstatus; |
2425 | ||
d55398ba | 2426 | gfar_wmb(); /* force lstatus write before tx_skbuff */ |
0eddba52 AV |
2427 | |
2428 | tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; | |
2429 | ||
4669bc90 | 2430 | /* Update the current skb pointer to the next entry we will use |
0977f817 JC |
2431 | * (wrapping if necessary) |
2432 | */ | |
a12f801d | 2433 | tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & |
bc4598bc | 2434 | TX_RING_MOD_MASK(tx_queue->tx_ring_size); |
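/* TX_RING_MOD_MASK() is a (size - 1) style mask, so this wrap-around
 * assumes the Tx ring size is a power of two.
 */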
4669bc90 | 2435 | |
a12f801d | 2436 | tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); |
4669bc90 DH |
2437 | |
2438 | /* reduce TxBD free count */ | |
f0ee7acf | 2439 | tx_queue->num_txbdfree -= (nr_txbds); |
1da177e4 LT |
2440 | |
2441 | /* If the next BD still needs to be cleaned up, then the bds | |
0977f817 JC |
2442 | * are full. We need to tell the kernel to stop sending us stuff. |
2443 | */ | |
a12f801d | 2444 | if (!tx_queue->num_txbdfree) { |
fba4ed03 | 2445 | netif_tx_stop_queue(txq); |
1da177e4 | 2446 | |
09f75cd7 | 2447 | dev->stats.tx_fifo_errors++; |
1da177e4 LT |
2448 | } |
2449 | ||
1da177e4 | 2450 | /* Tell the DMA to go go go */ |
fba4ed03 | 2451 | gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); |
1da177e4 LT |
2452 | |
2453 | /* Unlock priv */ | |
a12f801d | 2454 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
1da177e4 | 2455 | |
54dc79fe | 2456 | return NETDEV_TX_OK; |
0a4b5a24 KH |
2457 | |
2458 | dma_map_err: | |
2459 | txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size); | |
2460 | if (do_tstamp) | |
2461 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); | |
2462 | for (i = 0; i < nr_frags; i++) { | |
2463 | lstatus = txbdp->lstatus; | |
2464 | if (!(lstatus & BD_LFLAG(TXBD_READY))) | |
2465 | break; | |
2466 | ||
2467 | txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY); | |
2468 | bufaddr = txbdp->bufPtr; | |
2469 | dma_unmap_page(priv->dev, bufaddr, txbdp->length, | |
2470 | DMA_TO_DEVICE); | |
2471 | txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); | |
2472 | } | |
2473 | gfar_wmb(); | |
2474 | dev_kfree_skb_any(skb); | |
2475 | return NETDEV_TX_OK; | |
1da177e4 LT |
2476 | } |
2477 | ||
2478 | /* Stops the kernel queue, and halts the controller */ | |
2479 | static int gfar_close(struct net_device *dev) | |
2480 | { | |
2481 | struct gfar_private *priv = netdev_priv(dev); | |
bea3348e | 2482 | |
ab939905 | 2483 | cancel_work_sync(&priv->reset_task); |
1da177e4 LT |
2484 | stop_gfar(dev); |
2485 | ||
bb40dcbb AF |
2486 | /* Disconnect from the PHY */ |
2487 | phy_disconnect(priv->phydev); | |
2488 | priv->phydev = NULL; | |
1da177e4 | 2489 | |
80ec396c CM |
2490 | gfar_free_irq(priv); |
2491 | ||
1da177e4 LT |
2492 | return 0; |
2493 | } | |
2494 | ||
1da177e4 | 2495 | /* Changes the mac address if the controller is not running. */ |
f162b9d5 | 2496 | static int gfar_set_mac_address(struct net_device *dev) |
1da177e4 | 2497 | { |
7f7f5316 | 2498 | gfar_set_mac_for_addr(dev, 0, dev->dev_addr); |
1da177e4 LT |
2499 | |
2500 | return 0; | |
2501 | } | |
2502 | ||
1da177e4 LT |
2503 | static int gfar_change_mtu(struct net_device *dev, int new_mtu) |
2504 | { | |
1da177e4 | 2505 | struct gfar_private *priv = netdev_priv(dev); |
0bbaf069 KG |
2506 | int frame_size = new_mtu + ETH_HLEN; |
2507 | ||
1da177e4 | 2508 | if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { |
59deab26 | 2509 | netif_err(priv, drv, dev, "Invalid MTU setting\n"); |
1da177e4 LT |
2510 | return -EINVAL; |
2511 | } | |
2512 | ||
0851133b CM |
2513 | while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) |
2514 | cpu_relax(); | |
2515 | ||
88302648 | 2516 | if (dev->flags & IFF_UP) |
1da177e4 LT |
2517 | stop_gfar(dev); |
2518 | ||
1da177e4 LT |
2519 | dev->mtu = new_mtu; |
2520 | ||
88302648 | 2521 | if (dev->flags & IFF_UP) |
1da177e4 LT |
2522 | startup_gfar(dev); |
2523 | ||
0851133b CM |
2524 | clear_bit_unlock(GFAR_RESETTING, &priv->state); |
2525 | ||
1da177e4 LT |
2526 | return 0; |
2527 | } | |
2528 | ||
0851133b CM |
2529 | void reset_gfar(struct net_device *ndev) |
2530 | { | |
2531 | struct gfar_private *priv = netdev_priv(ndev); | |
2532 | ||
2533 | while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) | |
2534 | cpu_relax(); | |
2535 | ||
2536 | stop_gfar(ndev); | |
2537 | startup_gfar(ndev); | |
2538 | ||
2539 | clear_bit_unlock(GFAR_RESETTING, &priv->state); | |
2540 | } | |
2541 | ||
ab939905 | 2542 | /* gfar_reset_task gets scheduled when a packet has not been |
1da177e4 LT |
2543 | * transmitted after a set amount of time. |
2544 | * For now, assume that clearing out all the structures, and | |
ab939905 SS |
2545 | * starting over will fix the problem. |
2546 | */ | |
2547 | static void gfar_reset_task(struct work_struct *work) | |
1da177e4 | 2548 | { |
ab939905 | 2549 | struct gfar_private *priv = container_of(work, struct gfar_private, |
bc4598bc | 2550 | reset_task); |
0851133b | 2551 | reset_gfar(priv->ndev); |
1da177e4 LT |
2552 | } |
2553 | ||
ab939905 SS |
2554 | static void gfar_timeout(struct net_device *dev) |
2555 | { | |
2556 | struct gfar_private *priv = netdev_priv(dev); | |
2557 | ||
2558 | dev->stats.tx_errors++; | |
2559 | schedule_work(&priv->reset_task); | |
2560 | } | |
2561 | ||
acbc0f03 EL |
2562 | static void gfar_align_skb(struct sk_buff *skb) |
2563 | { | |
2564 | /* We need the data buffer to be aligned properly. We will reserve | |
2565 | * as many bytes as needed to align the data properly | |
2566 | */ | |
2567 | skb_reserve(skb, RXBUF_ALIGNMENT - | |
bc4598bc | 2568 | (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); |
acbc0f03 EL |
2569 | } |
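/* Note: gfar_alloc_skb() below over-allocates by RXBUF_ALIGNMENT bytes,
 * so the reserve above always stays within the buffer.
 */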
2570 | ||
1da177e4 | 2571 | /* Interrupt Handler for Transmit complete */ |
c233cf40 | 2572 | static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) |
1da177e4 | 2573 | { |
a12f801d | 2574 | struct net_device *dev = tx_queue->dev; |
d8a0f1b0 | 2575 | struct netdev_queue *txq; |
d080cd63 | 2576 | struct gfar_private *priv = netdev_priv(dev); |
f0ee7acf | 2577 | struct txbd8 *bdp, *next = NULL; |
4669bc90 | 2578 | struct txbd8 *lbdp = NULL; |
a12f801d | 2579 | struct txbd8 *base = tx_queue->tx_bd_base; |
4669bc90 DH |
2580 | struct sk_buff *skb; |
2581 | int skb_dirtytx; | |
a12f801d | 2582 | int tx_ring_size = tx_queue->tx_ring_size; |
f0ee7acf | 2583 | int frags = 0, nr_txbds = 0; |
4669bc90 | 2584 | int i; |
d080cd63 | 2585 | int howmany = 0; |
d8a0f1b0 PG |
2586 | int tqi = tx_queue->qindex; |
2587 | unsigned int bytes_sent = 0; | |
4669bc90 | 2588 | u32 lstatus; |
f0ee7acf | 2589 | size_t buflen; |
1da177e4 | 2590 | |
d8a0f1b0 | 2591 | txq = netdev_get_tx_queue(dev, tqi); |
a12f801d SG |
2592 | bdp = tx_queue->dirty_tx; |
2593 | skb_dirtytx = tx_queue->skb_dirtytx; | |
1da177e4 | 2594 | |
a12f801d | 2595 | while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { |
a3bc1f11 AV |
2596 | unsigned long flags; |
2597 | ||
4669bc90 | 2598 | frags = skb_shinfo(skb)->nr_frags; |
f0ee7acf | 2599 | |
0977f817 | 2600 | /* When time stamping, one additional TxBD must be freed. |
f0ee7acf MR |
2601 | * Also, we need to dma_unmap_single() the TxPAL. |
2602 | */ | |
2244d07b | 2603 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) |
f0ee7acf MR |
2604 | nr_txbds = frags + 2; |
2605 | else | |
2606 | nr_txbds = frags + 1; | |
2607 | ||
2608 | lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); | |
1da177e4 | 2609 | |
4669bc90 | 2610 | lstatus = lbdp->lstatus; |
1da177e4 | 2611 | |
4669bc90 DH |
2612 | /* Only clean completed frames */ |
2613 | if ((lstatus & BD_LFLAG(TXBD_READY)) && | |
bc4598bc | 2614 | (lstatus & BD_LENGTH_MASK)) |
4669bc90 DH |
2615 | break; |
2616 | ||
2244d07b | 2617 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { |
f0ee7acf | 2618 | next = next_txbd(bdp, base, tx_ring_size); |
9c4886e5 | 2619 | buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN; |
f0ee7acf MR |
2620 | } else |
2621 | buflen = bdp->length; | |
2622 | ||
369ec162 | 2623 | dma_unmap_single(priv->dev, bdp->bufPtr, |
bc4598bc | 2624 | buflen, DMA_TO_DEVICE); |
f0ee7acf | 2625 | |
2244d07b | 2626 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { |
f0ee7acf MR |
2627 | struct skb_shared_hwtstamps shhwtstamps; |
2628 | u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); | |
bc4598bc | 2629 | |
f0ee7acf MR |
2630 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
2631 | shhwtstamps.hwtstamp = ns_to_ktime(*ns); | |
9c4886e5 | 2632 | skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); |
f0ee7acf MR |
2633 | skb_tstamp_tx(skb, &shhwtstamps); |
2634 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); | |
2635 | bdp = next; | |
2636 | } | |
81183059 | 2637 | |
4669bc90 DH |
2638 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); |
2639 | bdp = next_txbd(bdp, base, tx_ring_size); | |
d080cd63 | 2640 | |
4669bc90 | 2641 | for (i = 0; i < frags; i++) { |
369ec162 | 2642 | dma_unmap_page(priv->dev, bdp->bufPtr, |
bc4598bc | 2643 | bdp->length, DMA_TO_DEVICE); |
4669bc90 DH |
2644 | bdp->lstatus &= BD_LFLAG(TXBD_WRAP); |
2645 | bdp = next_txbd(bdp, base, tx_ring_size); | |
2646 | } | |
1da177e4 | 2647 | |
50ad076b | 2648 | bytes_sent += GFAR_CB(skb)->bytes_sent; |
d8a0f1b0 | 2649 | |
acb600de | 2650 | dev_kfree_skb_any(skb); |
0fd56bb5 | 2651 | |
a12f801d | 2652 | tx_queue->tx_skbuff[skb_dirtytx] = NULL; |
d080cd63 | 2653 | |
4669bc90 | 2654 | skb_dirtytx = (skb_dirtytx + 1) & |
bc4598bc | 2655 | TX_RING_MOD_MASK(tx_ring_size); |
4669bc90 DH |
2656 | |
2657 | howmany++; | |
a3bc1f11 | 2658 | spin_lock_irqsave(&tx_queue->txlock, flags); |
f0ee7acf | 2659 | tx_queue->num_txbdfree += nr_txbds; |
a3bc1f11 | 2660 | spin_unlock_irqrestore(&tx_queue->txlock, flags); |
4669bc90 | 2661 | } |
1da177e4 | 2662 | |
4669bc90 | 2663 | /* If we freed a buffer, we can restart transmission, if necessary */ |
0851133b CM |
2664 | if (tx_queue->num_txbdfree && |
2665 | netif_tx_queue_stopped(txq) && | |
2666 | !(test_bit(GFAR_DOWN, &priv->state))) | |
2667 | netif_wake_subqueue(priv->ndev, tqi); | |
1da177e4 | 2668 | |
4669bc90 | 2669 | /* Update dirty indicators */ |
a12f801d SG |
2670 | tx_queue->skb_dirtytx = skb_dirtytx; |
2671 | tx_queue->dirty_tx = bdp; | |
1da177e4 | 2672 | |
d8a0f1b0 | 2673 | netdev_tx_completed_queue(txq, howmany, bytes_sent); |
d080cd63 DH |
2674 | } |
2675 | ||
2281a0f3 | 2676 | static struct sk_buff *gfar_alloc_skb(struct net_device *dev) |
1da177e4 LT |
2677 | { |
2678 | struct gfar_private *priv = netdev_priv(dev); | |
acb600de | 2679 | struct sk_buff *skb; |
1da177e4 | 2680 | |
acbc0f03 | 2681 | skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); |
815b97c6 | 2682 | if (!skb) |
1da177e4 LT |
2683 | return NULL; |
2684 | ||
acbc0f03 | 2685 | gfar_align_skb(skb); |
7f7f5316 | 2686 | |
acbc0f03 EL |
2687 | return skb; |
2688 | } | |
2689 | ||
91c53f76 | 2690 | static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr) |
acbc0f03 | 2691 | { |
0a4b5a24 KH |
2692 | struct gfar_private *priv = netdev_priv(dev); |
2693 | struct sk_buff *skb; | |
2694 | dma_addr_t addr; | |
2695 | ||
2696 | skb = gfar_alloc_skb(dev); | |
2697 | if (!skb) | |
2698 | return NULL; | |
2699 | ||
2700 | addr = dma_map_single(priv->dev, skb->data, | |
2701 | priv->rx_buffer_size, DMA_FROM_DEVICE); | |
2702 | if (unlikely(dma_mapping_error(priv->dev, addr))) { | |
2703 | dev_kfree_skb_any(skb); | |
2704 | return NULL; | |
2705 | } | |
2706 | ||
2707 | *bufaddr = addr; | |
2708 | return skb; | |
1da177e4 LT |
2709 | } |
2710 | ||
298e1a9e | 2711 | static inline void count_errors(unsigned short status, struct net_device *dev) |
1da177e4 | 2712 | { |
298e1a9e | 2713 | struct gfar_private *priv = netdev_priv(dev); |
09f75cd7 | 2714 | struct net_device_stats *stats = &dev->stats; |
1da177e4 LT |
2715 | struct gfar_extra_stats *estats = &priv->extra_stats; |
2716 | ||
0977f817 | 2717 | /* If the packet was truncated, none of the other errors matter */ |
1da177e4 LT |
2718 | if (status & RXBD_TRUNCATED) { |
2719 | stats->rx_length_errors++; | |
2720 | ||
212079df | 2721 | atomic64_inc(&estats->rx_trunc); |
1da177e4 LT |
2722 | |
2723 | return; | |
2724 | } | |
2725 | /* Count the errors, if there were any */ | |
2726 | if (status & (RXBD_LARGE | RXBD_SHORT)) { | |
2727 | stats->rx_length_errors++; | |
2728 | ||
2729 | if (status & RXBD_LARGE) | |
212079df | 2730 | atomic64_inc(&estats->rx_large); |
1da177e4 | 2731 | else |
212079df | 2732 | atomic64_inc(&estats->rx_short); |
1da177e4 LT |
2733 | } |
2734 | if (status & RXBD_NONOCTET) { | |
2735 | stats->rx_frame_errors++; | |
212079df | 2736 | atomic64_inc(&estats->rx_nonoctet); |
1da177e4 LT |
2737 | } |
2738 | if (status & RXBD_CRCERR) { | |
212079df | 2739 | atomic64_inc(&estats->rx_crcerr); |
1da177e4 LT |
2740 | stats->rx_crc_errors++; |
2741 | } | |
2742 | if (status & RXBD_OVERRUN) { | |
212079df | 2743 | atomic64_inc(&estats->rx_overrun); |
1da177e4 LT |
2744 | stats->rx_crc_errors++; |
2745 | } | |
2746 | } | |
2747 | ||
f4983704 | 2748 | irqreturn_t gfar_receive(int irq, void *grp_id) |
1da177e4 | 2749 | { |
aeb12c5e CM |
2750 | struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; |
2751 | unsigned long flags; | |
2752 | u32 imask; | |
2753 | ||
2754 | if (likely(napi_schedule_prep(&grp->napi_rx))) { | |
2755 | spin_lock_irqsave(&grp->grplock, flags); | |
2756 | imask = gfar_read(&grp->regs->imask); | |
2757 | imask &= IMASK_RX_DISABLED; | |
2758 | gfar_write(&grp->regs->imask, imask); | |
2759 | spin_unlock_irqrestore(&grp->grplock, flags); | |
2760 | __napi_schedule(&grp->napi_rx); | |
2761 | } else { | |
2762 | /* Clear IEVENT, so interrupts aren't called again | |
2763 | * because of the packets that have already arrived. | |
2764 | */ | |
2765 | gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); | |
2766 | } | |
2767 | ||
2768 | return IRQ_HANDLED; | |
2769 | } | |
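/* Rx interrupts stay masked (IMASK_RX_DISABLED) until the Rx NAPI poll
 * routine re-enables them (not shown here); the Tx handler below follows
 * the same pattern.
 */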
2770 | ||
2771 | /* Interrupt Handler for Transmit complete */ | |
2772 | static irqreturn_t gfar_transmit(int irq, void *grp_id) | |
2773 | { | |
2774 | struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; | |
2775 | unsigned long flags; | |
2776 | u32 imask; | |
2777 | ||
2778 | if (likely(napi_schedule_prep(&grp->napi_tx))) { | |
2779 | spin_lock_irqsave(&grp->grplock, flags); | |
2780 | imask = gfar_read(&grp->regs->imask); | |
2781 | imask &= IMASK_TX_DISABLED; | |
2782 | gfar_write(&grp->regs->imask, imask); | |
2783 | spin_unlock_irqrestore(&grp->grplock, flags); | |
2784 | __napi_schedule(&grp->napi_tx); | |
2785 | } else { | |
2786 | /* Clear IEVENT, so interrupts aren't called again | |
2787 | * because of the packets that have already arrived. | |
2788 | */ | |
2789 | gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); | |
2790 | } | |
2791 | ||
1da177e4 LT |
2792 | return IRQ_HANDLED; |
2793 | } | |
2794 | ||
0bbaf069 KG |
2795 | static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) |
2796 | { | |
2797 | /* If valid headers were found, and valid sums | |
2798 | * were verified, then we tell the kernel that no | |
0977f817 JC |
2799 | * checksumming is necessary. Otherwise, it is [FIXME] |
2800 | */ | |
7f7f5316 | 2801 | if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) |
0bbaf069 KG |
2802 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2803 | else | |
bc8acf2c | 2804 | skb_checksum_none_assert(skb); |
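/* Both "checksum performed" bits (IP header and TCP/UDP) must be set;
 * any other flag covered by RXFCB_CSUM_MASK makes the comparison above
 * fail and verification is left to the stack.
 */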
0bbaf069 KG |
2805 | } |
2806 | ||
2807 | ||
0977f817 | 2808 | /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ |
61db26c6 CM |
2809 | static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, |
2810 | int amount_pull, struct napi_struct *napi) | |
1da177e4 LT |
2811 | { |
2812 | struct gfar_private *priv = netdev_priv(dev); | |
0bbaf069 | 2813 | struct rxfcb *fcb = NULL; |
1da177e4 | 2814 | |
2c2db48a DH |
2815 | /* fcb is at the beginning if exists */ |
2816 | fcb = (struct rxfcb *)skb->data; | |
0bbaf069 | 2817 | |
0977f817 JC |
2818 | /* Remove the FCB from the skb |
2819 | * Remove the padded bytes, if there are any | |
2820 | */ | |
f74dac08 SG |
2821 | if (amount_pull) { |
2822 | skb_record_rx_queue(skb, fcb->rq); | |
2c2db48a | 2823 | skb_pull(skb, amount_pull); |
f74dac08 | 2824 | } |
0bbaf069 | 2825 | |
cc772ab7 MR |
2826 | /* Get receive timestamp from the skb */ |
2827 | if (priv->hwts_rx_en) { | |
2828 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); | |
2829 | u64 *ns = (u64 *) skb->data; | |
bc4598bc | 2830 | |
cc772ab7 MR |
2831 | memset(shhwtstamps, 0, sizeof(*shhwtstamps)); |
2832 | shhwtstamps->hwtstamp = ns_to_ktime(*ns); | |
2833 | } | |
2834 | ||
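/* priv->padding was set to 8 in the probe path above when the controller
 * has a hardware timer; that is the room used for the inserted Rx
 * timestamp read here, and it is pulled off just below.
 */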
2835 | if (priv->padding) | |
2836 | skb_pull(skb, priv->padding); | |
2837 | ||
8b3afe95 | 2838 | if (dev->features & NETIF_F_RXCSUM) |
2c2db48a | 2839 | gfar_rx_checksum(skb, fcb); |
0bbaf069 | 2840 | |
2c2db48a DH |
2841 | /* Tell the skb what kind of packet this is */ |
2842 | skb->protocol = eth_type_trans(skb, dev); | |
1da177e4 | 2843 | |
f646968f | 2844 | /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
32f7fd44 JP |
2845 | * Even if vlan rx accel is disabled, on some chips |
2846 | * RXFCB_VLN is pseudo-randomly set. | |
2847 | */ | |
f646968f | 2848 | if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && |
32f7fd44 | 2849 | fcb->flags & RXFCB_VLN) |
e5905c83 | 2850 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl); |
87c288c6 | 2851 | |
2c2db48a | 2852 | /* Send the packet up the stack */ |
953d2768 | 2853 | napi_gro_receive(napi, skb); |
0bbaf069 | 2854 | |
1da177e4 LT |
2855 | } |
2856 | ||
2857 | /* gfar_clean_rx_ring() -- Processes each frame in the rx ring | |
2281a0f3 JC |
2858 | * until the budget/quota has been reached. Returns the number |
2859 | * of frames handled | |
1da177e4 | 2860 | */ |
a12f801d | 2861 | int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) |
1da177e4 | 2862 | { |
a12f801d | 2863 | struct net_device *dev = rx_queue->dev; |
31de198b | 2864 | struct rxbd8 *bdp, *base; |
1da177e4 | 2865 | struct sk_buff *skb; |
2c2db48a DH |
2866 | int pkt_len; |
2867 | int amount_pull; | |
1da177e4 LT |
2868 | int howmany = 0; |
2869 | struct gfar_private *priv = netdev_priv(dev); | |
2870 | ||
2871 | /* Get the first full descriptor */ | |
a12f801d SG |
2872 | bdp = rx_queue->cur_rx; |
2873 | base = rx_queue->rx_bd_base; | |
1da177e4 | 2874 | |
ba779711 | 2875 | amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0; |
2c2db48a | 2876 | |
1da177e4 | 2877 | while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { |
815b97c6 | 2878 | struct sk_buff *newskb; |
0a4b5a24 | 2879 | dma_addr_t bufaddr; |
bc4598bc | 2880 | |
3b6330ce | 2881 | rmb(); |
815b97c6 AF |
2882 | |
2883 | /* Add another skb for the future */ | |
0a4b5a24 | 2884 | newskb = gfar_new_skb(dev, &bufaddr); |
815b97c6 | 2885 | |
a12f801d | 2886 | skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; |
1da177e4 | 2887 | |
369ec162 | 2888 | dma_unmap_single(priv->dev, bdp->bufPtr, |
bc4598bc | 2889 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
81183059 | 2890 | |
63b88b90 | 2891 | if (unlikely(!(bdp->status & RXBD_ERR) && |
bc4598bc | 2892 | bdp->length > priv->rx_buffer_size)) |
63b88b90 AV |
2893 | bdp->status = RXBD_LARGE; |
2894 | ||
815b97c6 AF |
2895 | /* We drop the frame if we failed to allocate a new buffer */ |
2896 | if (unlikely(!newskb || !(bdp->status & RXBD_LAST) || | |
bc4598bc | 2897 | bdp->status & RXBD_ERR)) { |
815b97c6 AF |
2898 | count_errors(bdp->status, dev); |
2899 | ||
0a4b5a24 | 2900 | if (unlikely(!newskb)) { |
815b97c6 | 2901 | newskb = skb; |
0a4b5a24 KH |
2902 | bufaddr = bdp->bufPtr; |
2903 | } else if (skb) | |
acb600de | 2904 | dev_kfree_skb(skb); |
815b97c6 | 2905 | } else { |
1da177e4 | 2906 | /* Increment the number of packets */ |
a7f38041 | 2907 | rx_queue->stats.rx_packets++; |
1da177e4 LT |
2908 | howmany++; |
2909 | ||
2c2db48a DH |
2910 | if (likely(skb)) { |
2911 | pkt_len = bdp->length - ETH_FCS_LEN; | |
2912 | /* Remove the FCS from the packet length */ | |
2913 | skb_put(skb, pkt_len); | |
a7f38041 | 2914 | rx_queue->stats.rx_bytes += pkt_len; |
f74dac08 | 2915 | skb_record_rx_queue(skb, rx_queue->qindex); |
cd754a57 | 2916 | gfar_process_frame(dev, skb, amount_pull, |
aeb12c5e | 2917 | &rx_queue->grp->napi_rx); |
2c2db48a DH |
2918 | |
2919 | } else { | |
59deab26 | 2920 | netif_warn(priv, rx_err, dev, "Missing skb!\n"); |
a7f38041 | 2921 | rx_queue->stats.rx_dropped++; |
212079df | 2922 | atomic64_inc(&priv->extra_stats.rx_skbmissing); |
2c2db48a | 2923 | } |
1da177e4 | 2924 | |
1da177e4 LT |
2925 | } |
2926 | ||
a12f801d | 2927 | rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; |
1da177e4 | 2928 | |
815b97c6 | 2929 | /* Setup the new bdp */ |
0a4b5a24 | 2930 | gfar_init_rxbdp(rx_queue, bdp, bufaddr); |
1da177e4 | 2931 | |
45b679c9 MP |
2932 | /* Update Last Free RxBD pointer for LFC */ |
2933 | if (unlikely(rx_queue->rfbptr && priv->tx_actual_en)) | |
2934 | gfar_write(rx_queue->rfbptr, (u32)bdp); | |
2935 | ||
1da177e4 | 2936 | /* Update to the next pointer */ |
a12f801d | 2937 | bdp = next_bd(bdp, base, rx_queue->rx_ring_size); |
1da177e4 LT |
2938 | |
2939 | /* update to point at the next skb */ | |
bc4598bc JC |
2940 | rx_queue->skb_currx = (rx_queue->skb_currx + 1) & |
2941 | RX_RING_MOD_MASK(rx_queue->rx_ring_size); | |
1da177e4 LT |
2942 | } |
2943 | ||
2944 | /* Update the current rxbd pointer to be the next one */ | |
a12f801d | 2945 | rx_queue->cur_rx = bdp; |
1da177e4 | 2946 | |
1da177e4 LT |
2947 | return howmany; |
2948 | } | |
2949 | ||
aeb12c5e | 2950 | static int gfar_poll_rx_sq(struct napi_struct *napi, int budget) |
5eaedf31 CM |
2951 | { |
2952 | struct gfar_priv_grp *gfargrp = | |
aeb12c5e | 2953 | container_of(napi, struct gfar_priv_grp, napi_rx); |
5eaedf31 | 2954 | struct gfar __iomem *regs = gfargrp->regs; |
71ff9e3d | 2955 | struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue; |
5eaedf31 CM |
2956 | int work_done = 0; |
2957 | ||
2958 | /* Clear IEVENT, so interrupts aren't called again | |
2959 | * because of the packets that have already arrived | |
2960 | */ | |
aeb12c5e | 2961 | gfar_write(®s->ievent, IEVENT_RX_MASK); |
5eaedf31 CM |
2962 | |
2963 | work_done = gfar_clean_rx_ring(rx_queue, budget); | |
2964 | ||
2965 | if (work_done < budget) { | |
aeb12c5e | 2966 | u32 imask; |
5eaedf31 CM |
2967 | napi_complete(napi); |
2968 | /* Clear the halt bit in RSTAT */ | |
2969 | gfar_write(®s->rstat, gfargrp->rstat); | |
2970 | ||
aeb12c5e CM |
2971 | spin_lock_irq(&gfargrp->grplock); |
2972 | imask = gfar_read(®s->imask); | |
2973 | imask |= IMASK_RX_DEFAULT; | |
2974 | gfar_write(®s->imask, imask); | |
2975 | spin_unlock_irq(&gfargrp->grplock); | |
5eaedf31 CM |
2976 | } |
2977 | ||
2978 | return work_done; | |
2979 | } | |
2980 | ||
aeb12c5e | 2981 | static int gfar_poll_tx_sq(struct napi_struct *napi, int budget) |
1da177e4 | 2982 | { |
bc4598bc | 2983 | struct gfar_priv_grp *gfargrp = |
aeb12c5e CM |
2984 | container_of(napi, struct gfar_priv_grp, napi_tx); |
2985 | struct gfar __iomem *regs = gfargrp->regs; | |
71ff9e3d | 2986 | struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; |
aeb12c5e CM |
2987 | u32 imask; |
2988 | ||
2989 | /* Clear IEVENT, so interrupts aren't called again | |
2990 | * because of the packets that have already arrived | |
2991 | */ | |
2992 | gfar_write(®s->ievent, IEVENT_TX_MASK); | |
2993 | ||
2994 | /* run Tx cleanup to completion */ | |
2995 | if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) | |
2996 | gfar_clean_tx_ring(tx_queue); | |
2997 | ||
2998 | napi_complete(napi); | |
2999 | ||
3000 | spin_lock_irq(&gfargrp->grplock); | |
3001 | imask = gfar_read(®s->imask); | |
3002 | imask |= IMASK_TX_DEFAULT; | |
3003 | gfar_write(®s->imask, imask); | |
3004 | spin_unlock_irq(&gfargrp->grplock); | |
3005 | ||
3006 | return 0; | |
3007 | } | |
3008 | ||
3009 | static int gfar_poll_rx(struct napi_struct *napi, int budget) | |
3010 | { | |
3011 | struct gfar_priv_grp *gfargrp = | |
3012 | container_of(napi, struct gfar_priv_grp, napi_rx); | |
fba4ed03 | 3013 | struct gfar_private *priv = gfargrp->priv; |
46ceb60c | 3014 | struct gfar __iomem *regs = gfargrp->regs; |
fba4ed03 | 3015 | struct gfar_priv_rx_q *rx_queue = NULL; |
c233cf40 | 3016 | int work_done = 0, work_done_per_q = 0; |
39c0a0d5 | 3017 | int i, budget_per_q = 0; |
6be5ed3f CM |
3018 | unsigned long rstat_rxf; |
3019 | int num_act_queues; | |
fba4ed03 | 3020 | |
8c7396ae | 3021 | /* Clear IEVENT, so interrupts aren't called again |
0977f817 JC |
3022 | * because of the packets that have already arrived |
3023 | */ | |
aeb12c5e | 3024 | gfar_write(®s->ievent, IEVENT_RX_MASK); |
8c7396ae | 3025 | |
6be5ed3f CM |
3026 | rstat_rxf = gfar_read(®s->rstat) & RSTAT_RXF_MASK; |
3027 | ||
3028 | num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS); | |
3029 | if (num_act_queues) | |
3030 | budget_per_q = budget/num_act_queues; | |
3031 | ||
3ba405db CM |
3032 | for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { |
3033 | /* skip queue if not active */ | |
3034 | if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) | |
3035 | continue; | |
1da177e4 | 3036 | |
3ba405db CM |
3037 | rx_queue = priv->rx_queue[i]; |
3038 | work_done_per_q = | |
3039 | gfar_clean_rx_ring(rx_queue, budget_per_q); | |
3040 | work_done += work_done_per_q; | |
3041 | ||
3042 | /* finished processing this queue */ | |
3043 | if (work_done_per_q < budget_per_q) { | |
3044 | /* clear active queue hw indication */ | |
3045 | gfar_write(®s->rstat, | |
3046 | RSTAT_CLEAR_RXF0 >> i); | |
3047 | num_act_queues--; | |
3048 | ||
3049 | if (!num_act_queues) | |
3050 | break; | |
3051 | } | |
3052 | } | |
42199884 | 3053 | |
aeb12c5e CM |
3054 | if (!num_act_queues) { |
3055 | u32 imask; | |
3ba405db | 3056 | napi_complete(napi); |
1da177e4 | 3057 | |
3ba405db CM |
3058 | /* Clear the halt bit in RSTAT */ |
3059 | gfar_write(®s->rstat, gfargrp->rstat); | |
1da177e4 | 3060 | |
aeb12c5e CM |
3061 | spin_lock_irq(&gfargrp->grplock); |
3062 | imask = gfar_read(®s->imask); | |
3063 | imask |= IMASK_RX_DEFAULT; | |
3064 | gfar_write(®s->imask, imask); | |
3065 | spin_unlock_irq(&gfargrp->grplock); | |
1da177e4 LT |
3066 | } |
3067 | ||
c233cf40 | 3068 | return work_done; |
1da177e4 | 3069 | } |
1da177e4 | 3070 | |
aeb12c5e CM |
3071 | static int gfar_poll_tx(struct napi_struct *napi, int budget) |
3072 | { | |
3073 | struct gfar_priv_grp *gfargrp = | |
3074 | container_of(napi, struct gfar_priv_grp, napi_tx); | |
3075 | struct gfar_private *priv = gfargrp->priv; | |
3076 | struct gfar __iomem *regs = gfargrp->regs; | |
3077 | struct gfar_priv_tx_q *tx_queue = NULL; | |
3078 | int has_tx_work = 0; | |
3079 | int i; | |
3080 | ||
3081 | /* Clear IEVENT, so interrupts aren't called again | |
3082 | * because of the packets that have already arrived | |
3083 | */ | |
3084 | gfar_write(®s->ievent, IEVENT_TX_MASK); | |
3085 | ||
3086 | for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { | |
3087 | tx_queue = priv->tx_queue[i]; | |
3088 | /* run Tx cleanup to completion */ | |
3089 | if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { | |
3090 | gfar_clean_tx_ring(tx_queue); | |
3091 | has_tx_work = 1; | |
3092 | } | |
3093 | } | |
3094 | ||
3095 | if (!has_tx_work) { | |
3096 | u32 imask; | |
3097 | napi_complete(napi); | |
3098 | ||
3099 | spin_lock_irq(&gfargrp->grplock); | |
3100 | imask = gfar_read(®s->imask); | |
3101 | imask |= IMASK_TX_DEFAULT; | |
3102 | gfar_write(®s->imask, imask); | |
3103 | spin_unlock_irq(&gfargrp->grplock); | |
3104 | } | |
3105 | ||
3106 | return 0; | |
3107 | } | |
3108 | ||
3109 | ||
f2d71c2d | 3110 | #ifdef CONFIG_NET_POLL_CONTROLLER |
0977f817 | 3111 | /* Polling 'interrupt' - used by things like netconsole to send skbs |
f2d71c2d VW |
3112 | * without having to re-enable interrupts. It's not called while |
3113 | * the interrupt routine is executing. | |
3114 | */ | |
3115 | static void gfar_netpoll(struct net_device *dev) | |
3116 | { | |
3117 | struct gfar_private *priv = netdev_priv(dev); | |
3a2e16c8 | 3118 | int i; |
f2d71c2d VW |
3119 | |
3120 | /* If the device has multiple interrupts, run tx/rx */ | |
b31a1d8b | 3121 | if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { |
46ceb60c | 3122 | for (i = 0; i < priv->num_grps; i++) { |
62ed839d PG |
3123 | struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
3124 | ||
3125 | disable_irq(gfar_irq(grp, TX)->irq); | |
3126 | disable_irq(gfar_irq(grp, RX)->irq); | |
3127 | disable_irq(gfar_irq(grp, ER)->irq); | |
3128 | gfar_interrupt(gfar_irq(grp, TX)->irq, grp); | |
3129 | enable_irq(gfar_irq(grp, ER)->irq); | |
3130 | enable_irq(gfar_irq(grp, RX)->irq); | |
3131 | enable_irq(gfar_irq(grp, TX)->irq); | |
46ceb60c | 3132 | } |
f2d71c2d | 3133 | } else { |
46ceb60c | 3134 | for (i = 0; i < priv->num_grps; i++) { |
62ed839d PG |
3135 | struct gfar_priv_grp *grp = &priv->gfargrp[i]; |
3136 | ||
3137 | disable_irq(gfar_irq(grp, TX)->irq); | |
3138 | gfar_interrupt(gfar_irq(grp, TX)->irq, grp); | |
3139 | enable_irq(gfar_irq(grp, TX)->irq); | |
43de004b | 3140 | } |
f2d71c2d VW |
3141 | } |
3142 | } | |
3143 | #endif | |
3144 | ||
1da177e4 | 3145 | /* The interrupt handler for devices with one interrupt */ |
f4983704 | 3146 | static irqreturn_t gfar_interrupt(int irq, void *grp_id) |
1da177e4 | 3147 | { |
f4983704 | 3148 | struct gfar_priv_grp *gfargrp = grp_id; |
1da177e4 LT |
3149 | |
3150 | /* Save ievent for future reference */ | |
f4983704 | 3151 | u32 events = gfar_read(&gfargrp->regs->ievent); |
1da177e4 | 3152 | |
1da177e4 | 3153 | /* Check for reception */ |
538cc7ee | 3154 | if (events & IEVENT_RX_MASK) |
f4983704 | 3155 | gfar_receive(irq, grp_id); |
1da177e4 LT |
3156 | |
3157 | /* Check for transmit completion */ | |
538cc7ee | 3158 | if (events & IEVENT_TX_MASK) |
f4983704 | 3159 | gfar_transmit(irq, grp_id); |
1da177e4 | 3160 | |
538cc7ee SS |
3161 | /* Check for errors */ |
3162 | if (events & IEVENT_ERR_MASK) | |
f4983704 | 3163 | gfar_error(irq, grp_id); |
1da177e4 LT |
3164 | |
3165 | return IRQ_HANDLED; | |
3166 | } | |
3167 | ||
1da177e4 LT |
3168 | /* Called every time the controller might need to be made |
3169 | * aware of new link state. The PHY code conveys this | |
bb40dcbb | 3170 | * information through variables in the phydev structure, and this |
1da177e4 LT |
3171 | * function converts those variables into the appropriate |
3172 | * register values, and can bring down the device if needed. | |
3173 | */ | |
3174 | static void adjust_link(struct net_device *dev) | |
3175 | { | |
3176 | struct gfar_private *priv = netdev_priv(dev); | |
bb40dcbb | 3177 | struct phy_device *phydev = priv->phydev; |
bb40dcbb | 3178 | |
6ce29b0e | 3179 | if (unlikely(phydev->link != priv->oldlink || |
0ae93b2c GR |
3180 | (phydev->link && (phydev->duplex != priv->oldduplex || |
3181 | phydev->speed != priv->oldspeed)))) | |
6ce29b0e | 3182 | gfar_update_link_state(priv); |
bb40dcbb | 3183 | } |
1da177e4 LT |
3184 | |
3185 | /* Update the hash table based on the current list of multicast | |
3186 | * addresses we subscribe to. Also, change the promiscuity of | |
3187 | * the device based on the flags (this function is called | |
0977f817 JC |
3188 | * whenever dev->flags is changed)
3189 | */ | |
1da177e4 LT |
3190 | static void gfar_set_multi(struct net_device *dev) |
3191 | { | |
22bedad3 | 3192 | struct netdev_hw_addr *ha; |
1da177e4 | 3193 | struct gfar_private *priv = netdev_priv(dev); |
46ceb60c | 3194 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
1da177e4 LT |
3195 | u32 tempval; |
3196 | ||
a12f801d | 3197 | if (dev->flags & IFF_PROMISC) { |
1da177e4 LT |
3198 | /* Set RCTRL to PROM */ |
3199 | tempval = gfar_read(®s->rctrl); | |
3200 | tempval |= RCTRL_PROM; | |
3201 | gfar_write(®s->rctrl, tempval); | |
3202 | } else { | |
3203 | /* Set RCTRL to not PROM */ | |
3204 | tempval = gfar_read(®s->rctrl); | |
3205 | tempval &= ~(RCTRL_PROM); | |
3206 | gfar_write(®s->rctrl, tempval); | |
3207 | } | |
6aa20a22 | 3208 | |
a12f801d | 3209 | if (dev->flags & IFF_ALLMULTI) { |
1da177e4 | 3210 | /* Set the hash to rx all multicast frames */ |
0bbaf069 KG |
3211 | gfar_write(®s->igaddr0, 0xffffffff); |
3212 | gfar_write(®s->igaddr1, 0xffffffff); | |
3213 | gfar_write(®s->igaddr2, 0xffffffff); | |
3214 | gfar_write(®s->igaddr3, 0xffffffff); | |
3215 | gfar_write(®s->igaddr4, 0xffffffff); | |
3216 | gfar_write(®s->igaddr5, 0xffffffff); | |
3217 | gfar_write(®s->igaddr6, 0xffffffff); | |
3218 | gfar_write(®s->igaddr7, 0xffffffff); | |
1da177e4 LT |
3219 | gfar_write(®s->gaddr0, 0xffffffff); |
3220 | gfar_write(®s->gaddr1, 0xffffffff); | |
3221 | gfar_write(®s->gaddr2, 0xffffffff); | |
3222 | gfar_write(®s->gaddr3, 0xffffffff); | |
3223 | gfar_write(®s->gaddr4, 0xffffffff); | |
3224 | gfar_write(®s->gaddr5, 0xffffffff); | |
3225 | gfar_write(®s->gaddr6, 0xffffffff); | |
3226 | gfar_write(®s->gaddr7, 0xffffffff); | |
3227 | } else { | |
7f7f5316 AF |
3228 | int em_num; |
3229 | int idx; | |
3230 | ||
1da177e4 | 3231 | /* zero out the hash */ |
0bbaf069 KG |
3232 | gfar_write(®s->igaddr0, 0x0); |
3233 | gfar_write(®s->igaddr1, 0x0); | |
3234 | gfar_write(®s->igaddr2, 0x0); | |
3235 | gfar_write(®s->igaddr3, 0x0); | |
3236 | gfar_write(®s->igaddr4, 0x0); | |
3237 | gfar_write(®s->igaddr5, 0x0); | |
3238 | gfar_write(®s->igaddr6, 0x0); | |
3239 | gfar_write(®s->igaddr7, 0x0); | |
1da177e4 LT |
3240 | gfar_write(®s->gaddr0, 0x0); |
3241 | gfar_write(®s->gaddr1, 0x0); | |
3242 | gfar_write(®s->gaddr2, 0x0); | |
3243 | gfar_write(®s->gaddr3, 0x0); | |
3244 | gfar_write(®s->gaddr4, 0x0); | |
3245 | gfar_write(®s->gaddr5, 0x0); | |
3246 | gfar_write(®s->gaddr6, 0x0); | |
3247 | gfar_write(®s->gaddr7, 0x0); | |
3248 | ||
7f7f5316 AF |
3249 | /* If we have extended hash tables, we need to |
3250 | * clear the exact match registers to prepare for | |
0977f817 JC |
3251 | * setting them |
3252 | */ | |
7f7f5316 AF |
3253 | if (priv->extended_hash) { |
3254 | em_num = GFAR_EM_NUM + 1; | |
3255 | gfar_clear_exact_match(dev); | |
3256 | idx = 1; | |
3257 | } else { | |
3258 | idx = 0; | |
3259 | em_num = 0; | |
3260 | } | |
3261 | ||
4cd24eaf | 3262 | if (netdev_mc_empty(dev)) |
1da177e4 LT |
3263 | return; |
3264 | ||
3265 | /* Parse the list, and set the appropriate bits */ | |
22bedad3 | 3266 | netdev_for_each_mc_addr(ha, dev) { |
7f7f5316 | 3267 | if (idx < em_num) { |
22bedad3 | 3268 | gfar_set_mac_for_addr(dev, idx, ha->addr); |
7f7f5316 AF |
3269 | idx++; |
3270 | } else | |
22bedad3 | 3271 | gfar_set_hash_for_addr(dev, ha->addr); |
1da177e4 LT |
3272 | } |
3273 | } | |
1da177e4 LT |
3274 | } |
3275 | ||
7f7f5316 AF |
3276 | |
3277 | /* Clears each of the exact match registers to zero, so they | |
0977f817 JC |
3278 | * don't interfere with normal reception |
3279 | */ | |
7f7f5316 AF |
3280 | static void gfar_clear_exact_match(struct net_device *dev) |
3281 | { | |
3282 | int idx; | |
6a3c910c | 3283 | static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; |
7f7f5316 | 3284 | |
bc4598bc | 3285 | for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) |
b6bc7650 | 3286 | gfar_set_mac_for_addr(dev, idx, zero_arr); |
7f7f5316 AF |
3287 | } |
3288 | ||
1da177e4 LT |
3289 | /* Set the appropriate hash bit for the given addr */ |
3290 | /* The algorithm works like so: | |
3291 | * 1) Take the Destination Address (ie the multicast address), and | |
3292 | * do a CRC on it (little endian), and reverse the bits of the | |
3293 | * result. | |
3294 | * 2) Use the 8 most significant bits as a hash into a 256-entry | |
3295 | * table. The table is controlled through 8 32-bit registers: | |
3296 | * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is | |
3297 | * entry 255. This means that the 3 most significant bits in the
3298 | * hash index determine which gaddr register to use, and the 5 other bits
3299 | * indicate which bit (assuming an IBM numbering scheme, which | |
3300 | * for PowerPC (tm) is usually the case) in the register holds | |
0977f817 JC |
3301 | * the entry. |
3302 | */ | |
1da177e4 LT |
3303 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) |
3304 | { | |
3305 | u32 tempval; | |
3306 | struct gfar_private *priv = netdev_priv(dev); | |
6a3c910c | 3307 | u32 result = ether_crc(ETH_ALEN, addr); |
0bbaf069 KG |
3308 | int width = priv->hash_width; |
3309 | u8 whichbit = (result >> (32 - width)) & 0x1f; | |
3310 | u8 whichreg = result >> (32 - width + 5); | |
1da177e4 LT |
3311 | u32 value = (1 << (31-whichbit)); |
3312 | ||
0bbaf069 | 3313 | tempval = gfar_read(priv->hash_regs[whichreg]); |
1da177e4 | 3314 | tempval |= value; |
0bbaf069 | 3315 | gfar_write(priv->hash_regs[whichreg], tempval); |
1da177e4 LT |
3316 | } |
3317 | ||
7f7f5316 AF |
3318 | |
3319 | /* There are multiple MAC Address register pairs on some controllers | |
3320 | * This function sets the numth pair to a given address | |
3321 | */ | |
b6bc7650 JP |
3322 | static void gfar_set_mac_for_addr(struct net_device *dev, int num, |
3323 | const u8 *addr) | |
7f7f5316 AF |
3324 | { |
3325 | struct gfar_private *priv = netdev_priv(dev); | |
46ceb60c | 3326 | struct gfar __iomem *regs = priv->gfargrp[0].regs; |
7f7f5316 | 3327 | u32 tempval; |
f4983704 | 3328 | u32 __iomem *macptr = ®s->macstnaddr1; |
7f7f5316 AF |
3329 | |
3330 | macptr += num*2; | |
3331 | ||
83bfc3c4 CM |
3332 | /* For a station address of 0x12345678ABCD in transmission |
3333 | * order (BE), MACnADDR1 is set to 0xCDAB7856 and | |
3334 | * MACnADDR2 is set to 0x34120000. | |
0977f817 | 3335 | */ |
83bfc3c4 CM |
3336 | tempval = (addr[5] << 24) | (addr[4] << 16) | |
3337 | (addr[3] << 8) | addr[2]; | |
7f7f5316 | 3338 | |
83bfc3c4 | 3339 | gfar_write(macptr, tempval); |
7f7f5316 | 3340 | |
83bfc3c4 | 3341 | tempval = (addr[1] << 24) | (addr[0] << 16); |
7f7f5316 AF |
3342 | |
3343 | gfar_write(macptr+1, tempval); | |
3344 | } | |
3345 | ||
1da177e4 | 3346 | /* GFAR error interrupt handler */ |
f4983704 | 3347 | static irqreturn_t gfar_error(int irq, void *grp_id) |
1da177e4 | 3348 | { |
f4983704 SG |
3349 | struct gfar_priv_grp *gfargrp = grp_id; |
3350 | struct gfar __iomem *regs = gfargrp->regs; | |
3351 | struct gfar_private *priv= gfargrp->priv; | |
3352 | struct net_device *dev = priv->ndev; | |
1da177e4 LT |
3353 | |
3354 | /* Save ievent for future reference */ | |
f4983704 | 3355 | u32 events = gfar_read(®s->ievent); |
1da177e4 LT |
3356 | |
3357 | /* Clear IEVENT */ | |
f4983704 | 3358 | gfar_write(®s->ievent, events & IEVENT_ERR_MASK); |
d87eb127 SW |
3359 | |
3360 | /* Magic Packet is not an error. */ | |
b31a1d8b | 3361 | if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && |
d87eb127 SW |
3362 | (events & IEVENT_MAG)) |
3363 | events &= ~IEVENT_MAG; | |
1da177e4 LT |
3364 | |
3365 | /* Hmm... */ | |
0bbaf069 | 3366 | if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) |
bc4598bc JC |
3367 | netdev_dbg(dev, |
3368 | "error interrupt (ievent=0x%08x imask=0x%08x)\n", | |
59deab26 | 3369 | events, gfar_read(®s->imask)); |
1da177e4 LT |
3370 | |
3371 | /* Update the error counters */ | |
3372 | if (events & IEVENT_TXE) { | |
09f75cd7 | 3373 | dev->stats.tx_errors++; |
1da177e4 LT |
3374 | |
3375 | if (events & IEVENT_LC) | |
09f75cd7 | 3376 | dev->stats.tx_window_errors++; |
1da177e4 | 3377 | if (events & IEVENT_CRL) |
09f75cd7 | 3378 | dev->stats.tx_aborted_errors++; |
1da177e4 | 3379 | if (events & IEVENT_XFUN) { |
836cf7fa AV |
3380 | unsigned long flags; |
3381 | ||
59deab26 JP |
3382 | netif_dbg(priv, tx_err, dev, |
3383 | "TX FIFO underrun, packet dropped\n"); | |
09f75cd7 | 3384 | dev->stats.tx_dropped++; |
212079df | 3385 | atomic64_inc(&priv->extra_stats.tx_underrun); |
1da177e4 | 3386 | |
836cf7fa AV |
3387 | local_irq_save(flags); |
3388 | lock_tx_qs(priv); | |
3389 | ||
1da177e4 | 3390 | /* Reactivate the Tx Queues */ |
fba4ed03 | 3391 | gfar_write(®s->tstat, gfargrp->tstat); |
836cf7fa AV |
3392 | |
3393 | unlock_tx_qs(priv); | |
3394 | local_irq_restore(flags); | |
1da177e4 | 3395 | } |
59deab26 | 3396 | netif_dbg(priv, tx_err, dev, "Transmit Error\n"); |
1da177e4 LT |
3397 | } |
3398 | if (events & IEVENT_BSY) { | |
09f75cd7 | 3399 | dev->stats.rx_errors++; |
212079df | 3400 | atomic64_inc(&priv->extra_stats.rx_bsy); |
1da177e4 | 3401 | |
f4983704 | 3402 | gfar_receive(irq, grp_id); |
1da177e4 | 3403 | |
59deab26 JP |
3404 | netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", |
3405 | gfar_read(®s->rstat)); | |
1da177e4 LT |
3406 | } |
3407 | if (events & IEVENT_BABR) { | |
09f75cd7 | 3408 | dev->stats.rx_errors++; |
212079df | 3409 | atomic64_inc(&priv->extra_stats.rx_babr); |
1da177e4 | 3410 | |
59deab26 | 3411 | netif_dbg(priv, rx_err, dev, "babbling RX error\n"); |
1da177e4 LT |
3412 | } |
3413 | if (events & IEVENT_EBERR) { | |
212079df | 3414 | atomic64_inc(&priv->extra_stats.eberr); |
59deab26 | 3415 | netif_dbg(priv, rx_err, dev, "bus error\n"); |
1da177e4 | 3416 | } |
59deab26 JP |
3417 | if (events & IEVENT_RXC) |
3418 | netif_dbg(priv, rx_status, dev, "control frame\n"); | |
1da177e4 LT |
3419 | |
3420 | if (events & IEVENT_BABT) { | |
212079df | 3421 | atomic64_inc(&priv->extra_stats.tx_babt); |
59deab26 | 3422 | netif_dbg(priv, tx_err, dev, "babbling TX error\n"); |
1da177e4 LT |
3423 | } |
3424 | return IRQ_HANDLED; | |
3425 | } | |
3426 | ||
6ce29b0e CM |
3427 | static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) |
3428 | { | |
3429 | struct phy_device *phydev = priv->phydev; | |
3430 | u32 val = 0; | |
3431 | ||
3432 | if (!phydev->duplex) | |
3433 | return val; | |
3434 | ||
3435 | if (!priv->pause_aneg_en) { | |
3436 | if (priv->tx_pause_en) | |
3437 | val |= MACCFG1_TX_FLOW; | |
3438 | if (priv->rx_pause_en) | |
3439 | val |= MACCFG1_RX_FLOW; | |
3440 | } else { | |
3441 | u16 lcl_adv, rmt_adv; | |
3442 | u8 flowctrl; | |
3443 | /* get link partner capabilities */ | |
3444 | rmt_adv = 0; | |
3445 | if (phydev->pause) | |
3446 | rmt_adv = LPA_PAUSE_CAP; | |
3447 | if (phydev->asym_pause) | |
3448 | rmt_adv |= LPA_PAUSE_ASYM; | |
3449 | ||
43ef8d29 PMB |
3450 | lcl_adv = 0; |
3451 | if (phydev->advertising & ADVERTISED_Pause) | |
3452 | lcl_adv |= ADVERTISE_PAUSE_CAP; | |
3453 | if (phydev->advertising & ADVERTISED_Asym_Pause) | |
3454 | lcl_adv |= ADVERTISE_PAUSE_ASYM; | |
6ce29b0e CM |
3455 | |
3456 | flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); | |
3457 | if (flowctrl & FLOW_CTRL_TX) | |
3458 | val |= MACCFG1_TX_FLOW; | |
3459 | if (flowctrl & FLOW_CTRL_RX) | |
3460 | val |= MACCFG1_RX_FLOW; | |
3461 | } | |
3462 | ||
3463 | return val; | |
3464 | } | |
3465 | ||
3466 | static noinline void gfar_update_link_state(struct gfar_private *priv) | |
3467 | { | |
3468 | struct gfar __iomem *regs = priv->gfargrp[0].regs; | |
3469 | struct phy_device *phydev = priv->phydev; | |
45b679c9 MP |
3470 | struct gfar_priv_rx_q *rx_queue = NULL; |
3471 | int i; | |
3472 | struct rxbd8 *bdp; | |
6ce29b0e CM |
3473 | |
3474 | if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) | |
3475 | return; | |
3476 | ||
3477 | if (phydev->link) { | |
3478 | u32 tempval1 = gfar_read(®s->maccfg1); | |
3479 | u32 tempval = gfar_read(®s->maccfg2); | |
3480 | u32 ecntrl = gfar_read(®s->ecntrl); | |
45b679c9 | 3481 | u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW); |
6ce29b0e CM |
3482 | |
3483 | if (phydev->duplex != priv->oldduplex) { | |
3484 | if (!(phydev->duplex)) | |
3485 | tempval &= ~(MACCFG2_FULL_DUPLEX); | |
3486 | else | |
3487 | tempval |= MACCFG2_FULL_DUPLEX; | |
3488 | ||
3489 | priv->oldduplex = phydev->duplex; | |
3490 | } | |
3491 | ||
3492 | if (phydev->speed != priv->oldspeed) { | |
3493 | switch (phydev->speed) { | |
3494 | case 1000: | |
3495 | tempval = | |
3496 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); | |
3497 | ||
3498 | ecntrl &= ~(ECNTRL_R100); | |
3499 | break; | |
3500 | case 100: | |
3501 | case 10: | |
3502 | tempval = | |
3503 | ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); | |
3504 | ||
3505 | /* Reduced mode distinguishes | |
3506 | * between 10 and 100 | |
3507 | */ | |
3508 | if (phydev->speed == SPEED_100) | |
3509 | ecntrl |= ECNTRL_R100; | |
3510 | else | |
3511 | ecntrl &= ~(ECNTRL_R100); | |
3512 | break; | |
3513 | default: | |
3514 | netif_warn(priv, link, priv->ndev, | |
3515 | "Ack! Speed (%d) is not 10/100/1000!\n", | |
3516 | phydev->speed); | |
3517 | break; | |
3518 | } | |
3519 | ||
3520 | priv->oldspeed = phydev->speed; | |
3521 | } | |
3522 | ||
3523 | tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); | |
3524 | tempval1 |= gfar_get_flowctrl_cfg(priv); | |
3525 | ||
45b679c9 MP |
3526 | /* Turn last free buffer recording on */ |
3527 | if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { | |
3528 | for (i = 0; i < priv->num_rx_queues; i++) { | |
3529 | rx_queue = priv->rx_queue[i]; | |
3530 | bdp = rx_queue->cur_rx; | |
3531 | /* skip to previous bd */ | |
3532 | bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1, | |
3533 | rx_queue->rx_bd_base, | |
3534 | rx_queue->rx_ring_size); | |
3535 | ||
3536 | if (rx_queue->rfbptr) | |
3537 | gfar_write(rx_queue->rfbptr, (u32)bdp); | |
3538 | } | |
3539 | ||
3540 | priv->tx_actual_en = 1; | |
3541 | } | |
3542 | ||
3543 | if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval)) | |
3544 | priv->tx_actual_en = 0; | |
3545 | ||
6ce29b0e CM |
3546 | gfar_write(®s->maccfg1, tempval1); |
3547 | gfar_write(®s->maccfg2, tempval); | |
3548 | gfar_write(®s->ecntrl, ecntrl); | |
3549 | ||
3550 | if (!priv->oldlink) | |
3551 | priv->oldlink = 1; | |
3552 | ||
3553 | } else if (priv->oldlink) { | |
3554 | priv->oldlink = 0; | |
3555 | priv->oldspeed = 0; | |
3556 | priv->oldduplex = -1; | |
3557 | } | |
3558 | ||
3559 | if (netif_msg_link(priv)) | |
3560 | phy_print_status(phydev); | |
3561 | } | |
3562 | ||
b31a1d8b AF |
3563 | static struct of_device_id gfar_match[] = |
3564 | { | |
3565 | { | |
3566 | .type = "network", | |
3567 | .compatible = "gianfar", | |
3568 | }, | |
46ceb60c SG |
3569 | { |
3570 | .compatible = "fsl,etsec2", | |
3571 | }, | |
b31a1d8b AF |
3572 | {}, |
3573 | }; | |
e72701ac | 3574 | MODULE_DEVICE_TABLE(of, gfar_match); |
b31a1d8b | 3575 | |
1da177e4 | 3576 | /* Structure for a device driver */ |
74888760 | 3577 | static struct platform_driver gfar_driver = { |
4018294b GL |
3578 | .driver = { |
3579 | .name = "fsl-gianfar", | |
4018294b GL |
3580 | .pm = GFAR_PM_OPS, |
3581 | .of_match_table = gfar_match, | |
3582 | }, | |
1da177e4 LT |
3583 | .probe = gfar_probe, |
3584 | .remove = gfar_remove, | |
3585 | }; | |
3586 | ||
db62f684 | 3587 | module_platform_driver(gfar_driver); |