// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx families of integrated processors.
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning of the ring is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or an amount of time has passed). In NAPI, the
 * interrupt handler signals that there is work to be done and
 * exits. Processing then starts at the last known empty
 * descriptor and handles every subsequent descriptor until none
 * are left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to point to this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time and points the
 * descriptor at the buffer that was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller has finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
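
/* Illustrative sketch only (editorial, not part of the driver): the
 * ring-with-wrap scheme described above means software never follows a
 * "next" pointer; it simply steps through a contiguous array and, past
 * the descriptor whose WRAP bit is set, returns to the base. The name
 * below is hypothetical; the driver's real helpers are skip_txbd() and
 * next_txbd() further down in this file:
 *
 *	struct txbd8 *ring_next(struct txbd8 *bdp, struct txbd8 *base,
 *				int ring_size)
 *	{
 *		return (bdp + 1 >= base + ring_size) ? base : bdp + 1;
 *	}
 */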

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/net_tstamp.h>

#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>
#endif

#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (5*HZ)

const char gfar_driver_version[] = "2.0";

static int gfar_enet_open(struct net_device *dev);
static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
				int alloc_cnt);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}
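
/* Note on ordering (editorial, based on the code above): bufPtr is
 * written before lstatus because setting RXBD_EMPTY in lstatus is what
 * hands the descriptor over to the controller; the write barrier in
 * between keeps the hardware from observing a valid "empty" flag while
 * the buffer pointer is still stale.
 */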

static void gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	u32 __iomem *rfbptr;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
					    TXBD_WRAP);
	}

	rfbptr = &regs->rfbptr0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];

		rx_queue->next_to_clean = 0;
		rx_queue->next_to_use = 0;
		rx_queue->next_to_alloc = 0;

		/* make sure next_to_clean != next_to_use after this
		 * by leaving at least 1 unused descriptor
		 */
		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));

		rx_queue->rfbptr = rfbptr;
		rfbptr += 2;
	}
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->ndev = ndev;
		rx_queue->dev = dev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (j = 0; j < tx_queue->tx_ring_size; j++)
			tx_queue->tx_skbuff[j] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
					    sizeof(*rx_queue->rx_buff),
					    GFP_KERNEL);
		if (!rx_queue->rx_buff)
			goto cleanup;
	}

	gfar_init_bds(ndev);

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
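
/* Resulting layout of the single coherent region allocated above
 * (editorial summary): all TX rings first, then all RX rings, e.g. for
 * two queues of each kind:
 *
 *	vaddr/addr -> | txbd8 ring 0 | txbd8 ring 1 | rxbd8 ring 0 | rxbd8 ring 1 |
 *
 * which is why free_skb_resources() can release everything with one
 * dma_free_coherent() call anchored at tx_queue[0].
 */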

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

static void gfar_rx_offload_en(struct gfar_private *priv)
{
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en || priv->rx_filer_enable)
		priv->uses_rxfcb = 1;
}

static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask,
				      unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}
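
/* Illustrative use of the mask arguments (editorial, not a call the
 * driver makes): refresh coalescing for tx queue 0 and rx queues 0-1
 * only, leaving the other queues untouched:
 *
 *	gfar_configure_coalescing(priv, BIT(0), BIT(0) | BIT(1));
 *
 * gfar_configure_coalescing_all() above is simply the 0xFF/0xFF case.
 */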

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	eth_mac_addr(dev, p);

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_change_carrier = fixed_phy_change_carrier,
	.ndo_set_mac_address = gfar_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;

		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;

		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->ndev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (!gfar_irq(grp, TX)->irq ||
		    !gfar_irq(grp, RX)->irq ||
		    !gfar_irq(grp, ER)->irq)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 rxq_mask, txq_mask;
		int ret;

		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);

		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
		if (!ret) {
			grp->rx_bit_map = rxq_mask ?
			rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
		if (!ret) {
			grp->tx_bit_map = txq_mask ?
			txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to revert the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}
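
/* Worked example of the bitrev8() step above (editorial): a bit_map of
 * 0x80 has only its MSB set, which by hardware convention selects
 * queue 0; bitrev8(0x80) == 0x01, so for_each_set_bit() then yields
 * i == 0, the software index of that same queue.
 */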

static int gfar_of_group_count(struct device_node *np)
{
	struct device_node *child;
	int num = 0;

	for_each_available_child_of_node(np, child)
		if (of_node_name_eq(child, "queue-group"))
			num++;

	return num;
}

static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	u32 stash_len = 0;
	u32 stash_idx = 0;
	unsigned int num_tx_qs, num_rx_qs;
	unsigned short mode, poll_mode;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = gfar_of_group_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			u32 tx_queues, rx_queues;
			int ret;

			/* parse the num of HW tx and rx queues */
			ret = of_property_read_u32(np, "fsl,num_tx_queues",
						   &tx_queues);
			num_tx_qs = ret ? 1 : tx_queues;

			ret = of_property_read_u32(np, "fsl,num_rx_queues",
						   &rx_queues);
			num_rx_qs = ret ? 1 : rx_queues;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = of_property_read_string(np, "model", &model);
	if (err) {
		pr_err("Device model property missing, aborting\n");
		goto rx_alloc_failed;
	}

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_available_child_of_node(np, child) {
			if (!of_node_name_eq(child, "queue-group"))
				continue;

			err = gfar_parse_group(child, priv, model);
			if (err) {
				of_node_put(child);
				goto err_grp_init;
			}
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	if (of_property_read_bool(np, "bd-stash")) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	err = of_property_read_u32(np, "rx-stash-len", &stash_len);

	if (err == 0)
		priv->rx_stash_size = stash_len;

	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

	if (err == 0)
		priv->rx_stash_index = stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (!IS_ERR(mac_addr))
		ether_addr_copy(dev->dev_addr, mac_addr);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER |
				      FSL_GIANFAR_DEV_HAS_RX_FILER;

	err = of_property_read_string(np, "phy-connection-type", &ctype);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (err == 0 && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_find_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	if (of_get_property(np, "fsl,wake-on-filer", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}
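
/* Hedged sketch of the device-tree shape gfar_of_init() parses; node
 * names, addresses and interrupt values below are made up for
 * illustration, only the compatible string, property names and the
 * "queue-group" child-node name come from the code above:
 *
 *	ethernet@24000 {
 *		compatible = "fsl,etsec2";
 *		model = "eTSEC";
 *		phy-handle = <&phy0>;
 *		fsl,num_tx_queues = <8>;
 *		fsl,num_rx_queues = <8>;
 *		queue-group {
 *			...
 *		};
 *	};
 */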

static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}
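
/* Userspace reaches the two hwtstamp handlers above through the
 * standard SIOC[SG]HWTSTAMP path; a minimal sketch (error handling and
 * socket setup omitted, names like sock_fd are placeholders):
 *
 *	struct hwtstamp_config cfg = { .rx_filter = HWTSTAMP_FILTER_ALL };
 *	struct ifreq ifr = { .ifr_name = "eth0" };
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */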

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
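
/* Resulting table shape (editorial summary): one default entry at
 * MAX_FILER_IDX plus six calls of cluster_entry_per_class(), each of
 * which consumes four entries walking downward, so the top 25 filer
 * entries hold real rules and cur_filer_idx ends up at
 * MAX_FILER_IDX - 24; everything below that index is a
 * RQFCR_CMP_NOMATCH placeholder.
 */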

#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	/* P2020/P1010 Rev 1; MPC8548 Rev 2 */
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

void gfar_mac_reset(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(3);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	udelay(3);

	gfar_rx_offload_en(priv);

	/* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;

	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
	 * are marked as truncated.  Avoid this by MACCFG2[Huge Frame]=1,
	 * and by checking RxBD[LG] and discarding larger than MAXFRM.
	 */
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

	gfar_write(&regs->maccfg2, tempval);

	/* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	if (priv->extended_hash)
		gfar_clear_exact_match(priv->ndev);

	gfar_mac_rx_config(priv);

	gfar_mac_tx_config(priv);

	gfar_set_mac_address(priv->ndev);

	gfar_set_multi(priv->ndev);

	/* clear ievent and imask before configuring coalescing */
	gfar_ints_disable(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);
}

static void gfar_hw_init(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 attrs;

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(priv);

	gfar_mac_reset(priv);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing
	 * depending on driver parameters
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* FIFO configs */
	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);

	/* Program the interrupt steering regs, only for MG devices */
	if (priv->num_grps > 1)
		gfar_write_isrg(priv);
}

static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	struct device_node *np = ofdev->dev.of_node;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	int err = 0, i;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->dev = &ofdev->dev;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(ofdev, priv);

	gfar_detect_errata(priv);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	/* MTU range: 50 - 9586 */
	dev->mtu = 1500;
	dev->min_mtu = 50;
	dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++) {
		if (priv->poll_mode == GFAR_SQ_POLLING) {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
					  gfar_poll_tx_sq, 2);
		} else { /* GFAR_MQ_POLLING */
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx, GFAR_DEV_WEIGHT);
			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
					  gfar_poll_tx, 2);
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	gfar_init_addr_hash_table(priv);

	/* Insert receive time stamps into padding alignment bytes, and
	 * plus 2 bytes padding to ensure the cpu alignment.
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		priv->padding = 8 + DEFAULT_PADDING;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* Always enable rx filer if available */
	priv->rx_filer_enable =
	    (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	set_bit(GFAR_DOWN, &priv->state);

	gfar_hw_init(priv);

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
		priv->wol_supported |= GFAR_WOL_MAGIC;

	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
	    priv->rx_filer_enable)
		priv->wol_supported |= GFAR_WOL_FILER_UCAST;

	device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(gfar_irq(grp, TX)->name, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	of_node_put(priv->phy_node);
	of_node_put(priv->tbi_node);
	free_gfar_dev(priv);
	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = platform_get_drvdata(ofdev);
	struct device_node *np = ofdev->dev.of_node;

	of_node_put(priv->phy_node);
	of_node_put(priv->tbi_node);

	unregister_netdev(priv->ndev);

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);

	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);

	return 0;
}

#ifdef CONFIG_PM

static void __gfar_filer_disable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 temp;

	temp = gfar_read(&regs->rctrl);
	temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
	gfar_write(&regs->rctrl, temp);
}

static void __gfar_filer_enable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 temp;

	temp = gfar_read(&regs->rctrl);
	temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
	gfar_write(&regs->rctrl, temp);
}

/* Filer rules implementing wol capabilities */
static void gfar_filer_config_wol(struct gfar_private *priv)
{
	unsigned int i;
	u32 rqfcr;

	__gfar_filer_disable(priv);

	/* clear the filer table, reject any packet by default */
	rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
	for (i = 0; i <= MAX_FILER_IDX; i++)
		gfar_write_filer(priv, i, rqfcr, 0);

	i = 0;
	if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
		/* unicast packet, accept it */
		struct net_device *ndev = priv->ndev;
		/* get the default rx queue index */
		u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
		u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
				    (ndev->dev_addr[1] << 8) |
				     ndev->dev_addr[2];

		rqfcr = (qindex << 10) | RQFCR_AND |
			RQFCR_CMP_EXACT | RQFCR_PID_DAH;

		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);

		dest_mac_addr = (ndev->dev_addr[3] << 16) |
				(ndev->dev_addr[4] << 8) |
				 ndev->dev_addr[5];
		rqfcr = (qindex << 10) | RQFCR_GPI |
			RQFCR_CMP_EXACT | RQFCR_PID_DAL;
		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
	}

	__gfar_filer_enable(priv);
}

static void gfar_filer_restore_table(struct gfar_private *priv)
{
	u32 rqfcr, rqfpr;
	unsigned int i;

	__gfar_filer_disable(priv);

	for (i = 0; i <= MAX_FILER_IDX; i++) {
		rqfcr = priv->ftp_rqfcr[i];
		rqfpr = priv->ftp_rqfpr[i];
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}

	__gfar_filer_enable(priv);
}

/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
static void gfar_start_wol_filer(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~DMACTRL_GRS;
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear RHLT, so that the DMA starts polling now */
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* enable the Filer General Purpose Interrupt */
		gfar_write(&regs->imask, IMASK_FGPI);
	}

	/* Enable Rx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= MACCFG1_RX_EN;
	gfar_write(&regs->maccfg1, tempval);
}

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u16 wol = priv->wol_opts;

	if (!netif_running(ndev))
		return 0;

	disable_napi(priv);
	netif_tx_lock(ndev);
	netif_device_detach(ndev);
	netif_tx_unlock(ndev);

	gfar_halt(priv);

	if (wol & GFAR_WOL_MAGIC) {
		/* Enable interrupt on Magic Packet */
		gfar_write(&regs->imask, IMASK_MAG);

		/* Enable Magic Packet mode */
		tempval = gfar_read(&regs->maccfg2);
		tempval |= MACCFG2_MPEN;
		gfar_write(&regs->maccfg2, tempval);

		/* re-enable the Rx block */
		tempval = gfar_read(&regs->maccfg1);
		tempval |= MACCFG1_RX_EN;
		gfar_write(&regs->maccfg1, tempval);

	} else if (wol & GFAR_WOL_FILER_UCAST) {
		gfar_filer_config_wol(priv);
		gfar_start_wol_filer(priv);

	} else {
		phy_stop(ndev->phydev);
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u16 wol = priv->wol_opts;

	if (!netif_running(ndev))
		return 0;

	if (wol & GFAR_WOL_MAGIC) {
		/* Disable Magic Packet mode */
		tempval = gfar_read(&regs->maccfg2);
		tempval &= ~MACCFG2_MPEN;
		gfar_write(&regs->maccfg2, tempval);

	} else if (wol & GFAR_WOL_FILER_UCAST) {
		/* need to stop rx only, tx is already down */
		gfar_halt(priv);
		gfar_filer_restore_table(priv);

	} else {
		phy_start(ndev->phydev);
	}

	gfar_start(priv);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

		return 0;
	}

	gfar_init_bds(ndev);

	gfar_mac_reset(priv);

	gfar_init_tx_rx_base(priv);

	gfar_start(priv);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static const struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		} else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct gfar_private *priv = netdev_priv(dev);
	phy_interface_t interface;
	struct phy_device *phydev;
	struct ethtool_eee edata;

	linkmode_set_bit_array(phy_10_100_features_array,
			       ARRAY_SIZE(phy_10_100_features_array),
			       mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				interface);
	if (!phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);

	/* Add support for flow control */
	phy_support_asym_pause(phydev);

	/* disable EEE autoneg, EEE not supported by eTSEC */
	memset(&edata, 0, sizeof(struct ethtool_eee));
	phy_ethtool_set_eee(phydev, &edata);

	return 0;
}

/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
			 "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
		put_device(&tbiphy->mdio.dev);
		return;
	}

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);

	put_device(&tbiphy->mdio.dev);
}

static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	unsigned int timeout;
	int stopped;

	gfar_ints_disable(priv);

	if (gfar_is_dma_stopped(priv))
		return;

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

retry:
	timeout = 1000;
	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
		cpu_relax();
		timeout--;
	}

	if (!timeout)
		stopped = gfar_is_dma_stopped(priv);

	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
	    !__gfar_is_rx_idle(priv))
		goto retry;
}

/* Halt the receive and transmit queues */
void gfar_halt(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Disable the Rx/Tx hw queues */
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);

	mdelay(10);

	gfar_halt_nodisable(priv);

	/* Disable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);

	smp_mb__before_atomic();
	set_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	disable_napi(priv);

	/* disable ints and gracefully shut down Rx/Tx DMA */
	gfar_halt(priv);

	phy_stop(dev->phydev);

	free_skb_resources(priv);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
				       be16_to_cpu(txbdp->length),
				       DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	int i;

	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;

	dev_kfree_skb(rx_queue->skb);

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];

		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;

		if (!rxb->page)
			continue;

		dma_unmap_page(rx_queue->dev, rxb->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(rxb->page);

		rxb->page = NULL;
	}

	kfree(rx_queue->rx_buff);
	rx_queue->rx_buff = NULL;
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;

		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_buff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(priv->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
}

void gfar_start(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx/Tx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);
	gfar_write(&regs->tqueue, priv->tqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
	}

	/* Enable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	gfar_ints_enable(priv);

	netif_trans_update(priv->ndev); /* prevent tx timeout */
}

static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(gfar_irq(grp, TX)->irq, grp);
	free_irq(gfar_irq(grp, RX)->irq, grp);
	free_irq(gfar_irq(grp, ER)->irq, grp);
}

static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive
		 */
		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
				  gfar_irq(grp, ER)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, ER)->irq);

			goto err_irq_fail;
		}
		enable_irq_wake(gfar_irq(grp, ER)->irq);

		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto tx_irq_fail;
		}
		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
				  gfar_irq(grp, RX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, RX)->irq);
			goto rx_irq_fail;
		}
		enable_irq_wake(gfar_irq(grp, RX)->irq);

	} else {
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto err_irq_fail;
		}
		enable_irq_wake(gfar_irq(grp, TX)->irq);
	}

	return 0;

rx_irq_fail:
	free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
	free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
	return err;
}
static void gfar_free_irq(struct gfar_private *priv)
{
	int i;

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
				 &priv->gfargrp[i]);
	}
}
static int gfar_request_irq(struct gfar_private *priv)
{
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			return err;
		}
	}

	return 0;
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	int err;

	gfar_mac_reset(priv);

	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_tx_rx_base(priv);

	smp_mb__before_atomic();
	clear_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	/* Start Rx/Tx DMA and enable the interrupts */
	gfar_start(priv);

	/* force link state update after mac reset */
	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	phy_start(ndev->phydev);

	enable_napi(priv);

	netif_tx_wake_all_queues(ndev);

	return 0;
}
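
/* The intent of the barrier pairing around clear_bit(GFAR_DOWN) above:
 * smp_mb__before_atomic() makes the ring and register setup performed by
 * gfar_init_tx_rx_base() visible before any other context can observe
 * GFAR_DOWN as cleared, and smp_mb__after_atomic() keeps the DMA start
 * below from being reordered ahead of the bit clear.
 */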
/* Called when something needs to use the ethernet device
 * Returns 0 for success.
 */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	err = init_phy(dev);
	if (err)
		return err;

	err = gfar_request_irq(priv);
	if (err)
		return err;

	err = startup_gfar(dev);

	return err;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	/* If we're here, it's a IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	u8 flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
	} else
		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr
	 */
	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
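
/* Worked example (illustration only): for an untagged IPv4/TCP frame with
 * an 8-byte FCB pushed in front of it, skb->data points at the FCB, so
 * skb_network_offset() is 8 (FCB) + 14 (Ethernet header) = 22, giving
 * l3os = 22 - 8 = 14; with no IP options l4os is 20, the IPv4 header
 * length. The controller uses these offsets to find the headers when it
 * inserts the checksum.
 */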
static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
}
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
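
/* A minimal sketch (illustrative, not used by the driver) of the wrap
 * arithmetic in skip_txbd(): stepping a BD pointer past the end of the
 * ring lands it back at the start, mirroring the WRAP bit semantics of
 * the hardware ring described at the top of this file.
 */
static inline void gfar_skip_txbd_wrap_example(struct txbd8 *base)
{
	/* With an 8-entry ring, 3 steps from index 6 wrap to index 1:
	 * (6 + 3) - 8 = 1. skip_txbd() only does pointer arithmetic and
	 * comparisons, so no descriptor memory is touched here.
	 */
	WARN_ON(skip_txbd(base + 6, 3, base, 8) != base + 1);
}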
/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
				       unsigned long fcb_addr)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
	       (fcb_addr % 0x20) > 0x18);
}

/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
				       unsigned int len)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
	       (len > 2500));
}
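
/* Illustration: with the 0x20-byte granularity above, gfar_csum_errata_12()
 * fires whenever the FCB starts in the last 7 bytes of a 32-byte window,
 * i.e. fcb_addr % 0x20 is 0x19..0x1f; such frames fall back to software
 * checksumming via skb_checksum_help() in gfar_start_xmit().
 */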
/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer
 */
static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	skb_frag_t *frag;
	int i, rq = 0;
	int do_tstamp, do_csum, do_vlan;
	u32 bufaddr;
	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
	do_vlan = skb_vlan_tag_present(skb);
	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    priv->hwts_tx_en;

	if (do_csum || do_vlan)
		fcb_len = GMAC_FCB_LEN;

	/* check if time stamp should be generated */
	if (unlikely(do_tstamp))
		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;

	/* make space for additional header when fcb is needed */
	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, fcb_len);
		if (!skb_new) {
			dev->stats.tx_errors++;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (skb->sk)
			skb_set_owner_w(skb_new, skb->sk);
		dev_consume_skb_any(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	bytes_sent = skb->len;
	tx_queue->stats.tx_bytes += bytes_sent;
	/* keep Tx bytes on wire for BQL accounting */
	GFAR_CB(skb)->bytes_sent = bytes_sent;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = be32_to_cpu(txbdp->lstatus);

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Add TxFCB if required */
	if (fcb_len) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	/* Set up checksumming */
	if (do_csum) {
		gfar_tx_checksum(skb, fcb, fcb_len);

		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
			if (do_vlan || do_tstamp) {
				/* put back a new fcb for vlan/tstamp TOE */
				fcb = gfar_add_fcb(skb);
			} else {
				/* Tx TOE not used */
				lstatus &= ~(BD_LFLAG(TXBD_TOE));
				fcb = NULL;
			}
		}
	}

	if (do_vlan)
		gfar_tx_vlan(skb, fcb);

	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
		goto dma_map_err;

	txbdp_start->bufPtr = cpu_to_be32(bufaddr);

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (likely(!nr_frags)) {
		if (likely(!do_tstamp))
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		u32 lstatus_start = lstatus;

		/* Place the fragment addresses and lengths into the TxBDs */
		frag = &skb_shinfo(skb)->frags[0];
		for (i = 0; i < nr_frags; i++, frag++) {
			unsigned int size;

			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			size = skb_frag_size(frag);

			lstatus = be32_to_cpu(txbdp->lstatus) | size |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
						   size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
				goto dma_map_err;

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = cpu_to_be32(bufaddr);
			txbdp->lstatus = cpu_to_be32(lstatus);
		}

		lstatus = lstatus_start;
	}

	/* If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);

		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
		bufaddr += fcb_len;

		lstatus_ts |= BD_LFLAG(TXBD_READY) |
			      (skb_headlen(skb) - fcb_len);
		if (!nr_frags)
			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;

		/* Setup tx hardware time stamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		fcb->ptp = 1;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	netdev_tx_sent_queue(txq, bytes_sent);

	gfar_wmb();

	txbdp_start->lstatus = cpu_to_be32(lstatus);

	gfar_wmb(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow.
	 */
	spin_lock_bh(&tx_queue->txlock);
	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);
	spin_unlock_bh(&tx_queue->txlock);

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full.  We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	return NETDEV_TX_OK;

dma_map_err:
	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
	if (do_tstamp)
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	for (i = 0; i < nr_frags; i++) {
		lstatus = be32_to_cpu(txbdp->lstatus);
		if (!(lstatus & BD_LFLAG(TXBD_READY)))
			break;

		lstatus &= ~BD_LFLAG(TXBD_READY);
		txbdp->lstatus = cpu_to_be32(lstatus);
		bufaddr = be32_to_cpu(txbdp->bufPtr);
		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
			       DMA_TO_DEVICE);
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	}
	gfar_wmb();
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
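
/* Descriptor layout example (illustration only): a timestamped, linear skb
 * consumes two BDs. The first BD carries only the 8-byte FCB (data length
 * GMAC_FCB_LEN), the second points past the FCB and TxPAL at the frame
 * data, and only the second gets TXBD_LAST | TXBD_INTERRUPT. A plain
 * linear skb is one BD; each page fragment of a nonlinear skb adds one
 * more, which is why nr_txbds above is nr_frags + 1 (or + 2 with
 * timestamping).
 */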
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(dev->phydev);

	gfar_free_irq(priv);

	return 0;
}
/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gfar_private *priv = netdev_priv(dev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	dev->mtu = new_mtu;

	if (dev->flags & IFF_UP)
		startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return 0;
}
void reset_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	stop_gfar(ndev);
	startup_gfar(ndev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);
}
/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	reset_gfar(priv->ndev);
}
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}
/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = be32_to_cpu(lbdp->lstatus);

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = be16_to_cpu(next->length) +
				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = be16_to_cpu(bdp->length);

		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
				 buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
					  ~0x7UL);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			gfar_clear_txbd_status(bdp);
			bdp = next;
		}

		gfar_clear_txbd_status(bdp);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
				       be16_to_cpu(bdp->length),
				       DMA_TO_DEVICE);
			gfar_clear_txbd_status(bdp);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += GFAR_CB(skb)->bytes_sent;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock(&tx_queue->txlock);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock(&tx_queue->txlock);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (tx_queue->num_txbdfree &&
	    netif_tx_queue_stopped(txq) &&
	    !(test_bit(GFAR_DOWN, &priv->state)))
		netif_wake_subqueue(priv->ndev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
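
/* The netdev_tx_completed_queue() call above is the BQL counterpart of
 * netdev_tx_sent_queue() in gfar_start_xmit(): both sides account bytes
 * using GFAR_CB(skb)->bytes_sent, which is recorded before any FCB/TxPAL
 * push, so the sent and completed byte counts always match.
 */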
static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
{
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
		__free_page(page);

		return false;
	}

	rxb->dma = addr;
	rxb->page = page;
	rxb->page_offset = 0;

	return true;
}
static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
{
	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
	struct gfar_extra_stats *estats = &priv->extra_stats;

	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
	atomic64_inc(&estats->rx_alloc_err);
}
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
				int alloc_cnt)
{
	struct rxbd8 *bdp;
	struct gfar_rx_buff *rxb;
	int i;

	i = rx_queue->next_to_use;
	bdp = &rx_queue->rx_bd_base[i];
	rxb = &rx_queue->rx_buff[i];

	while (alloc_cnt--) {
		/* try reuse page */
		if (unlikely(!rxb->page)) {
			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
				gfar_rx_alloc_err(rx_queue);
				break;
			}
		}

		/* Setup the new RxBD */
		gfar_init_rxbdp(rx_queue, bdp,
				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);

		/* Update to the next pointer */
		bdp++;
		rxb++;

		if (unlikely(++i == rx_queue->rx_ring_size)) {
			i = 0;
			bdp = rx_queue->rx_bd_base;
			rxb = rx_queue->rx_buff;
		}
	}

	rx_queue->next_to_use = i;
	rx_queue->next_to_alloc = i;
}
static void count_errors(u32 lstatus, struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
		stats->rx_length_errors++;

		atomic64_inc(&estats->rx_trunc);

		return;
	}
	/* Count the errors, if there were any */
	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (lstatus & BD_LFLAG(RXBD_LARGE))
			atomic64_inc(&estats->rx_large);
		else
			atomic64_inc(&estats->rx_short);
	}
	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
		stats->rx_frame_errors++;
		atomic64_inc(&estats->rx_nonoctet);
	}
	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
		atomic64_inc(&estats->rx_crcerr);
		stats->rx_crc_errors++;
	}
	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
		atomic64_inc(&estats->rx_overrun);
		stats->rx_over_errors++;
	}
}
irqreturn_t gfar_receive(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask, ievent;

	ievent = gfar_read(&grp->regs->ievent);

	if (unlikely(ievent & IEVENT_FGPI)) {
		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
		return IRQ_HANDLED;
	}

	if (likely(napi_schedule_prep(&grp->napi_rx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_RX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_rx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
	}

	return IRQ_HANDLED;
}
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_tx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_TX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_tx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
	}

	return IRQ_HANDLED;
}
static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
			     struct sk_buff *skb, bool first)
{
	int size = lstatus & BD_LENGTH_MASK;
	struct page *page = rxb->page;

	if (likely(first)) {
		skb_put(skb, size);
	} else {
		/* the last fragments' length contains the full frame length */
		if (lstatus & BD_LFLAG(RXBD_LAST))
			size -= skb->len;

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				rxb->page_offset + RXBUF_ALIGNMENT,
				size, GFAR_RXB_TRUESIZE);
	}

	/* try reuse page */
	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
		return false;

	/* change offset to the other half */
	rxb->page_offset ^= GFAR_RXB_TRUESIZE;

	page_ref_inc(page);

	return true;
}
static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
			       struct gfar_rx_buff *old_rxb)
{
	struct gfar_rx_buff *new_rxb;
	u16 nta = rxq->next_to_alloc;

	new_rxb = &rxq->rx_buff[nta];

	/* find next buf that can reuse a page */
	nta++;
	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;

	/* copy page reference */
	*new_rxb = *old_rxb;

	/* sync for use by the device */
	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
					 old_rxb->page_offset,
					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
}
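
/* Buffer recycling example (illustration only): each page is split into
 * two GFAR_RXB_TRUESIZE halves. gfar_add_rx_frag() toggles page_offset
 * between the halves (offset ^= GFAR_RXB_TRUESIZE), so while the stack
 * reads one half, the other half is handed back to hardware here. As long
 * as the stack drops its reference in time (page_count(page) == 1), no
 * fresh page allocation is needed per frame.
 */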
static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
					    u32 lstatus, struct sk_buff *skb)
{
	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
	struct page *page = rxb->page;
	bool first = false;

	if (likely(!skb)) {
		void *buff_addr = page_address(page) + rxb->page_offset;

		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
		if (unlikely(!skb)) {
			gfar_rx_alloc_err(rx_queue);
			return NULL;
		}
		skb_reserve(skb, RXBUF_ALIGNMENT);
		first = true;
	}

	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);

	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
		/* reuse the free half of the page */
		gfar_reuse_rx_page(rx_queue, rxb);
	} else {
		/* page cannot be reused, unmap it */
		dma_unmap_page(rx_queue->dev, rxb->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear rxb content */
	rxb->page = NULL;

	return skb;
}
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is [FIXME]
	 */
	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
	    (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct rxfcb *fcb = NULL;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
	if (priv->uses_rxfcb)
		skb_pull(skb, GMAC_FCB_LEN);

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	/* Trim off the FCS */
	pskb_trim(skb, skb->len - ETH_FCS_LEN);

	if (ndev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(fcb->vlctl));
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *ndev = rx_queue->ndev;
	struct gfar_private *priv = netdev_priv(ndev);
	struct rxbd8 *bdp;
	int i, howmany = 0;
	struct sk_buff *skb = rx_queue->skb;
	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
	unsigned int total_bytes = 0, total_pkts = 0;

	/* Get the first full descriptor */
	i = rx_queue->next_to_clean;

	while (rx_work_limit--) {
		u32 lstatus;

		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
			cleaned_cnt = 0;
		}

		bdp = &rx_queue->rx_bd_base[i];
		lstatus = be32_to_cpu(bdp->lstatus);
		if (lstatus & BD_LFLAG(RXBD_EMPTY))
			break;

		/* order rx buffer descriptor reads */
		rmb();

		/* fetch next to clean buffer from the ring */
		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
		if (unlikely(!skb))
			break;

		cleaned_cnt++;
		howmany++;

		if (unlikely(++i == rx_queue->rx_ring_size))
			i = 0;

		rx_queue->next_to_clean = i;

		/* fetch next buffer if not the last in frame */
		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
			continue;

		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
			count_errors(lstatus, ndev);

			/* discard faulty buffer */
			dev_kfree_skb(skb);
			skb = NULL;
			rx_queue->stats.rx_dropped++;
			continue;
		}

		gfar_process_frame(ndev, skb);

		/* Increment the number of packets */
		total_pkts++;
		total_bytes += skb->len;

		skb_record_rx_queue(skb, rx_queue->qindex);

		skb->protocol = eth_type_trans(skb, ndev);

		/* Send the packet up the stack */
		napi_gro_receive(&rx_queue->grp->napi_rx, skb);

		skb = NULL;
	}

	/* Store incomplete frames for completion */
	rx_queue->skb = skb;

	rx_queue->stats.rx_packets += total_pkts;
	rx_queue->stats.rx_bytes += total_bytes;

	if (cleaned_cnt)
		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);

	/* Update Last Free RxBD pointer for LFC */
	if (unlikely(priv->tx_actual_en)) {
		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);

		gfar_write(rx_queue->rfbptr, bdp_dma);
	}

	return howmany;
}
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
	int work_done = 0;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	work_done = gfar_clean_rx_ring(rx_queue, budget);

	if (work_done < budget) {
		u32 imask;

		napi_complete_done(napi, work_done);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
	u32 imask;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	/* run Tx cleanup to completion */
	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
		gfar_clean_tx_ring(tx_queue);

	napi_complete(napi);

	spin_lock_irq(&gfargrp->grplock);
	imask = gfar_read(&regs->imask);
	imask |= IMASK_TX_DEFAULT;
	gfar_write(&regs->imask, imask);
	spin_unlock_irq(&gfargrp->grplock);

	return 0;
}
static int gfar_poll_rx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int work_done = 0, work_done_per_q = 0;
	int i, budget_per_q = 0;
	unsigned long rstat_rxf;
	int num_act_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;

	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
	if (num_act_queues)
		budget_per_q = budget/num_act_queues;

	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
		/* skip queue if not active */
		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
			continue;

		rx_queue = priv->rx_queue[i];
		work_done_per_q =
			gfar_clean_rx_ring(rx_queue, budget_per_q);
		work_done += work_done_per_q;

		/* finished processing this queue */
		if (work_done_per_q < budget_per_q) {
			/* clear active queue hw indication */
			gfar_write(&regs->rstat,
				   RSTAT_CLEAR_RXF0 >> i);
			num_act_queues--;

			if (!num_act_queues)
				break;
		}
	}

	if (!num_act_queues) {
		u32 imask;

		napi_complete_done(napi, work_done);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}
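
/* Budget split example (illustration only): with a NAPI budget of 64 and
 * three queues flagged active in RSTAT, each queue may clean up to
 * 64 / 3 = 21 frames in this pass. A queue that uses less than its share
 * has its RXF bit cleared and drops out of num_act_queues, and polling
 * only completes once every active queue has drained.
 */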
static int gfar_poll_tx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	int has_tx_work = 0;
	int i;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
		tx_queue = priv->tx_queue[i];
		/* run Tx cleanup to completion */
		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
			gfar_clean_tx_ring(tx_queue);
			has_tx_work = 1;
		}
	}

	if (has_tx_work) {
		u32 imask;

		napi_complete(napi);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_TX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			disable_irq(gfar_irq(grp, RX)->irq);
			disable_irq(gfar_irq(grp, ER)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, ER)->irq);
			enable_irq(gfar_irq(grp, RX)->irq);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	}
}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}
/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	if (unlikely(phydev->link != priv->oldlink ||
		     (phydev->link && (phydev->duplex != priv->oldduplex ||
				       phydev->speed != priv->oldspeed))))
		gfar_update_link_state(priv);
}
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting addresses
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}
/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 *    do a CRC on it (little endian), and reverse the bits of the
 *    result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 *    table.  The table is controlled through 8 32-bit registers:
 *    gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 *    entry 255.  This means that the 3 most significant bits in the
 *    hash index which gaddr register to use, and the 5 other bits
 *    indicate which bit (assuming an IBM numbering scheme, which
 *    for PowerPC (tm) is usually the case) in the register holds
 *    the entry.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
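
/* Worked example (illustration only, assuming hash_width = 8, i.e. no
 * extended hash): for a CRC result of 0xb6000000 the top 8 bits are 0xb6,
 * so whichbit = 0xb6 & 0x1f = 22 and whichreg = 0xb6 >> 5 = 5; bit 22 in
 * IBM numbering is mask 1 << (31 - 22) = 0x200, which gets OR-ed into
 * gaddr5.
 */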
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* For a station address of 0x12345678ABCD in transmission
	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
	 * MACnADDR2 is set to 0x34120000.
	 */
	tempval = (addr[5] << 24) | (addr[4] << 16) |
		  (addr[3] << 8)  |  addr[2];

	gfar_write(macptr, tempval);

	tempval = (addr[1] << 24) | (addr[0] << 16);

	gfar_write(macptr+1, tempval);
}
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			schedule_work(&priv->reset_task);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_over_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	u32 val = 0;

	if (!phydev->duplex)
		return val;

	if (!priv->pause_aneg_en) {
		if (priv->tx_pause_en)
			val |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			val |= MACCFG1_RX_FLOW;
	} else {
		u16 lcl_adv, rmt_adv;
		u8 flowctrl;

		/* get link partner capabilities */
		rmt_adv = 0;
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (flowctrl & FLOW_CTRL_TX)
			val |= MACCFG1_TX_FLOW;
		if (flowctrl & FLOW_CTRL_RX)
			val |= MACCFG1_RX_FLOW;
	}

	return val;
}
static noinline void gfar_update_link_state(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
		return;

	if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);
		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);

		if (phydev->duplex != priv->oldduplex) {
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, priv->ndev,
					   "Ack!  Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		/* Turn last free buffer recording on */
		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
			for (i = 0; i < priv->num_rx_queues; i++) {
				u32 bdp_dma;

				rx_queue = priv->rx_queue[i];
				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
				gfar_write(rx_queue->rfbptr, bdp_dma);
			}

			priv->tx_actual_en = 1;
		}

		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
			priv->tx_actual_en = 0;

		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink)
			priv->oldlink = 1;

	} else if (priv->oldlink) {
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (netif_msg_link(priv))
		phy_print_status(phydev);
}
static const struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);