/* drivers/net/ethernet/marvell/mv643xx_eth.c */
/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";

/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
#define PORT_CONFIG			0x0000
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT			0x0004
#define MAC_ADDR_LOW			0x0014
#define MAC_ADDR_HIGH			0x0018
#define SDMA_CONFIG			0x001c
#define  TX_BURST_SIZE_16_64BIT		0x01000000
#define  TX_BURST_SIZE_4_64BIT		0x00800000
#define  BLM_TX_NO_SWAP			0x00000020
#define  BLM_RX_NO_SWAP			0x00000010
#define  RX_BURST_SIZE_16_64BIT		0x00000008
#define  RX_BURST_SIZE_4_64BIT		0x00000004
#define PORT_SERIAL_CONTROL		0x003c
#define  SET_MII_SPEED_TO_100		0x01000000
#define  SET_GMII_SPEED_TO_1000		0x00800000
#define  SET_FULL_DUPLEX_MODE		0x00200000
#define  MAX_RX_PACKET_9700BYTE		0x000a0000
#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
#define  FORCE_LINK_PASS		0x00000002
#define  SERIAL_PORT_ENABLE		0x00000001
#define PORT_STATUS			0x0044
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND			0x0048
#define TXQ_FIX_PRIO_CONF		0x004c
#define PORT_SERIAL_CONTROL1		0x004c
#define  CLK125_BYPASS_EN		0x00000010
#define TX_BW_RATE			0x0050
#define TX_BW_MTU			0x0058
#define TX_BW_BURST			0x005c
#define INT_CAUSE			0x0060
#define  INT_TX_END			0x07f80000
#define  INT_TX_END_0			0x00080000
#define  INT_RX				0x000003fc
#define  INT_RX_0			0x00000004
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT			0x0064
#define  INT_EXT_LINK_PHY		0x00110000
#define  INT_EXT_TX			0x000000ff
#define INT_MASK			0x0068
#define INT_MASK_EXT			0x006c
#define TX_FIFO_URGENT_THRESHOLD	0x0074
#define RX_DISCARD_FRAME_CNT		0x0084
#define RX_OVERRUN_FRAME_CNT		0x0088
#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
#define TX_BW_RATE_MOVED		0x00e0
#define TX_BW_MTU_MOVED			0x00e8
#define TX_BW_BURST_MOVED		0x00ec
#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
#define RXQ_COMMAND			0x0280
#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))

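/*
 * Worked example (illustrative only, not used by the code): the macros
 * above turn a port or queue number into a register offset.  For port 1,
 * MIB_COUNTERS(1) is 0x1000 + (1 << 7) = 0x1080 and UNICAST_TABLE(1) is
 * 0x1600 + (1 << 10) = 0x1a00; for queue 2, TXQ_BW_TOKENS(2) is
 * 0x0300 + (2 << 4) = 0x0320.  The main per-port registers listed above
 * are relative to the port's own register block (0x0400, 0x0800, 0x0c00
 * for ports 0-2) and are accessed via the rdlp()/wrlp() helpers below.
 */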

/*
 * SDMA configuration register default value.
 */
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 BLM_RX_NO_SWAP		|	\
		 BLM_TX_NO_SWAP		|	\
		 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif


/*
 * Misc definitions.
 */
#define DEFAULT_RX_QUEUE_SIZE	128
#define DEFAULT_TX_QUEUE_SIZE	256
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)


/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
#define RX_IP_HDR_OK			0x02000000
#define RX_PKT_IS_IPV4			0x01000000
#define RX_PKT_IS_ETHERNETV2		0x00800000
#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
#define RX_PKT_IS_VLAN_TAGGED		0x00080000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define GEN_TCP_UDP_CHK_FULL		0x00000400
#define MAC_HDR_EXTRA_8_BYTES		0x00000200

#define TX_IHL_SHIFT			11


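/*
 * Ownership handshake (sketch of the convention used throughout this
 * file): a descriptor belongs to the SDMA engine while BUFFER_OWNED_BY_DMA
 * is set in cmd_sts and to the CPU otherwise.  The driver fills in
 * buf_ptr/byte_cnt first, issues a wmb(), and only then sets cmd_sts with
 * BUFFER_OWNED_BY_DMA, as rxq_refill() and txq_submit_skb() below do; on
 * completion it reads cmd_sts and treats a clear ownership bit as "done".
 */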
c9df406f 259/* global *******************************************************************/
e5371493 260struct mv643xx_eth_shared_private {
fc32b0e2
LB
261 /*
262 * Ethernet controller base address.
263 */
cc9754b3 264 void __iomem *base;
c9df406f 265
fc32b0e2
LB
266 /*
267 * Per-port MBUS window access register value.
268 */
c9df406f
LB
269 u32 win_protect;
270
fc32b0e2
LB
271 /*
272 * Hardware-specific parameters.
273 */
773fc3ee 274 int extended_rx_coal_limit;
457b1d5a 275 int tx_bw_control;
9b2c2ff7 276 int tx_csum_limit;
20922486 277 struct clk *clk;
c9df406f
LB
278};
279
457b1d5a
LB
280#define TX_BW_CONTROL_ABSENT 0
281#define TX_BW_CONTROL_OLD_LAYOUT 1
282#define TX_BW_CONTROL_NEW_LAYOUT 2
283
e7d2f4db
LB
284static int mv643xx_eth_open(struct net_device *dev);
285static int mv643xx_eth_stop(struct net_device *dev);
286
c9df406f
LB
287
288/* per-port *****************************************************************/
e5371493 289struct mib_counters {
fbd6a754
LB
290 u64 good_octets_received;
291 u32 bad_octets_received;
292 u32 internal_mac_transmit_err;
293 u32 good_frames_received;
294 u32 bad_frames_received;
295 u32 broadcast_frames_received;
296 u32 multicast_frames_received;
297 u32 frames_64_octets;
298 u32 frames_65_to_127_octets;
299 u32 frames_128_to_255_octets;
300 u32 frames_256_to_511_octets;
301 u32 frames_512_to_1023_octets;
302 u32 frames_1024_to_max_octets;
303 u64 good_octets_sent;
304 u32 good_frames_sent;
305 u32 excessive_collision;
306 u32 multicast_frames_sent;
307 u32 broadcast_frames_sent;
308 u32 unrec_mac_control_received;
309 u32 fc_sent;
310 u32 good_fc_received;
311 u32 bad_fc_received;
312 u32 undersize_received;
313 u32 fragments_received;
314 u32 oversize_received;
315 u32 jabber_received;
316 u32 mac_receive_error;
317 u32 bad_crc_event;
318 u32 collision;
319 u32 late_collision;
302476c9
PZ
320 /* Non MIB hardware counters */
321 u32 rx_discard;
322 u32 rx_overrun;
fbd6a754
LB
323};
324
8a578111 325struct rx_queue {
64da80a2
LB
326 int index;
327
8a578111
LB
328 int rx_ring_size;
329
330 int rx_desc_count;
331 int rx_curr_desc;
332 int rx_used_desc;
333
334 struct rx_desc *rx_desc_area;
335 dma_addr_t rx_desc_dma;
336 int rx_desc_area_size;
337 struct sk_buff **rx_skb;
8a578111
LB
338};
339
13d64285 340struct tx_queue {
3d6b35bc
LB
341 int index;
342
13d64285 343 int tx_ring_size;
fbd6a754 344
13d64285
LB
345 int tx_desc_count;
346 int tx_curr_desc;
347 int tx_used_desc;
fbd6a754 348
5daffe94 349 struct tx_desc *tx_desc_area;
fbd6a754
LB
350 dma_addr_t tx_desc_dma;
351 int tx_desc_area_size;
99ab08e0
LB
352
353 struct sk_buff_head tx_skb;
8fd89211
LB
354
355 unsigned long tx_packets;
356 unsigned long tx_bytes;
357 unsigned long tx_dropped;
13d64285
LB
358};
359
360struct mv643xx_eth_private {
361 struct mv643xx_eth_shared_private *shared;
37a6084f 362 void __iomem *base;
fc32b0e2 363 int port_num;
13d64285 364
fc32b0e2 365 struct net_device *dev;
fbd6a754 366
ed94493f 367 struct phy_device *phy;
fbd6a754 368
4ff3495a
LB
369 struct timer_list mib_counters_timer;
370 spinlock_t mib_counters_lock;
fc32b0e2 371 struct mib_counters mib_counters;
4ff3495a 372
fc32b0e2 373 struct work_struct tx_timeout_task;
8a578111 374
1fa38c58 375 struct napi_struct napi;
e0ca8410 376 u32 int_mask;
1319ebad 377 u8 oom;
1fa38c58
LB
378 u8 work_link;
379 u8 work_tx;
380 u8 work_tx_end;
381 u8 work_rx;
382 u8 work_rx_refill;
1fa38c58 383
2bcb4b0f 384 int skb_size;
2bcb4b0f 385
8a578111
LB
386 /*
387 * RX state.
388 */
e7d2f4db 389 int rx_ring_size;
8a578111
LB
390 unsigned long rx_desc_sram_addr;
391 int rx_desc_sram_size;
f7981c1c 392 int rxq_count;
2257e05c 393 struct timer_list rx_oom;
64da80a2 394 struct rx_queue rxq[8];
13d64285
LB
395
396 /*
397 * TX state.
398 */
e7d2f4db 399 int tx_ring_size;
13d64285
LB
400 unsigned long tx_desc_sram_addr;
401 int tx_desc_sram_size;
f7981c1c 402 int txq_count;
3d6b35bc 403 struct tx_queue txq[8];
452503eb
AL
404
405 /*
406 * Hardware-specific parameters.
407 */
408 struct clk *clk;
409 unsigned int t_clk;
fbd6a754 410};
1da177e4 411
fbd6a754 412
c9df406f 413/* port register accessors **************************************************/
e5371493 414static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
c9df406f 415{
cc9754b3 416 return readl(mp->shared->base + offset);
c9df406f 417}
fbd6a754 418
37a6084f
LB
419static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
420{
421 return readl(mp->base + offset);
422}
423
e5371493 424static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
c9df406f 425{
cc9754b3 426 writel(data, mp->shared->base + offset);
c9df406f 427}
fbd6a754 428
37a6084f
LB
429static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
430{
431 writel(data, mp->base + offset);
432}
433
fbd6a754 434
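/*
 * Usage sketch (hypothetical helper, not part of the driver): rdl()/wrl()
 * address the shared register file, while rdlp()/wrlp() take offsets
 * relative to this port's own register block.  Link state could, for
 * example, be polled like this:
 *
 *	static bool port_link_is_up(struct mv643xx_eth_private *mp)
 *	{
 *		return !!(rdlp(mp, PORT_STATUS) & LINK_UP);
 *	}
 */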
c9df406f 435/* rxq/txq helper functions *************************************************/
8a578111 436static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
c9df406f 437{
64da80a2 438 return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
c9df406f 439}
fbd6a754 440
13d64285
LB
441static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
442{
3d6b35bc 443 return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
13d64285
LB
444}
445
8a578111 446static void rxq_enable(struct rx_queue *rxq)
c9df406f 447{
8a578111 448 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
37a6084f 449 wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
8a578111 450}
1da177e4 451
8a578111
LB
452static void rxq_disable(struct rx_queue *rxq)
453{
454 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
64da80a2 455 u8 mask = 1 << rxq->index;
1da177e4 456
37a6084f
LB
457 wrlp(mp, RXQ_COMMAND, mask << 8);
458 while (rdlp(mp, RXQ_COMMAND) & mask)
8a578111 459 udelay(10);
c9df406f
LB
460}
461
6b368f68
LB
462static void txq_reset_hw_ptr(struct tx_queue *txq)
463{
464 struct mv643xx_eth_private *mp = txq_to_mp(txq);
6b368f68
LB
465 u32 addr;
466
467 addr = (u32)txq->tx_desc_dma;
468 addr += txq->tx_curr_desc * sizeof(struct tx_desc);
37a6084f 469 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
6b368f68
LB
470}
471
13d64285 472static void txq_enable(struct tx_queue *txq)
1da177e4 473{
13d64285 474 struct mv643xx_eth_private *mp = txq_to_mp(txq);
37a6084f 475 wrlp(mp, TXQ_COMMAND, 1 << txq->index);
1da177e4
LT
476}
477
13d64285 478static void txq_disable(struct tx_queue *txq)
1da177e4 479{
13d64285 480 struct mv643xx_eth_private *mp = txq_to_mp(txq);
3d6b35bc 481 u8 mask = 1 << txq->index;
c9df406f 482
37a6084f
LB
483 wrlp(mp, TXQ_COMMAND, mask << 8);
484 while (rdlp(mp, TXQ_COMMAND) & mask)
13d64285
LB
485 udelay(10);
486}
487
1fa38c58 488static void txq_maybe_wake(struct tx_queue *txq)
13d64285
LB
489{
490 struct mv643xx_eth_private *mp = txq_to_mp(txq);
e5ef1de1 491 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
3d6b35bc 492
8fd89211
LB
493 if (netif_tx_queue_stopped(nq)) {
494 __netif_tx_lock(nq, smp_processor_id());
495 if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
496 netif_tx_wake_queue(nq);
497 __netif_tx_unlock(nq);
498 }
1da177e4
LT
499}
500
8a578111 501static int rxq_process(struct rx_queue *rxq, int budget)
1da177e4 502{
8a578111
LB
503 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
504 struct net_device_stats *stats = &mp->dev->stats;
505 int rx;
1da177e4 506
8a578111 507 rx = 0;
9e1f3772 508 while (rx < budget && rxq->rx_desc_count) {
fc32b0e2 509 struct rx_desc *rx_desc;
96587661 510 unsigned int cmd_sts;
fc32b0e2 511 struct sk_buff *skb;
6b8f90c2 512 u16 byte_cnt;
ff561eef 513
8a578111 514 rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
1da177e4 515
96587661 516 cmd_sts = rx_desc->cmd_sts;
2257e05c 517 if (cmd_sts & BUFFER_OWNED_BY_DMA)
96587661 518 break;
96587661 519 rmb();
1da177e4 520
8a578111
LB
521 skb = rxq->rx_skb[rxq->rx_curr_desc];
522 rxq->rx_skb[rxq->rx_curr_desc] = NULL;
ff561eef 523
9da78745
LB
524 rxq->rx_curr_desc++;
525 if (rxq->rx_curr_desc == rxq->rx_ring_size)
526 rxq->rx_curr_desc = 0;
ff561eef 527
eb0519b5 528 dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
abe78717 529 rx_desc->buf_size, DMA_FROM_DEVICE);
8a578111
LB
530 rxq->rx_desc_count--;
531 rx++;
b1dd9ca1 532
1fa38c58
LB
533 mp->work_rx_refill |= 1 << rxq->index;
534
6b8f90c2
LB
535 byte_cnt = rx_desc->byte_cnt;
536
468d09f8
DF
537 /*
538 * Update statistics.
fc32b0e2
LB
539 *
540 * Note that the descriptor byte count includes 2 dummy
541 * bytes automatically inserted by the hardware at the
542 * start of the packet (which we don't count), and a 4
543 * byte CRC at the end of the packet (which we do count).
468d09f8 544 */
1da177e4 545 stats->rx_packets++;
6b8f90c2 546 stats->rx_bytes += byte_cnt - 2;
96587661 547
1da177e4 548 /*
fc32b0e2
LB
549 * In case we received a packet without first / last bits
550 * on, or the error summary bit is set, the packet needs
551 * to be dropped.
1da177e4 552 */
f61e5547
LB
553 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
554 != (RX_FIRST_DESC | RX_LAST_DESC))
555 goto err;
556
557 /*
558 * The -4 is for the CRC in the trailer of the
559 * received packet
560 */
561 skb_put(skb, byte_cnt - 2 - 4);
562
563 if (cmd_sts & LAYER_4_CHECKSUM_OK)
564 skb->ip_summed = CHECKSUM_UNNECESSARY;
565 skb->protocol = eth_type_trans(skb, mp->dev);
eaf5d590 566
3619eb85 567 napi_gro_receive(&mp->napi, skb);
f61e5547
LB
568
569 continue;
570
571err:
572 stats->rx_dropped++;
573
574 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
575 (RX_FIRST_DESC | RX_LAST_DESC)) {
576 if (net_ratelimit())
7542db8b
JP
577 netdev_err(mp->dev,
578 "received packet spanning multiple descriptors\n");
1da177e4 579 }
f61e5547
LB
580
581 if (cmd_sts & ERROR_SUMMARY)
582 stats->rx_errors++;
583
584 dev_kfree_skb(skb);
1da177e4 585 }
fc32b0e2 586
1fa38c58
LB
587 if (rx < budget)
588 mp->work_rx &= ~(1 << rxq->index);
589
8a578111 590 return rx;
1da177e4
LT
591}
592
1fa38c58 593static int rxq_refill(struct rx_queue *rxq, int budget)
d0412d96 594{
1fa38c58 595 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
1fa38c58 596 int refilled;
8a578111 597
1fa38c58
LB
598 refilled = 0;
599 while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
600 struct sk_buff *skb;
1fa38c58 601 int rx;
53771522 602 struct rx_desc *rx_desc;
530e557a 603 int size;
d0412d96 604
acb600de 605 skb = netdev_alloc_skb(mp->dev, mp->skb_size);
2bcb4b0f 606
1fa38c58 607 if (skb == NULL) {
1319ebad 608 mp->oom = 1;
1fa38c58
LB
609 goto oom;
610 }
d0412d96 611
7fd96ce4
LB
612 if (SKB_DMA_REALIGN)
613 skb_reserve(skb, SKB_DMA_REALIGN);
2257e05c 614
1fa38c58
LB
615 refilled++;
616 rxq->rx_desc_count++;
c9df406f 617
1fa38c58
LB
618 rx = rxq->rx_used_desc++;
619 if (rxq->rx_used_desc == rxq->rx_ring_size)
620 rxq->rx_used_desc = 0;
2257e05c 621
53771522
LB
622 rx_desc = rxq->rx_desc_area + rx;
623
18f1d054 624 size = skb_end_pointer(skb) - skb->data;
eb0519b5 625 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
530e557a 626 skb->data, size,
eb0519b5 627 DMA_FROM_DEVICE);
530e557a 628 rx_desc->buf_size = size;
1fa38c58
LB
629 rxq->rx_skb[rx] = skb;
630 wmb();
53771522 631 rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
1fa38c58 632 wmb();
2257e05c 633
1fa38c58
LB
634 /*
635 * The hardware automatically prepends 2 bytes of
636 * dummy data to each received packet, so that the
637 * IP header ends up 16-byte aligned.
638 */
639 skb_reserve(skb, 2);
640 }
641
642 if (refilled < budget)
643 mp->work_rx_refill &= ~(1 << rxq->index);
644
645oom:
646 return refilled;
d0412d96
JC
647}
648
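/*
 * RX byte accounting example: the hardware prepends 2 bytes of padding
 * (so the IP header lands 16-byte aligned) and keeps the 4-byte CRC at
 * the end of the frame, and both are included in the descriptor's
 * byte_cnt.  For a 64-byte frame on the wire (CRC included),
 * rxq_process() therefore sees byte_cnt = 66, reports rx_bytes += 64
 * (66 - 2, CRC counted per the comment there), and passes a 60-byte
 * payload up the stack via skb_put(skb, 66 - 2 - 4).
 */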
c9df406f
LB
649
650/* tx ***********************************************************************/
c9df406f 651static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
1da177e4 652{
13d64285 653 int frag;
1da177e4 654
c9df406f 655 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
9e903e08
ED
656 const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
657
658 if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
c9df406f 659 return 1;
1da177e4 660 }
13d64285 661
c9df406f
LB
662 return 0;
663}
7303fde8 664
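/*
 * Example: a 6-byte fragment that starts at page offset 0x1003 satisfies
 * skb_frag_size() <= 8 && (page_offset & 7), so mv643xx_eth_xmit() below
 * will try __skb_linearize() before handing the skb to the hardware.
 */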
0a8fa933
EG
665static inline __be16 sum16_as_be(__sum16 sum)
666{
667 return (__force __be16)sum;
668}
669
670static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
671 u16 *l4i_chk, u32 *command, int length)
672{
673 int ret;
674 u32 cmd = 0;
675
676 if (skb->ip_summed == CHECKSUM_PARTIAL) {
677 int hdr_len;
678 int tag_bytes;
679
680 BUG_ON(skb->protocol != htons(ETH_P_IP) &&
681 skb->protocol != htons(ETH_P_8021Q));
682
683 hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
684 tag_bytes = hdr_len - ETH_HLEN;
685
686 if (length - hdr_len > mp->shared->tx_csum_limit ||
687 unlikely(tag_bytes & ~12)) {
688 ret = skb_checksum_help(skb);
689 if (!ret)
690 goto no_csum;
691 return ret;
692 }
693
694 if (tag_bytes & 4)
695 cmd |= MAC_HDR_EXTRA_4_BYTES;
696 if (tag_bytes & 8)
697 cmd |= MAC_HDR_EXTRA_8_BYTES;
698
84411f73 699 cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
0a8fa933
EG
700 GEN_IP_V4_CHECKSUM |
701 ip_hdr(skb)->ihl << TX_IHL_SHIFT;
702
84411f73
EG
703 /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
704 * it seems we don't need to pass the initial checksum. */
0a8fa933
EG
705 switch (ip_hdr(skb)->protocol) {
706 case IPPROTO_UDP:
707 cmd |= UDP_FRAME;
84411f73 708 *l4i_chk = 0;
0a8fa933
EG
709 break;
710 case IPPROTO_TCP:
84411f73 711 *l4i_chk = 0;
0a8fa933
EG
712 break;
713 default:
714 WARN(1, "protocol not supported");
715 }
716 } else {
717no_csum:
718 /* Errata BTS #50, IHL must be 5 if no HW checksum */
719 cmd |= 5 << TX_IHL_SHIFT;
720 }
721 *command = cmd;
722 return 0;
723}
724
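/*
 * Worked example: for an untagged TCP/IPv4 frame with a 20-byte IP header
 * (ihl = 5) and CHECKSUM_PARTIAL set, tag_bytes is 0, so skb_tx_csum()
 * builds cmd = GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
 * GEN_IP_V4_CHECKSUM | (5 << TX_IHL_SHIFT) = 0x00062c00 and leaves
 * l4i_chk at 0.  txq_submit_skb() then ORs in TX_FIRST_DESC | GEN_CRC |
 * BUFFER_OWNED_BY_DMA before writing the descriptor.
 */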
13d64285 725static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
c9df406f 726{
eb0519b5 727 struct mv643xx_eth_private *mp = txq_to_mp(txq);
13d64285 728 int nr_frags = skb_shinfo(skb)->nr_frags;
c9df406f 729 int frag;
1da177e4 730
13d64285
LB
731 for (frag = 0; frag < nr_frags; frag++) {
732 skb_frag_t *this_frag;
733 int tx_index;
734 struct tx_desc *desc;
69ad0dd7 735 void *addr;
13d64285
LB
736
737 this_frag = &skb_shinfo(skb)->frags[frag];
69ad0dd7 738 addr = page_address(this_frag->page.p) + this_frag->page_offset;
66823b92
LB
739 tx_index = txq->tx_curr_desc++;
740 if (txq->tx_curr_desc == txq->tx_ring_size)
741 txq->tx_curr_desc = 0;
13d64285
LB
742 desc = &txq->tx_desc_area[tx_index];
743
744 /*
745 * The last fragment will generate an interrupt
746 * which will free the skb on TX completion.
747 */
748 if (frag == nr_frags - 1) {
749 desc->cmd_sts = BUFFER_OWNED_BY_DMA |
750 ZERO_PADDING | TX_LAST_DESC |
751 TX_ENABLE_INTERRUPT;
13d64285
LB
752 } else {
753 desc->cmd_sts = BUFFER_OWNED_BY_DMA;
13d64285
LB
754 }
755
c9df406f 756 desc->l4i_chk = 0;
9e903e08 757 desc->byte_cnt = skb_frag_size(this_frag);
69ad0dd7
EG
758 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
759 desc->byte_cnt, DMA_TO_DEVICE);
c9df406f 760 }
1da177e4
LT
761}
762
4df89bd5 763static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
1da177e4 764{
8fa89bf5 765 struct mv643xx_eth_private *mp = txq_to_mp(txq);
13d64285 766 int nr_frags = skb_shinfo(skb)->nr_frags;
c9df406f 767 int tx_index;
cc9754b3 768 struct tx_desc *desc;
c9df406f 769 u32 cmd_sts;
4df89bd5 770 u16 l4i_chk;
0a8fa933 771 int length, ret;
1da177e4 772
0a8fa933 773 cmd_sts = 0;
4df89bd5 774 l4i_chk = 0;
c9df406f 775
0a8fa933
EG
776 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
777 if (ret) {
778 dev_kfree_skb_any(skb);
779 return ret;
c9df406f 780 }
0a8fa933 781 cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
c9df406f 782
66823b92
LB
783 tx_index = txq->tx_curr_desc++;
784 if (txq->tx_curr_desc == txq->tx_ring_size)
785 txq->tx_curr_desc = 0;
4df89bd5
LB
786 desc = &txq->tx_desc_area[tx_index];
787
788 if (nr_frags) {
789 txq_submit_frag_skb(txq, skb);
790 length = skb_headlen(skb);
791 } else {
792 cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
793 length = skb->len;
794 }
795
796 desc->l4i_chk = l4i_chk;
797 desc->byte_cnt = length;
eb0519b5
GP
798 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
799 length, DMA_TO_DEVICE);
4df89bd5 800
99ab08e0
LB
801 __skb_queue_tail(&txq->tx_skb, skb);
802
3b182d7d
RC
803 skb_tx_timestamp(skb);
804
c9df406f
LB
805 /* ensure all other descriptors are written before first cmd_sts */
806 wmb();
807 desc->cmd_sts = cmd_sts;
808
1fa38c58
LB
809 /* clear TX_END status */
810 mp->work_tx_end &= ~(1 << txq->index);
8fa89bf5 811
c9df406f
LB
812 /* ensure all descriptors are written before poking hardware */
813 wmb();
13d64285 814 txq_enable(txq);
c9df406f 815
13d64285 816 txq->tx_desc_count += nr_frags + 1;
4df89bd5
LB
817
818 return 0;
1da177e4 819}
1da177e4 820
0ccfe64d 821static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4 822{
e5371493 823 struct mv643xx_eth_private *mp = netdev_priv(dev);
73151ce3 824 int length, queue;
13d64285 825 struct tx_queue *txq;
e5ef1de1 826 struct netdev_queue *nq;
afdb57a2 827
8fd89211
LB
828 queue = skb_get_queue_mapping(skb);
829 txq = mp->txq + queue;
830 nq = netdev_get_tx_queue(dev, queue);
831
c9df406f 832 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
8fd89211 833 txq->tx_dropped++;
7542db8b
JP
834 netdev_printk(KERN_DEBUG, dev,
835 "failed to linearize skb with tiny unaligned fragment\n");
c9df406f
LB
836 return NETDEV_TX_BUSY;
837 }
838
17cd0a59 839 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
e5ef1de1 840 if (net_ratelimit())
7542db8b 841 netdev_err(dev, "tx queue full?!\n");
43f54377 842 dev_kfree_skb_any(skb);
3d6b35bc 843 return NETDEV_TX_OK;
c9df406f
LB
844 }
845
73151ce3
RC
846 length = skb->len;
847
4df89bd5
LB
848 if (!txq_submit_skb(txq, skb)) {
849 int entries_left;
850
73151ce3 851 txq->tx_bytes += length;
4df89bd5 852 txq->tx_packets++;
c9df406f 853
4df89bd5
LB
854 entries_left = txq->tx_ring_size - txq->tx_desc_count;
855 if (entries_left < MAX_SKB_FRAGS + 1)
856 netif_tx_stop_queue(nq);
857 }
c9df406f 858
c9df406f 859 return NETDEV_TX_OK;
1da177e4
LT
860}
861
c9df406f 862
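/*
 * Note on the queue thresholds used above and in txq_maybe_wake(): the
 * queue is stopped once fewer than MAX_SKB_FRAGS + 1 descriptors remain,
 * because one skb may need a descriptor for the linear part plus one per
 * page fragment; waking uses the same bound, so a just-woken queue can
 * always accept at least one maximally fragmented skb.
 */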
1fa38c58
LB
863/* tx napi ******************************************************************/
864static void txq_kick(struct tx_queue *txq)
865{
866 struct mv643xx_eth_private *mp = txq_to_mp(txq);
8fd89211 867 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1fa38c58
LB
868 u32 hw_desc_ptr;
869 u32 expected_ptr;
870
8fd89211 871 __netif_tx_lock(nq, smp_processor_id());
1fa38c58 872
37a6084f 873 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
1fa38c58
LB
874 goto out;
875
37a6084f 876 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
1fa38c58
LB
877 expected_ptr = (u32)txq->tx_desc_dma +
878 txq->tx_curr_desc * sizeof(struct tx_desc);
879
880 if (hw_desc_ptr != expected_ptr)
881 txq_enable(txq);
882
883out:
8fd89211 884 __netif_tx_unlock(nq);
1fa38c58
LB
885
886 mp->work_tx_end &= ~(1 << txq->index);
887}
888
889static int txq_reclaim(struct tx_queue *txq, int budget, int force)
890{
891 struct mv643xx_eth_private *mp = txq_to_mp(txq);
8fd89211 892 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1fa38c58
LB
893 int reclaimed;
894
3aefe2b4 895 __netif_tx_lock_bh(nq);
1fa38c58
LB
896
897 reclaimed = 0;
898 while (reclaimed < budget && txq->tx_desc_count > 0) {
899 int tx_index;
900 struct tx_desc *desc;
901 u32 cmd_sts;
902 struct sk_buff *skb;
1fa38c58
LB
903
904 tx_index = txq->tx_used_desc;
905 desc = &txq->tx_desc_area[tx_index];
906 cmd_sts = desc->cmd_sts;
907
908 if (cmd_sts & BUFFER_OWNED_BY_DMA) {
909 if (!force)
910 break;
911 desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
912 }
913
914 txq->tx_used_desc = tx_index + 1;
915 if (txq->tx_used_desc == txq->tx_ring_size)
916 txq->tx_used_desc = 0;
917
918 reclaimed++;
919 txq->tx_desc_count--;
920
99ab08e0
LB
921 skb = NULL;
922 if (cmd_sts & TX_LAST_DESC)
923 skb = __skb_dequeue(&txq->tx_skb);
1fa38c58
LB
924
925 if (cmd_sts & ERROR_SUMMARY) {
7542db8b 926 netdev_info(mp->dev, "tx error\n");
1fa38c58
LB
927 mp->dev->stats.tx_errors++;
928 }
929
69ad0dd7
EG
930 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
931 desc->byte_cnt, DMA_TO_DEVICE);
acb600de 932 dev_kfree_skb(skb);
1fa38c58
LB
933 }
934
3aefe2b4 935 __netif_tx_unlock_bh(nq);
8fd89211 936
1fa38c58
LB
937 if (reclaimed < budget)
938 mp->work_tx &= ~(1 << txq->index);
939
1fa38c58
LB
940 return reclaimed;
941}
942
943
89df5fdc
LB
944/* tx rate control **********************************************************/
945/*
946 * Set total maximum TX rate (shared by all TX queues for this port)
947 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
948 */
949static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
950{
951 int token_rate;
952 int mtu;
953 int bucket_size;
954
452503eb 955 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
89df5fdc
LB
956 if (token_rate > 1023)
957 token_rate = 1023;
958
959 mtu = (mp->dev->mtu + 255) >> 8;
960 if (mtu > 63)
961 mtu = 63;
962
963 bucket_size = (burst + 255) >> 8;
964 if (bucket_size > 65535)
965 bucket_size = 65535;
966
457b1d5a
LB
967 switch (mp->shared->tx_bw_control) {
968 case TX_BW_CONTROL_OLD_LAYOUT:
37a6084f
LB
969 wrlp(mp, TX_BW_RATE, token_rate);
970 wrlp(mp, TX_BW_MTU, mtu);
971 wrlp(mp, TX_BW_BURST, bucket_size);
457b1d5a
LB
972 break;
973 case TX_BW_CONTROL_NEW_LAYOUT:
37a6084f
LB
974 wrlp(mp, TX_BW_RATE_MOVED, token_rate);
975 wrlp(mp, TX_BW_MTU_MOVED, mtu);
976 wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
457b1d5a 977 break;
1e881592 978 }
89df5fdc
LB
979}
980
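/*
 * Worked example (values purely illustrative): with t_clk = 200 MHz and
 * rate = 100 Mbit/s, token_rate = ((100000000 / 1000) * 64) / 200000 = 32,
 * well under the 1023 cap; an MTU of 1500 gives the register field
 * (1500 + 255) >> 8 = 6, and a 64 KiB burst gives a bucket_size of 256.
 */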
981static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
982{
983 struct mv643xx_eth_private *mp = txq_to_mp(txq);
984 int token_rate;
985 int bucket_size;
986
452503eb 987 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
89df5fdc
LB
988 if (token_rate > 1023)
989 token_rate = 1023;
990
991 bucket_size = (burst + 255) >> 8;
992 if (bucket_size > 65535)
993 bucket_size = 65535;
994
37a6084f
LB
995 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
996 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
89df5fdc
LB
997}
998
999static void txq_set_fixed_prio_mode(struct tx_queue *txq)
1000{
1001 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1002 int off;
1003 u32 val;
1004
1005 /*
1006 * Turn on fixed priority mode.
1007 */
457b1d5a
LB
1008 off = 0;
1009 switch (mp->shared->tx_bw_control) {
1010 case TX_BW_CONTROL_OLD_LAYOUT:
37a6084f 1011 off = TXQ_FIX_PRIO_CONF;
457b1d5a
LB
1012 break;
1013 case TX_BW_CONTROL_NEW_LAYOUT:
37a6084f 1014 off = TXQ_FIX_PRIO_CONF_MOVED;
457b1d5a
LB
1015 break;
1016 }
89df5fdc 1017
457b1d5a 1018 if (off) {
37a6084f 1019 val = rdlp(mp, off);
457b1d5a 1020 val |= 1 << txq->index;
37a6084f 1021 wrlp(mp, off, val);
457b1d5a 1022 }
89df5fdc
LB
1023}
1024
89df5fdc 1025
c9df406f 1026/* mii management interface *************************************************/
260055bb
PS
1027static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp)
1028{
1029 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
1030 u32 autoneg_disable = FORCE_LINK_PASS |
1031 DISABLE_AUTO_NEG_SPEED_GMII |
1032 DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
1033 DISABLE_AUTO_NEG_FOR_DUPLEX;
1034
1035 if (mp->phy->autoneg == AUTONEG_ENABLE) {
1036 /* enable auto negotiation */
1037 pscr &= ~autoneg_disable;
1038 goto out_write;
1039 }
1040
1041 pscr |= autoneg_disable;
1042
1043 if (mp->phy->speed == SPEED_1000) {
1044 /* force gigabit, half duplex not supported */
1045 pscr |= SET_GMII_SPEED_TO_1000;
1046 pscr |= SET_FULL_DUPLEX_MODE;
1047 goto out_write;
1048 }
1049
1050 pscr &= ~SET_GMII_SPEED_TO_1000;
1051
1052 if (mp->phy->speed == SPEED_100)
1053 pscr |= SET_MII_SPEED_TO_100;
1054 else
1055 pscr &= ~SET_MII_SPEED_TO_100;
1056
1057 if (mp->phy->duplex == DUPLEX_FULL)
1058 pscr |= SET_FULL_DUPLEX_MODE;
1059 else
1060 pscr &= ~SET_FULL_DUPLEX_MODE;
1061
1062out_write:
1063 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
1064}
1065
8fd89211
LB
1066/* statistics ***************************************************************/
1067static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
1068{
1069 struct mv643xx_eth_private *mp = netdev_priv(dev);
1070 struct net_device_stats *stats = &dev->stats;
1071 unsigned long tx_packets = 0;
1072 unsigned long tx_bytes = 0;
1073 unsigned long tx_dropped = 0;
1074 int i;
1075
1076 for (i = 0; i < mp->txq_count; i++) {
1077 struct tx_queue *txq = mp->txq + i;
1078
1079 tx_packets += txq->tx_packets;
1080 tx_bytes += txq->tx_bytes;
1081 tx_dropped += txq->tx_dropped;
1082 }
1083
1084 stats->tx_packets = tx_packets;
1085 stats->tx_bytes = tx_bytes;
1086 stats->tx_dropped = tx_dropped;
1087
1088 return stats;
1089}
1090
fc32b0e2 1091static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
c9df406f 1092{
fc32b0e2 1093 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
1da177e4
LT
1094}
1095
fc32b0e2 1096static void mib_counters_clear(struct mv643xx_eth_private *mp)
d0412d96 1097{
fc32b0e2
LB
1098 int i;
1099
1100 for (i = 0; i < 0x80; i += 4)
1101 mib_read(mp, i);
302476c9
PZ
1102
1103 /* Clear non MIB hw counters also */
1104 rdlp(mp, RX_DISCARD_FRAME_CNT);
1105 rdlp(mp, RX_OVERRUN_FRAME_CNT);
c9df406f 1106}
d0412d96 1107
fc32b0e2 1108static void mib_counters_update(struct mv643xx_eth_private *mp)
c9df406f 1109{
e5371493 1110 struct mib_counters *p = &mp->mib_counters;
4b8e3655 1111
57e8f26a 1112 spin_lock_bh(&mp->mib_counters_lock);
fc32b0e2 1113 p->good_octets_received += mib_read(mp, 0x00);
fc32b0e2
LB
1114 p->bad_octets_received += mib_read(mp, 0x08);
1115 p->internal_mac_transmit_err += mib_read(mp, 0x0c);
1116 p->good_frames_received += mib_read(mp, 0x10);
1117 p->bad_frames_received += mib_read(mp, 0x14);
1118 p->broadcast_frames_received += mib_read(mp, 0x18);
1119 p->multicast_frames_received += mib_read(mp, 0x1c);
1120 p->frames_64_octets += mib_read(mp, 0x20);
1121 p->frames_65_to_127_octets += mib_read(mp, 0x24);
1122 p->frames_128_to_255_octets += mib_read(mp, 0x28);
1123 p->frames_256_to_511_octets += mib_read(mp, 0x2c);
1124 p->frames_512_to_1023_octets += mib_read(mp, 0x30);
1125 p->frames_1024_to_max_octets += mib_read(mp, 0x34);
1126 p->good_octets_sent += mib_read(mp, 0x38);
fc32b0e2
LB
1127 p->good_frames_sent += mib_read(mp, 0x40);
1128 p->excessive_collision += mib_read(mp, 0x44);
1129 p->multicast_frames_sent += mib_read(mp, 0x48);
1130 p->broadcast_frames_sent += mib_read(mp, 0x4c);
1131 p->unrec_mac_control_received += mib_read(mp, 0x50);
1132 p->fc_sent += mib_read(mp, 0x54);
1133 p->good_fc_received += mib_read(mp, 0x58);
1134 p->bad_fc_received += mib_read(mp, 0x5c);
1135 p->undersize_received += mib_read(mp, 0x60);
1136 p->fragments_received += mib_read(mp, 0x64);
1137 p->oversize_received += mib_read(mp, 0x68);
1138 p->jabber_received += mib_read(mp, 0x6c);
1139 p->mac_receive_error += mib_read(mp, 0x70);
1140 p->bad_crc_event += mib_read(mp, 0x74);
1141 p->collision += mib_read(mp, 0x78);
1142 p->late_collision += mib_read(mp, 0x7c);
302476c9
PZ
1143 /* Non MIB hardware counters */
1144 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
1145 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
57e8f26a 1146 spin_unlock_bh(&mp->mib_counters_lock);
4ff3495a
LB
1147}
1148
1149static void mib_counters_timer_wrapper(unsigned long _mp)
1150{
1151 struct mv643xx_eth_private *mp = (void *)_mp;
4ff3495a 1152 mib_counters_update(mp);
041b4ddb 1153 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
d0412d96
JC
1154}
1155
c9df406f 1156
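/*
 * mib_read() fetches a 32-bit hardware counter and mib_counters_update()
 * accumulates it into the software copy under mib_counters_lock, so the
 * 30-second timer above mainly exists to harvest the hardware counters
 * before they can wrap.  E.g. a gigabit port moving ~125 MB/s would wrap
 * a 32-bit octet counter in roughly 34 seconds, which is why the
 * good_octets_* fields are kept as u64 in software.
 */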
3e508034
LB
1157/* interrupt coalescing *****************************************************/
1158/*
1159 * Hardware coalescing parameters are set in units of 64 t_clk
1160 * cycles. I.e.:
1161 *
1162 * coal_delay_in_usec = 64000000 * register_value / t_clk_rate
1163 *
1164 * register_value = coal_delay_in_usec * t_clk_rate / 64000000
1165 *
1166 * In the ->set*() methods, we round the computed register value
1167 * to the nearest integer.
1168 */
1169static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
1170{
1171 u32 val = rdlp(mp, SDMA_CONFIG);
1172 u64 temp;
1173
1174 if (mp->shared->extended_rx_coal_limit)
1175 temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
1176 else
1177 temp = (val & 0x003fff00) >> 8;
1178
1179 temp *= 64000000;
452503eb 1180 do_div(temp, mp->t_clk);
3e508034
LB
1181
1182 return (unsigned int)temp;
1183}
1184
1185static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1186{
1187 u64 temp;
1188 u32 val;
1189
452503eb 1190 temp = (u64)usec * mp->t_clk;
3e508034
LB
1191 temp += 31999999;
1192 do_div(temp, 64000000);
1193
1194 val = rdlp(mp, SDMA_CONFIG);
1195 if (mp->shared->extended_rx_coal_limit) {
1196 if (temp > 0xffff)
1197 temp = 0xffff;
1198 val &= ~0x023fff80;
1199 val |= (temp & 0x8000) << 10;
1200 val |= (temp & 0x7fff) << 7;
1201 } else {
1202 if (temp > 0x3fff)
1203 temp = 0x3fff;
1204 val &= ~0x003fff00;
1205 val |= (temp & 0x3fff) << 8;
1206 }
1207 wrlp(mp, SDMA_CONFIG, val);
1208}
1209
1210static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
1211{
1212 u64 temp;
1213
1214 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
1215 temp *= 64000000;
452503eb 1216 do_div(temp, mp->t_clk);
3e508034
LB
1217
1218 return (unsigned int)temp;
1219}
1220
1221static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1222{
1223 u64 temp;
1224
452503eb 1225 temp = (u64)usec * mp->t_clk;
3e508034
LB
1226 temp += 31999999;
1227 do_div(temp, 64000000);
1228
1229 if (temp > 0x3fff)
1230 temp = 0x3fff;
1231
1232 wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
1233}
1234
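/*
 * Worked example (illustrative): with t_clk = 200 MHz, a requested
 * coalescing delay of 100 usec gives
 * register_value = 100 * 200000000 / 64000000 = 312.5, which the
 * "+ 31999999; do_div(..., 64000000)" sequence rounds to 312.  Reading it
 * back, 312 * 64000000 / 200000000 truncates to 99 usec, so the get/set
 * methods are not always exact inverses.
 */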
1235
c9df406f 1236/* ethtool ******************************************************************/
e5371493 1237struct mv643xx_eth_stats {
c9df406f
LB
1238 char stat_string[ETH_GSTRING_LEN];
1239 int sizeof_stat;
16820054
LB
1240 int netdev_off;
1241 int mp_off;
c9df406f
LB
1242};
1243
16820054
LB
1244#define SSTAT(m) \
1245 { #m, FIELD_SIZEOF(struct net_device_stats, m), \
1246 offsetof(struct net_device, stats.m), -1 }
1247
1248#define MIBSTAT(m) \
1249 { #m, FIELD_SIZEOF(struct mib_counters, m), \
1250 -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
1251
1252static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
1253 SSTAT(rx_packets),
1254 SSTAT(tx_packets),
1255 SSTAT(rx_bytes),
1256 SSTAT(tx_bytes),
1257 SSTAT(rx_errors),
1258 SSTAT(tx_errors),
1259 SSTAT(rx_dropped),
1260 SSTAT(tx_dropped),
1261 MIBSTAT(good_octets_received),
1262 MIBSTAT(bad_octets_received),
1263 MIBSTAT(internal_mac_transmit_err),
1264 MIBSTAT(good_frames_received),
1265 MIBSTAT(bad_frames_received),
1266 MIBSTAT(broadcast_frames_received),
1267 MIBSTAT(multicast_frames_received),
1268 MIBSTAT(frames_64_octets),
1269 MIBSTAT(frames_65_to_127_octets),
1270 MIBSTAT(frames_128_to_255_octets),
1271 MIBSTAT(frames_256_to_511_octets),
1272 MIBSTAT(frames_512_to_1023_octets),
1273 MIBSTAT(frames_1024_to_max_octets),
1274 MIBSTAT(good_octets_sent),
1275 MIBSTAT(good_frames_sent),
1276 MIBSTAT(excessive_collision),
1277 MIBSTAT(multicast_frames_sent),
1278 MIBSTAT(broadcast_frames_sent),
1279 MIBSTAT(unrec_mac_control_received),
1280 MIBSTAT(fc_sent),
1281 MIBSTAT(good_fc_received),
1282 MIBSTAT(bad_fc_received),
1283 MIBSTAT(undersize_received),
1284 MIBSTAT(fragments_received),
1285 MIBSTAT(oversize_received),
1286 MIBSTAT(jabber_received),
1287 MIBSTAT(mac_receive_error),
1288 MIBSTAT(bad_crc_event),
1289 MIBSTAT(collision),
1290 MIBSTAT(late_collision),
302476c9
PZ
1291 MIBSTAT(rx_discard),
1292 MIBSTAT(rx_overrun),
c9df406f
LB
1293};
1294
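/*
 * How the table above is consumed (see mv643xx_eth_get_ethtool_stats()):
 * each entry records a field size plus either a netdev_off into the
 * net_device stats or an mp_off into the private mib_counters, and the
 * stats handler adds the matching offset to the base pointer and copies
 * sizeof_stat (4 or 8) bytes into the output array.  E.g.
 * MIBSTAT(good_octets_received) resolves to an 8-byte read at
 * offsetof(struct mv643xx_eth_private, mib_counters.good_octets_received).
 */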
10a9948d 1295static int
6bdf576e
LB
1296mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
1297 struct ethtool_cmd *cmd)
d0412d96 1298{
d0412d96
JC
1299 int err;
1300
ed94493f
LB
1301 err = phy_read_status(mp->phy);
1302 if (err == 0)
1303 err = phy_ethtool_gset(mp->phy, cmd);
d0412d96 1304
fc32b0e2
LB
1305 /*
1306 * The MAC does not support 1000baseT_Half.
1307 */
d0412d96
JC
1308 cmd->supported &= ~SUPPORTED_1000baseT_Half;
1309 cmd->advertising &= ~ADVERTISED_1000baseT_Half;
1310
1311 return err;
1312}
1313
10a9948d 1314static int
6bdf576e 1315mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
10a9948d 1316 struct ethtool_cmd *cmd)
bedfe324 1317{
81600eea
LB
1318 u32 port_status;
1319
37a6084f 1320 port_status = rdlp(mp, PORT_STATUS);
81600eea 1321
bedfe324
LB
1322 cmd->supported = SUPPORTED_MII;
1323 cmd->advertising = ADVERTISED_MII;
81600eea
LB
1324 switch (port_status & PORT_SPEED_MASK) {
1325 case PORT_SPEED_10:
70739497 1326 ethtool_cmd_speed_set(cmd, SPEED_10);
81600eea
LB
1327 break;
1328 case PORT_SPEED_100:
70739497 1329 ethtool_cmd_speed_set(cmd, SPEED_100);
81600eea
LB
1330 break;
1331 case PORT_SPEED_1000:
70739497 1332 ethtool_cmd_speed_set(cmd, SPEED_1000);
81600eea
LB
1333 break;
1334 default:
1335 cmd->speed = -1;
1336 break;
1337 }
1338 cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
bedfe324
LB
1339 cmd->port = PORT_MII;
1340 cmd->phy_address = 0;
1341 cmd->transceiver = XCVR_INTERNAL;
1342 cmd->autoneg = AUTONEG_DISABLE;
1343 cmd->maxtxpkt = 1;
1344 cmd->maxrxpkt = 1;
1345
1346 return 0;
1347}
1348
3871c387
MS
1349static void
1350mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1351{
1352 struct mv643xx_eth_private *mp = netdev_priv(dev);
1353 wol->supported = 0;
1354 wol->wolopts = 0;
1355 if (mp->phy)
1356 phy_ethtool_get_wol(mp->phy, wol);
1357}
1358
1359static int
1360mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1361{
1362 struct mv643xx_eth_private *mp = netdev_priv(dev);
1363 int err;
1364
1365 if (mp->phy == NULL)
1366 return -EOPNOTSUPP;
1367
1368 err = phy_ethtool_set_wol(mp->phy, wol);
1369 /* Given that mv643xx_eth works without the marvell-specific PHY driver,
1370 * this debugging hint is useful to have.
1371 */
1372 if (err == -EOPNOTSUPP)
1373 netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
1374 return err;
1375}
1376
6bdf576e
LB
1377static int
1378mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1379{
1380 struct mv643xx_eth_private *mp = netdev_priv(dev);
1381
1382 if (mp->phy != NULL)
1383 return mv643xx_eth_get_settings_phy(mp, cmd);
1384 else
1385 return mv643xx_eth_get_settings_phyless(mp, cmd);
1386}
1387
10a9948d
LB
1388static int
1389mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1da177e4 1390{
e5371493 1391 struct mv643xx_eth_private *mp = netdev_priv(dev);
260055bb 1392 int ret;
ab4384a6 1393
6bdf576e
LB
1394 if (mp->phy == NULL)
1395 return -EINVAL;
1396
fc32b0e2
LB
1397 /*
1398 * The MAC does not support 1000baseT_Half.
1399 */
1400 cmd->advertising &= ~ADVERTISED_1000baseT_Half;
1401
260055bb
PS
1402 ret = phy_ethtool_sset(mp->phy, cmd);
1403 if (!ret)
1404 mv643xx_adjust_pscr(mp);
1405 return ret;
c9df406f 1406}
1da177e4 1407
fc32b0e2
LB
1408static void mv643xx_eth_get_drvinfo(struct net_device *dev,
1409 struct ethtool_drvinfo *drvinfo)
c9df406f 1410{
6f39da2c
AL
1411 strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
1412 sizeof(drvinfo->driver));
68aad78c 1413 strlcpy(drvinfo->version, mv643xx_eth_driver_version,
6f39da2c
AL
1414 sizeof(drvinfo->version));
1415 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
1416 strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
16820054 1417 drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
c9df406f 1418}
1da177e4 1419
fc32b0e2 1420static int mv643xx_eth_nway_reset(struct net_device *dev)
c9df406f 1421{
e5371493 1422 struct mv643xx_eth_private *mp = netdev_priv(dev);
1da177e4 1423
6bdf576e
LB
1424 if (mp->phy == NULL)
1425 return -EINVAL;
1da177e4 1426
6bdf576e 1427 return genphy_restart_aneg(mp->phy);
bedfe324
LB
1428}
1429
3e508034
LB
1430static int
1431mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1432{
1433 struct mv643xx_eth_private *mp = netdev_priv(dev);
1434
1435 ec->rx_coalesce_usecs = get_rx_coal(mp);
1436 ec->tx_coalesce_usecs = get_tx_coal(mp);
1437
1438 return 0;
1439}
1440
1441static int
1442mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1443{
1444 struct mv643xx_eth_private *mp = netdev_priv(dev);
1445
1446 set_rx_coal(mp, ec->rx_coalesce_usecs);
1447 set_tx_coal(mp, ec->tx_coalesce_usecs);
1448
1449 return 0;
1450}
1451
e7d2f4db
LB
1452static void
1453mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1454{
1455 struct mv643xx_eth_private *mp = netdev_priv(dev);
1456
1457 er->rx_max_pending = 4096;
1458 er->tx_max_pending = 4096;
e7d2f4db
LB
1459
1460 er->rx_pending = mp->rx_ring_size;
1461 er->tx_pending = mp->tx_ring_size;
e7d2f4db
LB
1462}
1463
1464static int
1465mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1466{
1467 struct mv643xx_eth_private *mp = netdev_priv(dev);
1468
1469 if (er->rx_mini_pending || er->rx_jumbo_pending)
1470 return -EINVAL;
1471
1472 mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
1473 mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;
1474
1475 if (netif_running(dev)) {
1476 mv643xx_eth_stop(dev);
1477 if (mv643xx_eth_open(dev)) {
7542db8b
JP
1478 netdev_err(dev,
1479 "fatal error on re-opening device after ring param change\n");
e7d2f4db
LB
1480 return -ENOMEM;
1481 }
1482 }
1483
1484 return 0;
1485}
1486
d888b373
LB
1487
1488static int
c8f44aff 1489mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
d888b373
LB
1490{
1491 struct mv643xx_eth_private *mp = netdev_priv(dev);
3ad9b358 1492 bool rx_csum = features & NETIF_F_RXCSUM;
d888b373
LB
1493
1494 wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
1495
1496 return 0;
1497}
1498
fc32b0e2
LB
1499static void mv643xx_eth_get_strings(struct net_device *dev,
1500 uint32_t stringset, uint8_t *data)
c9df406f
LB
1501{
1502 int i;
1da177e4 1503
fc32b0e2
LB
1504 if (stringset == ETH_SS_STATS) {
1505 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
c9df406f 1506 memcpy(data + i * ETH_GSTRING_LEN,
16820054 1507 mv643xx_eth_stats[i].stat_string,
e5371493 1508 ETH_GSTRING_LEN);
c9df406f 1509 }
c9df406f
LB
1510 }
1511}
1da177e4 1512
fc32b0e2
LB
1513static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
1514 struct ethtool_stats *stats,
1515 uint64_t *data)
c9df406f 1516{
b9873841 1517 struct mv643xx_eth_private *mp = netdev_priv(dev);
c9df406f 1518 int i;
1da177e4 1519
8fd89211 1520 mv643xx_eth_get_stats(dev);
fc32b0e2 1521 mib_counters_update(mp);
1da177e4 1522
16820054
LB
1523 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
1524 const struct mv643xx_eth_stats *stat;
1525 void *p;
1526
1527 stat = mv643xx_eth_stats + i;
1528
1529 if (stat->netdev_off >= 0)
1530 p = ((void *)mp->dev) + stat->netdev_off;
1531 else
1532 p = ((void *)mp) + stat->mp_off;
1533
1534 data[i] = (stat->sizeof_stat == 8) ?
1535 *(uint64_t *)p : *(uint32_t *)p;
1da177e4 1536 }
c9df406f 1537}
1da177e4 1538
fc32b0e2 1539static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
c9df406f 1540{
fc32b0e2 1541 if (sset == ETH_SS_STATS)
16820054 1542 return ARRAY_SIZE(mv643xx_eth_stats);
fc32b0e2
LB
1543
1544 return -EOPNOTSUPP;
c9df406f 1545}
1da177e4 1546
e5371493 1547static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
fc32b0e2
LB
1548 .get_settings = mv643xx_eth_get_settings,
1549 .set_settings = mv643xx_eth_set_settings,
1550 .get_drvinfo = mv643xx_eth_get_drvinfo,
1551 .nway_reset = mv643xx_eth_nway_reset,
ed4ba4b5 1552 .get_link = ethtool_op_get_link,
3e508034
LB
1553 .get_coalesce = mv643xx_eth_get_coalesce,
1554 .set_coalesce = mv643xx_eth_set_coalesce,
e7d2f4db
LB
1555 .get_ringparam = mv643xx_eth_get_ringparam,
1556 .set_ringparam = mv643xx_eth_set_ringparam,
fc32b0e2
LB
1557 .get_strings = mv643xx_eth_get_strings,
1558 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
e5371493 1559 .get_sset_count = mv643xx_eth_get_sset_count,
ebad0a8d 1560 .get_ts_info = ethtool_op_get_ts_info,
3871c387
MS
1561 .get_wol = mv643xx_eth_get_wol,
1562 .set_wol = mv643xx_eth_set_wol,
c9df406f 1563};
1da177e4 1564
bea3348e 1565
c9df406f 1566/* address handling *********************************************************/
5daffe94 1567static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
c9df406f 1568{
66e63ffb
LB
1569 unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
1570 unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);
1da177e4 1571
5daffe94
LB
1572 addr[0] = (mac_h >> 24) & 0xff;
1573 addr[1] = (mac_h >> 16) & 0xff;
1574 addr[2] = (mac_h >> 8) & 0xff;
1575 addr[3] = mac_h & 0xff;
1576 addr[4] = (mac_l >> 8) & 0xff;
1577 addr[5] = mac_l & 0xff;
c9df406f 1578}
1da177e4 1579
66e63ffb 1580static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
c9df406f 1581{
66e63ffb
LB
1582 wrlp(mp, MAC_ADDR_HIGH,
1583 (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
1584 wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
c9df406f 1585}
d0412d96 1586
66e63ffb 1587static u32 uc_addr_filter_mask(struct net_device *dev)
c9df406f 1588{
ccffad25 1589 struct netdev_hw_addr *ha;
66e63ffb 1590 u32 nibbles;
1da177e4 1591
66e63ffb
LB
1592 if (dev->flags & IFF_PROMISC)
1593 return 0;
1da177e4 1594
66e63ffb 1595 nibbles = 1 << (dev->dev_addr[5] & 0x0f);
32e7bfc4 1596 netdev_for_each_uc_addr(ha, dev) {
ccffad25 1597 if (memcmp(dev->dev_addr, ha->addr, 5))
66e63ffb 1598 return 0;
ccffad25 1599 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
66e63ffb 1600 return 0;
ff561eef 1601
ccffad25 1602 nibbles |= 1 << (ha->addr[5] & 0x0f);
66e63ffb 1603 }
1da177e4 1604
66e63ffb 1605 return nibbles;
1da177e4
LT
1606}
1607
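/*
 * Worked example: with a primary address ending in 0xa3,
 * uc_addr_filter_mask() starts from nibbles = 1 << 3.  A secondary
 * unicast address ending in 0xa7 (same first 5 bytes, same high nibble)
 * adds 1 << 7, giving 0x0088; any address differing elsewhere makes the
 * function return 0, which switches the port to unicast promiscuous mode.
 * mv643xx_eth_program_unicast_filter() below then spreads the 16 nibble
 * bits over UNICAST_TABLE(port), one byte-wide enable per nibble.
 */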
66e63ffb 1608static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
1da177e4 1609{
e5371493 1610 struct mv643xx_eth_private *mp = netdev_priv(dev);
66e63ffb
LB
1611 u32 port_config;
1612 u32 nibbles;
1613 int i;
1da177e4 1614
cc9754b3 1615 uc_addr_set(mp, dev->dev_addr);
1da177e4 1616
6877f54e
PS
1617 port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
1618
66e63ffb
LB
1619 nibbles = uc_addr_filter_mask(dev);
1620 if (!nibbles) {
1621 port_config |= UNICAST_PROMISCUOUS_MODE;
6877f54e 1622 nibbles = 0xffff;
66e63ffb
LB
1623 }
1624
1625 for (i = 0; i < 16; i += 4) {
1626 int off = UNICAST_TABLE(mp->port_num) + i;
1627 u32 v;
1628
1629 v = 0;
1630 if (nibbles & 1)
1631 v |= 0x00000001;
1632 if (nibbles & 2)
1633 v |= 0x00000100;
1634 if (nibbles & 4)
1635 v |= 0x00010000;
1636 if (nibbles & 8)
1637 v |= 0x01000000;
1638 nibbles >>= 4;
1639
1640 wrl(mp, off, v);
1641 }
1642
66e63ffb 1643 wrlp(mp, PORT_CONFIG, port_config);
1da177e4
LT
1644}
1645
69876569
LB
1646static int addr_crc(unsigned char *addr)
1647{
1648 int crc = 0;
1649 int i;
1650
1651 for (i = 0; i < 6; i++) {
1652 int j;
1653
1654 crc = (crc ^ addr[i]) << 8;
1655 for (j = 7; j >= 0; j--) {
1656 if (crc & (0x100 << j))
1657 crc ^= 0x107 << j;
1658 }
1659 }
1660
1661 return crc;
1662}
1663
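/*
 * addr_crc() is a bitwise CRC-8 over the 6 address bytes using the
 * polynomial 0x107 (x^8 + x^2 + x + 1); the 8-bit result indexes the
 * 256-entry "other" multicast table.  Special multicast addresses of the
 * form 01:00:5e:00:00:xx skip the CRC and index the special table with
 * xx directly (see mv643xx_eth_program_multicast_filter() below).  As a
 * trivial check, an all-zero address hashes to entry 0.
 */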
66e63ffb 1664static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
1da177e4 1665{
fc32b0e2 1666 struct mv643xx_eth_private *mp = netdev_priv(dev);
66e63ffb
LB
1667 u32 *mc_spec;
1668 u32 *mc_other;
22bedad3 1669 struct netdev_hw_addr *ha;
fc32b0e2 1670 int i;
c8aaea25 1671
fc32b0e2 1672 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
66e63ffb
LB
1673 int port_num;
1674 u32 accept;
c8aaea25 1675
66e63ffb
LB
1676oom:
1677 port_num = mp->port_num;
1678 accept = 0x01010101;
fc32b0e2
LB
1679 for (i = 0; i < 0x100; i += 4) {
1680 wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
1681 wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
c9df406f
LB
1682 }
1683 return;
1684 }
c8aaea25 1685
82a5bd6a 1686 mc_spec = kmalloc(0x200, GFP_ATOMIC);
66e63ffb
LB
1687 if (mc_spec == NULL)
1688 goto oom;
1689 mc_other = mc_spec + (0x100 >> 2);
1690
1691 memset(mc_spec, 0, 0x100);
1692 memset(mc_other, 0, 0x100);
1da177e4 1693
22bedad3
JP
1694 netdev_for_each_mc_addr(ha, dev) {
1695 u8 *a = ha->addr;
66e63ffb
LB
1696 u32 *table;
1697 int entry;
1da177e4 1698
fc32b0e2 1699 if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
66e63ffb
LB
1700 table = mc_spec;
1701 entry = a[5];
fc32b0e2 1702 } else {
66e63ffb
LB
1703 table = mc_other;
1704 entry = addr_crc(a);
fc32b0e2 1705 }
66e63ffb 1706
2b448334 1707 table[entry >> 2] |= 1 << (8 * (entry & 3));
fc32b0e2 1708 }
66e63ffb
LB
1709
1710 for (i = 0; i < 0x100; i += 4) {
1711 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
1712 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
1713 }
1714
1715 kfree(mc_spec);
1716}
1717
1718static void mv643xx_eth_set_rx_mode(struct net_device *dev)
1719{
1720 mv643xx_eth_program_unicast_filter(dev);
1721 mv643xx_eth_program_multicast_filter(dev);
1722}
1723
1724static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
1725{
1726 struct sockaddr *sa = addr;
1727
a29ec08a 1728 if (!is_valid_ether_addr(sa->sa_data))
504f9b5a 1729 return -EADDRNOTAVAIL;
a29ec08a 1730
66e63ffb
LB
1731 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
1732
1733 netif_addr_lock_bh(dev);
1734 mv643xx_eth_program_unicast_filter(dev);
1735 netif_addr_unlock_bh(dev);
1736
1737 return 0;
c9df406f 1738}
c8aaea25 1739
c8aaea25 1740
c9df406f 1741/* rx/tx queue initialisation ***********************************************/
64da80a2 1742static int rxq_init(struct mv643xx_eth_private *mp, int index)
c9df406f 1743{
64da80a2 1744 struct rx_queue *rxq = mp->rxq + index;
8a578111
LB
1745 struct rx_desc *rx_desc;
1746 int size;
c9df406f
LB
1747 int i;
1748
64da80a2
LB
1749 rxq->index = index;
1750
e7d2f4db 1751 rxq->rx_ring_size = mp->rx_ring_size;
8a578111
LB
1752
1753 rxq->rx_desc_count = 0;
1754 rxq->rx_curr_desc = 0;
1755 rxq->rx_used_desc = 0;
1756
1757 size = rxq->rx_ring_size * sizeof(struct rx_desc);
1758
f7981c1c 1759 if (index == 0 && size <= mp->rx_desc_sram_size) {
8a578111
LB
1760 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
1761 mp->rx_desc_sram_size);
1762 rxq->rx_desc_dma = mp->rx_desc_sram_addr;
1763 } else {
eb0519b5
GP
1764 rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
1765 size, &rxq->rx_desc_dma,
1766 GFP_KERNEL);
f7ea3337
PJ
1767 }
1768
8a578111 1769 if (rxq->rx_desc_area == NULL) {
7542db8b 1770 netdev_err(mp->dev,
8a578111
LB
1771 "can't allocate rx ring (%d bytes)\n", size);
1772 goto out;
1773 }
1774 memset(rxq->rx_desc_area, 0, size);
1da177e4 1775
8a578111 1776 rxq->rx_desc_area_size = size;
9fa8e980 1777 rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
b2adaca9
JP
1778 GFP_KERNEL);
1779 if (rxq->rx_skb == NULL)
8a578111 1780 goto out_free;
8a578111 1781
64699336 1782 rx_desc = rxq->rx_desc_area;
8a578111 1783 for (i = 0; i < rxq->rx_ring_size; i++) {
9da78745
LB
1784 int nexti;
1785
1786 nexti = i + 1;
1787 if (nexti == rxq->rx_ring_size)
1788 nexti = 0;
1789
8a578111
LB
1790 rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
1791 nexti * sizeof(struct rx_desc);
1792 }
1793
8a578111
LB
1794 return 0;
1795
1796
1797out_free:
f7981c1c 1798 if (index == 0 && size <= mp->rx_desc_sram_size)
8a578111
LB
1799 iounmap(rxq->rx_desc_area);
1800 else
eb0519b5 1801 dma_free_coherent(mp->dev->dev.parent, size,
8a578111
LB
1802 rxq->rx_desc_area,
1803 rxq->rx_desc_dma);
1804
1805out:
1806 return -ENOMEM;
c9df406f 1807}
c8aaea25 1808
8a578111 1809static void rxq_deinit(struct rx_queue *rxq)
c9df406f 1810{
8a578111
LB
1811 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
1812 int i;
1813
1814 rxq_disable(rxq);
c8aaea25 1815
8a578111
LB
1816 for (i = 0; i < rxq->rx_ring_size; i++) {
1817 if (rxq->rx_skb[i]) {
1818 dev_kfree_skb(rxq->rx_skb[i]);
1819 rxq->rx_desc_count--;
1da177e4 1820 }
c8aaea25 1821 }
1da177e4 1822
8a578111 1823 if (rxq->rx_desc_count) {
7542db8b 1824 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
8a578111
LB
1825 rxq->rx_desc_count);
1826 }
1827
f7981c1c 1828 if (rxq->index == 0 &&
64da80a2 1829 rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
8a578111 1830 iounmap(rxq->rx_desc_area);
c9df406f 1831 else
eb0519b5 1832 dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
8a578111
LB
1833 rxq->rx_desc_area, rxq->rx_desc_dma);
1834
1835 kfree(rxq->rx_skb);
c9df406f 1836}
1da177e4 1837
3d6b35bc 1838static int txq_init(struct mv643xx_eth_private *mp, int index)
c9df406f 1839{
3d6b35bc 1840 struct tx_queue *txq = mp->txq + index;
13d64285
LB
1841 struct tx_desc *tx_desc;
1842 int size;
c9df406f 1843 int i;
1da177e4 1844
3d6b35bc
LB
1845 txq->index = index;
1846
e7d2f4db 1847 txq->tx_ring_size = mp->tx_ring_size;
13d64285
LB
1848
1849 txq->tx_desc_count = 0;
1850 txq->tx_curr_desc = 0;
1851 txq->tx_used_desc = 0;
1852
1853 size = txq->tx_ring_size * sizeof(struct tx_desc);
1854
f7981c1c 1855 if (index == 0 && size <= mp->tx_desc_sram_size) {
13d64285
LB
1856 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
1857 mp->tx_desc_sram_size);
1858 txq->tx_desc_dma = mp->tx_desc_sram_addr;
1859 } else {
eb0519b5
GP
1860 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
1861 size, &txq->tx_desc_dma,
1862 GFP_KERNEL);
13d64285
LB
1863 }
1864
1865 if (txq->tx_desc_area == NULL) {
7542db8b 1866 netdev_err(mp->dev,
13d64285 1867 "can't allocate tx ring (%d bytes)\n", size);
99ab08e0 1868 return -ENOMEM;
c9df406f 1869 }
13d64285
LB
1870 memset(txq->tx_desc_area, 0, size);
1871
1872 txq->tx_desc_area_size = size;
13d64285 1873
64699336 1874 tx_desc = txq->tx_desc_area;
13d64285 1875 for (i = 0; i < txq->tx_ring_size; i++) {
6b368f68 1876 struct tx_desc *txd = tx_desc + i;
9da78745
LB
1877 int nexti;
1878
1879 nexti = i + 1;
1880 if (nexti == txq->tx_ring_size)
1881 nexti = 0;
6b368f68
LB
1882
1883 txd->cmd_sts = 0;
1884 txd->next_desc_ptr = txq->tx_desc_dma +
13d64285
LB
1885 nexti * sizeof(struct tx_desc);
1886 }
1887
99ab08e0 1888 skb_queue_head_init(&txq->tx_skb);
c9df406f 1889
99ab08e0 1890 return 0;
c8aaea25 1891}
1da177e4 1892
13d64285 1893static void txq_deinit(struct tx_queue *txq)
c9df406f 1894{
13d64285 1895 struct mv643xx_eth_private *mp = txq_to_mp(txq);
fa3959f4 1896
13d64285 1897 txq_disable(txq);
1fa38c58 1898 txq_reclaim(txq, txq->tx_ring_size, 1);
1da177e4 1899
13d64285 1900 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
1da177e4 1901
f7981c1c 1902 if (txq->index == 0 &&
3d6b35bc 1903 txq->tx_desc_area_size <= mp->tx_desc_sram_size)
13d64285 1904 iounmap(txq->tx_desc_area);
c9df406f 1905 else
eb0519b5 1906 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
13d64285 1907 txq->tx_desc_area, txq->tx_desc_dma);
c9df406f 1908}
1da177e4 1909
1da177e4 1910
c9df406f 1911/* netdev ops and related ***************************************************/
1fa38c58
LB
1912static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
1913{
1914 u32 int_cause;
1915 u32 int_cause_ext;
1916
e0ca8410 1917 int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
1fa38c58
LB
1918 if (int_cause == 0)
1919 return 0;
1920
1921 int_cause_ext = 0;
e0ca8410
SB
1922 if (int_cause & INT_EXT) {
1923 int_cause &= ~INT_EXT;
37a6084f 1924 int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
e0ca8410 1925 }
1fa38c58 1926
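	/*
	 * Fold the per-queue cause bits into the driver's work bitmaps:
	 * TX-end causes start at bit 19 and RX causes at bit 2.  A queue
	 * only counts as "TX end" work if it is no longer enabled in
	 * TXQ_COMMAND.
	 */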
1fa38c58 1927 if (int_cause) {
37a6084f 1928 wrlp(mp, INT_CAUSE, ~int_cause);
1fa38c58 1929 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
37a6084f 1930 ~(rdlp(mp, TXQ_COMMAND) & 0xff);
1fa38c58
LB
1931 mp->work_rx |= (int_cause & INT_RX) >> 2;
1932 }
1933
1934 int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
1935 if (int_cause_ext) {
37a6084f 1936 wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
1fa38c58
LB
1937 if (int_cause_ext & INT_EXT_LINK_PHY)
1938 mp->work_link = 1;
1939 mp->work_tx |= int_cause_ext & INT_EXT_TX;
1940 }
1941
1942 return 1;
1943}
1944
1945static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1946{
1947 struct net_device *dev = (struct net_device *)dev_id;
1948 struct mv643xx_eth_private *mp = netdev_priv(dev);
1949
1950 if (unlikely(!mv643xx_eth_collect_events(mp)))
1951 return IRQ_NONE;
1952
37a6084f 1953 wrlp(mp, INT_MASK, 0);
1fa38c58
LB
1954 napi_schedule(&mp->napi);
1955
1956 return IRQ_HANDLED;
1957}
1958
2f7eb47a
LB
1959static void handle_link_event(struct mv643xx_eth_private *mp)
1960{
1961 struct net_device *dev = mp->dev;
1962 u32 port_status;
1963 int speed;
1964 int duplex;
1965 int fc;
1966
37a6084f 1967 port_status = rdlp(mp, PORT_STATUS);
2f7eb47a
LB
1968 if (!(port_status & LINK_UP)) {
1969 if (netif_carrier_ok(dev)) {
1970 int i;
1971
7542db8b 1972 netdev_info(dev, "link down\n");
2f7eb47a
LB
1973
1974 netif_carrier_off(dev);
2f7eb47a 1975
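			/*
			 * The link is gone: reclaim everything still queued
			 * for transmit and reset the hardware descriptor
			 * pointers, so the queues restart cleanly when the
			 * link comes back.
			 */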
f7981c1c 1976 for (i = 0; i < mp->txq_count; i++) {
2f7eb47a
LB
1977 struct tx_queue *txq = mp->txq + i;
1978
1fa38c58 1979 txq_reclaim(txq, txq->tx_ring_size, 1);
f7981c1c 1980 txq_reset_hw_ptr(txq);
2f7eb47a
LB
1981 }
1982 }
1983 return;
1984 }
1985
1986 switch (port_status & PORT_SPEED_MASK) {
1987 case PORT_SPEED_10:
1988 speed = 10;
1989 break;
1990 case PORT_SPEED_100:
1991 speed = 100;
1992 break;
1993 case PORT_SPEED_1000:
1994 speed = 1000;
1995 break;
1996 default:
1997 speed = -1;
1998 break;
1999 }
2000 duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
2001 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
2002
7542db8b
JP
2003 netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
2004 speed, duplex ? "full" : "half", fc ? "en" : "dis");
2f7eb47a 2005
4fdeca3f 2006 if (!netif_carrier_ok(dev))
2f7eb47a 2007 netif_carrier_on(dev);
2f7eb47a
LB
2008}
2009
1fa38c58 2010static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
c9df406f 2011{
1fa38c58
LB
2012 struct mv643xx_eth_private *mp;
2013 int work_done;
ce4e2e45 2014
1fa38c58 2015 mp = container_of(napi, struct mv643xx_eth_private, napi);
fc32b0e2 2016
1319ebad
LB
2017 if (unlikely(mp->oom)) {
2018 mp->oom = 0;
2019 del_timer(&mp->rx_oom);
2020 }
1da177e4 2021
1fa38c58
LB
2022 work_done = 0;
2023 while (work_done < budget) {
2024 u8 queue_mask;
2025 int queue;
2026 int work_tbd;
2027
2028 if (mp->work_link) {
2029 mp->work_link = 0;
2030 handle_link_event(mp);
26ef1f17 2031 work_done++;
1fa38c58
LB
2032 continue;
2033 }
1da177e4 2034
1319ebad
LB
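		/*
		 * Build a bitmap of queues with pending work (RX refill is
		 * skipped while allocations are failing), then service the
		 * highest-numbered queue first, at most 16 units of work per
		 * pass, so one queue cannot monopolise the budget.
		 */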
2035 queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
2036 if (likely(!mp->oom))
2037 queue_mask |= mp->work_rx_refill;
2038
1fa38c58
LB
2039 if (!queue_mask) {
2040 if (mv643xx_eth_collect_events(mp))
2041 continue;
2042 break;
2043 }
1da177e4 2044
1fa38c58
LB
2045 queue = fls(queue_mask) - 1;
2046 queue_mask = 1 << queue;
2047
2048 work_tbd = budget - work_done;
2049 if (work_tbd > 16)
2050 work_tbd = 16;
2051
2052 if (mp->work_tx_end & queue_mask) {
2053 txq_kick(mp->txq + queue);
2054 } else if (mp->work_tx & queue_mask) {
2055 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
2056 txq_maybe_wake(mp->txq + queue);
2057 } else if (mp->work_rx & queue_mask) {
2058 work_done += rxq_process(mp->rxq + queue, work_tbd);
1319ebad 2059 } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
1fa38c58
LB
2060 work_done += rxq_refill(mp->rxq + queue, work_tbd);
2061 } else {
2062 BUG();
2063 }
84dd619e 2064 }
fc32b0e2 2065
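	/*
	 * Budget not exhausted, so polling is done: if an skb allocation
	 * failed, arm a timer to retry the refill in ~100ms, then leave
	 * polling mode and unmask the port interrupts again.
	 */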
1fa38c58 2066 if (work_done < budget) {
1319ebad 2067 if (mp->oom)
1fa38c58
LB
2068 mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
2069 napi_complete(napi);
e0ca8410 2070 wrlp(mp, INT_MASK, mp->int_mask);
226bb6b7 2071 }
3d6b35bc 2072
1fa38c58
LB
2073 return work_done;
2074}
8fa89bf5 2075
1fa38c58
LB
2076static inline void oom_timer_wrapper(unsigned long data)
2077{
2078 struct mv643xx_eth_private *mp = (void *)data;
1da177e4 2079
1fa38c58 2080 napi_schedule(&mp->napi);
1da177e4
LT
2081}
2082
fc32b0e2 2083static void port_start(struct mv643xx_eth_private *mp)
1da177e4 2084{
d0412d96 2085 u32 pscr;
8a578111 2086 int i;
1da177e4 2087
bedfe324
LB
2088 /*
2089 * Perform PHY reset, if there is a PHY.
2090 */
ed94493f 2091 if (mp->phy != NULL) {
bedfe324
LB
2092 struct ethtool_cmd cmd;
2093
2094 mv643xx_eth_get_settings(mp->dev, &cmd);
7cd14636 2095 phy_init_hw(mp->phy);
bedfe324 2096 mv643xx_eth_set_settings(mp->dev, &cmd);
58911151 2097 phy_start(mp->phy);
bedfe324 2098 }
1da177e4 2099
81600eea
LB
2100 /*
2101 * Configure basic link parameters.
2102 */
37a6084f 2103 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
81600eea
LB
2104
2105 pscr |= SERIAL_PORT_ENABLE;
37a6084f 2106 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
81600eea
LB
2107
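	/*
	 * Never force a link-fail indication; with no PHY attached there is
	 * nothing to negotiate with, so additionally force the link up.
	 */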
2108 pscr |= DO_NOT_FORCE_LINK_FAIL;
ed94493f 2109 if (mp->phy == NULL)
81600eea 2110 pscr |= FORCE_LINK_PASS;
37a6084f 2111 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
81600eea 2112
13d64285
LB
2113 /*
2114 * Configure TX path and queues.
2115 */
89df5fdc 2116 tx_set_rate(mp, 1000000000, 16777216);
f7981c1c 2117 for (i = 0; i < mp->txq_count; i++) {
3d6b35bc 2118 struct tx_queue *txq = mp->txq + i;
13d64285 2119
6b368f68 2120 txq_reset_hw_ptr(txq);
89df5fdc
LB
2121 txq_set_rate(txq, 1000000000, 16777216);
2122 txq_set_fixed_prio_mode(txq);
13d64285
LB
2123 }
2124
d9a073ea
LB
2125 /*
2126 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
170e7108
LB
2127 * frames to RX queue #0, and include the pseudo-header when
2128 * calculating receive checksums.
d9a073ea 2129 */
e138f96b 2130 mv643xx_eth_set_features(mp->dev, mp->dev->features);
01999873 2131
376489a2
LB
2132 /*
2133 * Treat BPDUs as normal multicasts, and disable partition mode.
2134 */
37a6084f 2135 wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
01999873 2136
5a893922
LB
2137 /*
2138 * Add configured unicast addresses to address filter table.
2139 */
2140 mv643xx_eth_program_unicast_filter(mp->dev);
2141
8a578111 2142 /*
64da80a2 2143 * Enable the receive queues.
8a578111 2144 */
f7981c1c 2145 for (i = 0; i < mp->rxq_count; i++) {
64da80a2 2146 struct rx_queue *rxq = mp->rxq + i;
8a578111 2147 u32 addr;
1da177e4 2148
8a578111
LB
2149 addr = (u32)rxq->rx_desc_dma;
2150 addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
37a6084f 2151 wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
1da177e4 2152
8a578111
LB
2153 rxq_enable(rxq);
2154 }
1da177e4
LT
2155}
2156
2bcb4b0f
LB
2157static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
2158{
2159 int skb_size;
2160
2161 /*
2162 * Reserve 2+14 bytes for an ethernet header (the hardware
2163 * automatically prepends 2 bytes of dummy data to each
2164 * received packet), 16 bytes for up to four VLAN tags, and
2165 * 4 bytes for the trailing FCS -- 36 bytes total.
2166 */
2167 skb_size = mp->dev->mtu + 36;
2168
2169 /*
2170 * Make sure that the skb size is a multiple of 8 bytes, as
2171 * the lower three bits of the receive descriptor's buffer
2172 * size field are ignored by the hardware.
2173 */
2174 mp->skb_size = (skb_size + 7) & ~7;
7fd96ce4
LB
2175
2176 /*
2177 * If NET_SKB_PAD is smaller than a cache line,
2178 * netdev_alloc_skb() will cause skb->data to be misaligned
2179 * to a cache line boundary. If this is the case, include
2180 * some extra space to allow re-aligning the data area.
2181 */
2182 mp->skb_size += SKB_DMA_REALIGN;
2bcb4b0f
LB
2183}
2184
c9df406f 2185static int mv643xx_eth_open(struct net_device *dev)
16e03018 2186{
e5371493 2187 struct mv643xx_eth_private *mp = netdev_priv(dev);
c9df406f 2188 int err;
64da80a2 2189 int i;
16e03018 2190
37a6084f
LB
2191 wrlp(mp, INT_CAUSE, 0);
2192 wrlp(mp, INT_CAUSE_EXT, 0);
2193 rdlp(mp, INT_CAUSE_EXT);
c9df406f 2194
fc32b0e2 2195 err = request_irq(dev->irq, mv643xx_eth_irq,
2a1867a7 2196 IRQF_SHARED, dev->name, dev);
c9df406f 2197 if (err) {
7542db8b 2198 netdev_err(dev, "can't assign irq\n");
c9df406f 2199 return -EAGAIN;
16e03018
DF
2200 }
2201
2bcb4b0f
LB
2202 mv643xx_eth_recalc_skb_size(mp);
2203
2257e05c
LB
2204 napi_enable(&mp->napi);
2205
e0ca8410
SB
2206 mp->int_mask = INT_EXT;
2207
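	/*
	 * Start with only the extended-cause summary bit unmasked; the
	 * per-queue RX and TX-end bits are added below as each queue is
	 * brought up.
	 */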
f7981c1c 2208 for (i = 0; i < mp->rxq_count; i++) {
64da80a2
LB
2209 err = rxq_init(mp, i);
2210 if (err) {
2211 while (--i >= 0)
f7981c1c 2212 rxq_deinit(mp->rxq + i);
64da80a2
LB
2213 goto out;
2214 }
2215
1fa38c58 2216 rxq_refill(mp->rxq + i, INT_MAX);
e0ca8410 2217 mp->int_mask |= INT_RX_0 << i;
2257e05c
LB
2218 }
2219
1319ebad 2220 if (mp->oom) {
2257e05c
LB
2221 mp->rx_oom.expires = jiffies + (HZ / 10);
2222 add_timer(&mp->rx_oom);
64da80a2 2223 }
8a578111 2224
f7981c1c 2225 for (i = 0; i < mp->txq_count; i++) {
3d6b35bc
LB
2226 err = txq_init(mp, i);
2227 if (err) {
2228 while (--i >= 0)
f7981c1c 2229 txq_deinit(mp->txq + i);
3d6b35bc
LB
2230 goto out_free;
2231 }
e0ca8410 2232 mp->int_mask |= INT_TX_END_0 << i;
3d6b35bc 2233 }
16e03018 2234
f564412c 2235 add_timer(&mp->mib_counters_timer);
fc32b0e2 2236 port_start(mp);
16e03018 2237
37a6084f 2238 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
e0ca8410 2239 wrlp(mp, INT_MASK, mp->int_mask);
16e03018 2240
c9df406f
LB
2241 return 0;
2242
13d64285 2243
fc32b0e2 2244out_free:
f7981c1c
LB
2245 for (i = 0; i < mp->rxq_count; i++)
2246 rxq_deinit(mp->rxq + i);
fc32b0e2 2247out:
c9df406f
LB
2248 free_irq(dev->irq, dev);
2249
2250 return err;
16e03018
DF
2251}
2252
e5371493 2253static void port_reset(struct mv643xx_eth_private *mp)
1da177e4 2254{
fc32b0e2 2255 unsigned int data;
64da80a2 2256 int i;
1da177e4 2257
f7981c1c
LB
2258 for (i = 0; i < mp->rxq_count; i++)
2259 rxq_disable(mp->rxq + i);
2260 for (i = 0; i < mp->txq_count; i++)
2261 txq_disable(mp->txq + i);
ae9ae064
LB
2262
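	/*
	 * Wait for the transmitter to drain: done once TX_IN_PROGRESS is
	 * clear and TX_FIFO_EMPTY is set in the port status register.
	 */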
2263 while (1) {
37a6084f 2264 u32 ps = rdlp(mp, PORT_STATUS);
ae9ae064
LB
2265
2266 if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
2267 break;
13d64285 2268 udelay(10);
ae9ae064 2269 }
1da177e4 2270
c9df406f 2271 /* Reset the Enable bit in the Configuration Register */
37a6084f 2272 data = rdlp(mp, PORT_SERIAL_CONTROL);
fc32b0e2
LB
2273 data &= ~(SERIAL_PORT_ENABLE |
2274 DO_NOT_FORCE_LINK_FAIL |
2275 FORCE_LINK_PASS);
37a6084f 2276 wrlp(mp, PORT_SERIAL_CONTROL, data);
1da177e4
LT
2277}
2278
c9df406f 2279static int mv643xx_eth_stop(struct net_device *dev)
1da177e4 2280{
e5371493 2281 struct mv643xx_eth_private *mp = netdev_priv(dev);
64da80a2 2282 int i;
1da177e4 2283
fe65e704 2284 wrlp(mp, INT_MASK_EXT, 0x00000000);
37a6084f
LB
2285 wrlp(mp, INT_MASK, 0x00000000);
2286 rdlp(mp, INT_MASK);
1da177e4 2287
c9df406f 2288 napi_disable(&mp->napi);
78fff83b 2289
2257e05c
LB
2290 del_timer_sync(&mp->rx_oom);
2291
c9df406f 2292 netif_carrier_off(dev);
58911151
SH
2293 if (mp->phy)
2294 phy_stop(mp->phy);
fc32b0e2
LB
2295 free_irq(dev->irq, dev);
2296
cc9754b3 2297 port_reset(mp);
8fd89211 2298 mv643xx_eth_get_stats(dev);
fc32b0e2 2299 mib_counters_update(mp);
57e8f26a 2300 del_timer_sync(&mp->mib_counters_timer);
1da177e4 2301
f7981c1c
LB
2302 for (i = 0; i < mp->rxq_count; i++)
2303 rxq_deinit(mp->rxq + i);
2304 for (i = 0; i < mp->txq_count; i++)
2305 txq_deinit(mp->txq + i);
1da177e4 2306
c9df406f 2307 return 0;
1da177e4
LT
2308}
2309
fc32b0e2 2310static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1da177e4 2311{
e5371493 2312 struct mv643xx_eth_private *mp = netdev_priv(dev);
260055bb 2313 int ret;
1da177e4 2314
260055bb
PS
2315 if (mp->phy == NULL)
2316 return -ENOTSUPP;
bedfe324 2317
260055bb
PS
2318 ret = phy_mii_ioctl(mp->phy, ifr, cmd);
2319 if (!ret)
2320 mv643xx_adjust_pscr(mp);
2321 return ret;
1da177e4
LT
2322}
2323
c9df406f 2324static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
1da177e4 2325{
89df5fdc
LB
2326 struct mv643xx_eth_private *mp = netdev_priv(dev);
2327
fc32b0e2 2328 if (new_mtu < 64 || new_mtu > 9500)
c9df406f 2329 return -EINVAL;
1da177e4 2330
c9df406f 2331 dev->mtu = new_mtu;
2bcb4b0f 2332 mv643xx_eth_recalc_skb_size(mp);
89df5fdc
LB
2333 tx_set_rate(mp, 1000000000, 16777216);
2334
c9df406f
LB
2335 if (!netif_running(dev))
2336 return 0;
1da177e4 2337
c9df406f
LB
2338 /*
2339 * Stop and then re-open the interface. This will allocate RX
2340 * skbs of the new MTU.
 2341	 * There is a risk that the re-open will fail because
fc32b0e2 2342	 * memory is exhausted.
c9df406f
LB
2343 */
2344 mv643xx_eth_stop(dev);
2345 if (mv643xx_eth_open(dev)) {
7542db8b
JP
2346 netdev_err(dev,
2347 "fatal error on re-opening device after MTU change\n");
c9df406f
LB
2348 }
2349
2350 return 0;
1da177e4
LT
2351}
2352
fc32b0e2 2353static void tx_timeout_task(struct work_struct *ugly)
1da177e4 2354{
fc32b0e2 2355 struct mv643xx_eth_private *mp;
1da177e4 2356
fc32b0e2
LB
2357 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
2358 if (netif_running(mp->dev)) {
e5ef1de1 2359 netif_tx_stop_all_queues(mp->dev);
fc32b0e2
LB
2360 port_reset(mp);
2361 port_start(mp);
e5ef1de1 2362 netif_tx_wake_all_queues(mp->dev);
fc32b0e2 2363 }
c9df406f
LB
2364}
2365
c9df406f 2366static void mv643xx_eth_tx_timeout(struct net_device *dev)
1da177e4 2367{
e5371493 2368 struct mv643xx_eth_private *mp = netdev_priv(dev);
1da177e4 2369
7542db8b 2370 netdev_info(dev, "tx timeout\n");
d0412d96 2371
c9df406f 2372 schedule_work(&mp->tx_timeout_task);
1da177e4
LT
2373}
2374
c9df406f 2375#ifdef CONFIG_NET_POLL_CONTROLLER
fc32b0e2 2376static void mv643xx_eth_netpoll(struct net_device *dev)
9f8dd319 2377{
fc32b0e2 2378 struct mv643xx_eth_private *mp = netdev_priv(dev);
c9df406f 2379
37a6084f
LB
2380 wrlp(mp, INT_MASK, 0x00000000);
2381 rdlp(mp, INT_MASK);
c9df406f 2382
fc32b0e2 2383 mv643xx_eth_irq(dev->irq, dev);
c9df406f 2384
e0ca8410 2385 wrlp(mp, INT_MASK, mp->int_mask);
9f8dd319 2386}
c9df406f 2387#endif
9f8dd319 2388
9f8dd319 2389
c9df406f 2390/* platform glue ************************************************************/
e5371493
LB
2391static void
2392mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
63a9332b 2393 const struct mbus_dram_target_info *dram)
c9df406f 2394{
cc9754b3 2395 void __iomem *base = msp->base;
c9df406f
LB
2396 u32 win_enable;
2397 u32 win_protect;
2398 int i;
9f8dd319 2399
c9df406f
LB
2400 for (i = 0; i < 6; i++) {
2401 writel(0, base + WINDOW_BASE(i));
2402 writel(0, base + WINDOW_SIZE(i));
2403 if (i < 4)
2404 writel(0, base + WINDOW_REMAP_HIGH(i));
9f8dd319
DF
2405 }
2406
c9df406f
LB
2407 win_enable = 0x3f;
2408 win_protect = 0;
2409
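	/*
	 * Program one address window per DRAM chip select.  The BAR-enable
	 * bits are active-low, so clear the bit for every window that gets
	 * used, and grant it full read/write access (0x3) in the protect
	 * mask.
	 */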
2410 for (i = 0; i < dram->num_cs; i++) {
63a9332b 2411 const struct mbus_dram_window *cs = dram->cs + i;
c9df406f
LB
2412
2413 writel((cs->base & 0xffff0000) |
2414 (cs->mbus_attr << 8) |
2415 dram->mbus_dram_target_id, base + WINDOW_BASE(i));
2416 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
2417
2418 win_enable &= ~(1 << i);
2419 win_protect |= 3 << (2 * i);
2420 }
2421
2422 writel(win_enable, base + WINDOW_BAR_ENABLE);
2423 msp->win_protect = win_protect;
9f8dd319
DF
2424}
2425
773fc3ee
LB
2426static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
2427{
2428 /*
2429 * Check whether we have a 14-bit coal limit field in bits
2430 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
2431 * SDMA config register.
2432 */
37a6084f
LB
2433 writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
2434 if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
773fc3ee
LB
2435 msp->extended_rx_coal_limit = 1;
2436 else
2437 msp->extended_rx_coal_limit = 0;
1e881592
LB
2438
2439 /*
457b1d5a
LB
2440 * Check whether the MAC supports TX rate control, and if
2441 * yes, whether its associated registers are in the old or
2442 * the new place.
1e881592 2443 */
37a6084f
LB
2444 writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
2445 if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
457b1d5a
LB
2446 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
2447 } else {
37a6084f
LB
2448 writel(7, msp->base + 0x0400 + TX_BW_RATE);
2449 if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
457b1d5a
LB
2450 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
2451 else
2452 msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
2453 }
773fc3ee
LB
2454}
2455
76723bca
SH
2456#if defined(CONFIG_OF)
2457static const struct of_device_id mv643xx_eth_shared_ids[] = {
2458 { .compatible = "marvell,orion-eth", },
2459 { .compatible = "marvell,kirkwood-eth", },
2460 { }
2461};
2462MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
2463#endif
2464
2465#if defined(CONFIG_OF) && !defined(CONFIG_MV64X60)
2466#define mv643xx_eth_property(_np, _name, _v) \
2467 do { \
2468 u32 tmp; \
2469 if (!of_property_read_u32(_np, "marvell," _name, &tmp)) \
2470 _v = tmp; \
2471 } while (0)
2472
2473static struct platform_device *port_platdev[3];
2474
2475static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
2476 struct device_node *pnp)
2477{
2478 struct platform_device *ppdev;
2479 struct mv643xx_eth_platform_data ppd;
2480 struct resource res;
2481 const char *mac_addr;
2482 int ret;
785bf6f7 2483 int dev_num = 0;
76723bca
SH
2484
2485 memset(&ppd, 0, sizeof(ppd));
2486 ppd.shared = pdev;
2487
2488 memset(&res, 0, sizeof(res));
2489 if (!of_irq_to_resource(pnp, 0, &res)) {
2490 dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
2491 return -EINVAL;
2492 }
2493
2494 if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
2495 dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name);
2496 return -EINVAL;
2497 }
2498
2499 if (ppd.port_number >= 3) {
2500 dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name);
2501 return -EINVAL;
2502 }
2503
785bf6f7
JG
2504 while (dev_num < 3 && port_platdev[dev_num])
2505 dev_num++;
2506
2507 if (dev_num == 3) {
2508 dev_err(&pdev->dev, "too many ports registered\n");
2509 return -EINVAL;
2510 }
2511
76723bca
SH
2512 mac_addr = of_get_mac_address(pnp);
2513 if (mac_addr)
d458cdf7 2514 memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
76723bca
SH
2515
2516 mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
2517 mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
2518 mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
2519 mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
2520 mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
2521 mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);
2522
2523 ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
2524 if (!ppd.phy_node) {
2525 ppd.phy_addr = MV643XX_ETH_PHY_NONE;
2526 of_property_read_u32(pnp, "speed", &ppd.speed);
2527 of_property_read_u32(pnp, "duplex", &ppd.duplex);
2528 }
2529
785bf6f7 2530 ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
76723bca
SH
2531 if (!ppdev)
2532 return -ENOMEM;
2533 ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
b5d82db8 2534 ppdev->dev.of_node = pnp;
76723bca
SH
2535
2536 ret = platform_device_add_resources(ppdev, &res, 1);
2537 if (ret)
2538 goto port_err;
2539
2540 ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
2541 if (ret)
2542 goto port_err;
2543
2544 ret = platform_device_add(ppdev);
2545 if (ret)
2546 goto port_err;
2547
785bf6f7 2548 port_platdev[dev_num] = ppdev;
76723bca
SH
2549
2550 return 0;
2551
2552port_err:
2553 platform_device_put(ppdev);
2554 return ret;
2555}
2556
2557static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
2558{
2559 struct mv643xx_eth_shared_platform_data *pd;
2560 struct device_node *pnp, *np = pdev->dev.of_node;
2561 int ret;
2562
2563 /* bail out if not registered from DT */
2564 if (!np)
2565 return 0;
2566
2567 pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
2568 if (!pd)
2569 return -ENOMEM;
2570 pdev->dev.platform_data = pd;
2571
2572 mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);
2573
2574 for_each_available_child_of_node(np, pnp) {
2575 ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
2576 if (ret)
2577 return ret;
2578 }
2579 return 0;
2580}
2581
2582static void mv643xx_eth_shared_of_remove(void)
2583{
2584 int n;
2585
2586 for (n = 0; n < 3; n++) {
2587 platform_device_del(port_platdev[n]);
2588 port_platdev[n] = NULL;
2589 }
2590}
2591#else
ff20877a 2592static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
76723bca 2593{
ff20877a 2594 return 0;
76723bca
SH
2595}
2596
ff20877a
AB
2597static inline void mv643xx_eth_shared_of_remove(void)
2598{
2599}
76723bca
SH
2600#endif
2601
c9df406f 2602static int mv643xx_eth_shared_probe(struct platform_device *pdev)
9f8dd319 2603{
10a9948d 2604 static int mv643xx_eth_version_printed;
76723bca 2605 struct mv643xx_eth_shared_platform_data *pd;
e5371493 2606 struct mv643xx_eth_shared_private *msp;
63a9332b 2607 const struct mbus_dram_target_info *dram;
c9df406f 2608 struct resource *res;
76723bca 2609 int ret;
9f8dd319 2610
e5371493 2611 if (!mv643xx_eth_version_printed++)
7542db8b
JP
2612 pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
2613 mv643xx_eth_driver_version);
9f8dd319 2614
c9df406f
LB
2615 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2616 if (res == NULL)
727f957a 2617 return -EINVAL;
9f8dd319 2618
727f957a 2619 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
c9df406f 2620 if (msp == NULL)
727f957a 2621 return -ENOMEM;
76723bca 2622 platform_set_drvdata(pdev, msp);
c9df406f 2623
65a6f969 2624 msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
cc9754b3 2625 if (msp->base == NULL)
727f957a 2626 return -ENOMEM;
c9df406f 2627
20922486
SH
2628 msp->clk = devm_clk_get(&pdev->dev, NULL);
2629 if (!IS_ERR(msp->clk))
2630 clk_prepare_enable(msp->clk);
2631
c9df406f
LB
2632 /*
2633 * (Re-)program MBUS remapping windows if we are asked to.
2634 */
63a9332b
AL
2635 dram = mv_mbus_dram_info();
2636 if (dram)
2637 mv643xx_eth_conf_mbus_windows(msp, dram);
c9df406f 2638
76723bca
SH
2639 ret = mv643xx_eth_shared_of_probe(pdev);
2640 if (ret)
2641 return ret;
bbfa6d0a 2642 pd = dev_get_platdata(&pdev->dev);
76723bca 2643
50a749c1
DC
2644 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
2645 pd->tx_csum_limit : 9 * 1024;
773fc3ee 2646 infer_hw_params(msp);
fc32b0e2 2647
c9df406f 2648 return 0;
c9df406f
LB
2649}
2650
2651static int mv643xx_eth_shared_remove(struct platform_device *pdev)
2652{
e5371493 2653 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
c9df406f 2654
76723bca 2655 mv643xx_eth_shared_of_remove();
20922486
SH
2656 if (!IS_ERR(msp->clk))
2657 clk_disable_unprepare(msp->clk);
c9df406f 2658 return 0;
9f8dd319
DF
2659}
2660
c9df406f 2661static struct platform_driver mv643xx_eth_shared_driver = {
fc32b0e2
LB
2662 .probe = mv643xx_eth_shared_probe,
2663 .remove = mv643xx_eth_shared_remove,
c9df406f 2664 .driver = {
fc32b0e2 2665 .name = MV643XX_ETH_SHARED_NAME,
c9df406f 2666 .owner = THIS_MODULE,
76723bca 2667 .of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
c9df406f
LB
2668 },
2669};
2670
e5371493 2671static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
1da177e4 2672{
c9df406f 2673 int addr_shift = 5 * mp->port_num;
fc32b0e2 2674 u32 data;
1da177e4 2675
fc32b0e2
LB
2676 data = rdl(mp, PHY_ADDR);
2677 data &= ~(0x1f << addr_shift);
2678 data |= (phy_addr & 0x1f) << addr_shift;
2679 wrl(mp, PHY_ADDR, data);
1da177e4
LT
2680}
2681
e5371493 2682static int phy_addr_get(struct mv643xx_eth_private *mp)
1da177e4 2683{
fc32b0e2
LB
2684 unsigned int data;
2685
2686 data = rdl(mp, PHY_ADDR);
2687
2688 return (data >> (5 * mp->port_num)) & 0x1f;
2689}
2690
2691static void set_params(struct mv643xx_eth_private *mp,
2692 struct mv643xx_eth_platform_data *pd)
2693{
2694 struct net_device *dev = mp->dev;
2695
2696 if (is_valid_ether_addr(pd->mac_addr))
d458cdf7 2697 memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
fc32b0e2
LB
2698 else
2699 uc_addr_get(mp, dev->dev_addr);
2700
e7d2f4db 2701 mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
fc32b0e2 2702 if (pd->rx_queue_size)
e7d2f4db 2703 mp->rx_ring_size = pd->rx_queue_size;
fc32b0e2
LB
2704 mp->rx_desc_sram_addr = pd->rx_sram_addr;
2705 mp->rx_desc_sram_size = pd->rx_sram_size;
1da177e4 2706
f7981c1c 2707 mp->rxq_count = pd->rx_queue_count ? : 1;
64da80a2 2708
e7d2f4db 2709 mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
fc32b0e2 2710 if (pd->tx_queue_size)
e7d2f4db 2711 mp->tx_ring_size = pd->tx_queue_size;
fc32b0e2
LB
2712 mp->tx_desc_sram_addr = pd->tx_sram_addr;
2713 mp->tx_desc_sram_size = pd->tx_sram_size;
3d6b35bc 2714
f7981c1c 2715 mp->txq_count = pd->tx_queue_count ? : 1;
1da177e4
LT
2716}
2717
c3a07134
FF
2718static void mv643xx_eth_adjust_link(struct net_device *dev)
2719{
2720 struct mv643xx_eth_private *mp = netdev_priv(dev);
2721
2722 mv643xx_adjust_pscr(mp);
2723}
2724
ed94493f
LB
2725static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2726 int phy_addr)
1da177e4 2727{
ed94493f
LB
2728 struct phy_device *phydev;
2729 int start;
2730 int num;
2731 int i;
c3a07134 2732 char phy_id[MII_BUS_ID_SIZE + 3];
45c5d3bc 2733
ed94493f
LB
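	/*
	 * With no explicit address given, probe all 32 MDIO addresses
	 * starting from the one currently latched in the PHY_ADDR register;
	 * otherwise try only the requested address.
	 */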
2734 if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
2735 start = phy_addr_get(mp) & 0x1f;
2736 num = 32;
2737 } else {
2738 start = phy_addr & 0x1f;
2739 num = 1;
2740 }
45c5d3bc 2741
c3a07134 2742 /* Attempt to connect to the PHY using orion-mdio */
976c90b9 2743 phydev = ERR_PTR(-ENODEV);
ed94493f
LB
2744 for (i = 0; i < num; i++) {
2745 int addr = (start + i) & 0x1f;
fc32b0e2 2746
c3a07134
FF
2747 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
2748 "orion-mdio-mii", addr);
1da177e4 2749
c3a07134
FF
2750 phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
2751 PHY_INTERFACE_MODE_GMII);
2752 if (!IS_ERR(phydev)) {
2753 phy_addr_set(mp, addr);
2754 break;
ed94493f
LB
2755 }
2756 }
1da177e4 2757
ed94493f 2758 return phydev;
1da177e4
LT
2759}
2760
ed94493f 2761static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
c28a4f89 2762{
ed94493f 2763 struct phy_device *phy = mp->phy;
c28a4f89 2764
ed94493f
LB
2765 if (speed == 0) {
2766 phy->autoneg = AUTONEG_ENABLE;
2767 phy->speed = 0;
2768 phy->duplex = 0;
2769 phy->advertising = phy->supported | ADVERTISED_Autoneg;
c9df406f 2770 } else {
ed94493f
LB
2771 phy->autoneg = AUTONEG_DISABLE;
2772 phy->advertising = 0;
2773 phy->speed = speed;
2774 phy->duplex = duplex;
c9df406f 2775 }
ed94493f 2776 phy_start_aneg(phy);
c28a4f89
JC
2777}
2778
81600eea
LB
2779static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
2780{
2781 u32 pscr;
2782
37a6084f 2783 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
81600eea
LB
2784 if (pscr & SERIAL_PORT_ENABLE) {
2785 pscr &= ~SERIAL_PORT_ENABLE;
37a6084f 2786 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
81600eea
LB
2787 }
2788
2789 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
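	/*
	 * No PHY means no autonegotiation: hard-code the requested speed
	 * and duplex, and disable autoneg for speed, duplex and flow
	 * control.
	 */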
ed94493f 2790 if (mp->phy == NULL) {
81600eea
LB
2791 pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
2792 if (speed == SPEED_1000)
2793 pscr |= SET_GMII_SPEED_TO_1000;
2794 else if (speed == SPEED_100)
2795 pscr |= SET_MII_SPEED_TO_100;
2796
2797 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
2798
2799 pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
2800 if (duplex == DUPLEX_FULL)
2801 pscr |= SET_FULL_DUPLEX_MODE;
2802 }
2803
37a6084f 2804 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
81600eea
LB
2805}
2806
ea8a8642
LB
2807static const struct net_device_ops mv643xx_eth_netdev_ops = {
2808 .ndo_open = mv643xx_eth_open,
2809 .ndo_stop = mv643xx_eth_stop,
2810 .ndo_start_xmit = mv643xx_eth_xmit,
2811 .ndo_set_rx_mode = mv643xx_eth_set_rx_mode,
2812 .ndo_set_mac_address = mv643xx_eth_set_mac_address,
1d4bd947 2813 .ndo_validate_addr = eth_validate_addr,
ea8a8642
LB
2814 .ndo_do_ioctl = mv643xx_eth_ioctl,
2815 .ndo_change_mtu = mv643xx_eth_change_mtu,
aad59c43 2816 .ndo_set_features = mv643xx_eth_set_features,
ea8a8642
LB
2817 .ndo_tx_timeout = mv643xx_eth_tx_timeout,
2818 .ndo_get_stats = mv643xx_eth_get_stats,
2819#ifdef CONFIG_NET_POLL_CONTROLLER
2820 .ndo_poll_controller = mv643xx_eth_netpoll,
2821#endif
2822};
2823
c9df406f 2824static int mv643xx_eth_probe(struct platform_device *pdev)
1da177e4 2825{
c9df406f 2826 struct mv643xx_eth_platform_data *pd;
e5371493 2827 struct mv643xx_eth_private *mp;
c9df406f 2828 struct net_device *dev;
c9df406f 2829 struct resource *res;
fc32b0e2 2830 int err;
1da177e4 2831
bbfa6d0a 2832 pd = dev_get_platdata(&pdev->dev);
c9df406f 2833 if (pd == NULL) {
7542db8b 2834 dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
c9df406f
LB
2835 return -ENODEV;
2836 }
1da177e4 2837
c9df406f 2838 if (pd->shared == NULL) {
7542db8b 2839 dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
c9df406f
LB
2840 return -ENODEV;
2841 }
8f518703 2842
e5ef1de1 2843 dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
c9df406f
LB
2844 if (!dev)
2845 return -ENOMEM;
1da177e4 2846
c9df406f 2847 mp = netdev_priv(dev);
fc32b0e2
LB
2848 platform_set_drvdata(pdev, mp);
2849
2850 mp->shared = platform_get_drvdata(pd->shared);
37a6084f 2851 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
fc32b0e2
LB
2852 mp->port_num = pd->port_number;
2853
c9df406f 2854 mp->dev = dev;
78fff83b 2855
cb85215f
SH
 2856	/* Kirkwood resets some registers when their clocks are gated.
 2857	 * In particular CLK125_BYPASS_EN must be cleared here, but that
 2858	 * bit is not present on all other SoCs/system controllers using this driver.
2859 */
2860 if (of_device_is_compatible(pdev->dev.of_node,
2861 "marvell,kirkwood-eth-port"))
2862 wrlp(mp, PORT_SERIAL_CONTROL1,
2863 rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);
2864
452503eb 2865 /*
9a43a026
AL
2866 * Start with a default rate, and if there is a clock, allow
2867 * it to override the default.
452503eb 2868 */
9a43a026 2869 mp->t_clk = 133000000;
20922486 2870 mp->clk = devm_clk_get(&pdev->dev, NULL);
452503eb
AL
2871 if (!IS_ERR(mp->clk)) {
2872 clk_prepare_enable(mp->clk);
2873 mp->t_clk = clk_get_rate(mp->clk);
76723bca
SH
2874 } else if (!IS_ERR(mp->shared->clk)) {
2875 mp->t_clk = clk_get_rate(mp->shared->clk);
452503eb 2876 }
20922486 2877
fc32b0e2 2878 set_params(mp, pd);
206d6b32
BH
2879 netif_set_real_num_tx_queues(dev, mp->txq_count);
2880 netif_set_real_num_rx_queues(dev, mp->rxq_count);
fc32b0e2 2881
cc9d4598
SH
2882 err = 0;
2883 if (pd->phy_node) {
2884 mp->phy = of_phy_connect(mp->dev, pd->phy_node,
2885 mv643xx_eth_adjust_link, 0,
2886 PHY_INTERFACE_MODE_GMII);
2887 if (!mp->phy)
2888 err = -ENODEV;
6115c11f
DC
2889 else
2890 phy_addr_set(mp, mp->phy->addr);
cc9d4598 2891 } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
ed94493f 2892 mp->phy = phy_scan(mp, pd->phy_addr);
bedfe324 2893
cc9d4598 2894 if (IS_ERR(mp->phy))
976c90b9 2895 err = PTR_ERR(mp->phy);
cc9d4598
SH
2896 else
2897 phy_init(mp, pd->speed, pd->duplex);
976c90b9 2898 }
cc9d4598
SH
2899 if (err == -ENODEV) {
2900 err = -EPROBE_DEFER;
2901 goto out;
2902 }
2903 if (err)
2904 goto out;
6bdf576e 2905
7ad24ea4 2906 dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
ed94493f 2907
81600eea 2908 init_pscr(mp, pd->speed, pd->duplex);
fc32b0e2 2909
4ff3495a
LB
2910
2911 mib_counters_clear(mp);
2912
2913 init_timer(&mp->mib_counters_timer);
2914 mp->mib_counters_timer.data = (unsigned long)mp;
2915 mp->mib_counters_timer.function = mib_counters_timer_wrapper;
2916 mp->mib_counters_timer.expires = jiffies + 30 * HZ;
4ff3495a
LB
2917
2918 spin_lock_init(&mp->mib_counters_lock);
2919
2920 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
2921
a3659aa0 2922 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
2257e05c
LB
2923
2924 init_timer(&mp->rx_oom);
2925 mp->rx_oom.data = (unsigned long)mp;
2926 mp->rx_oom.function = oom_timer_wrapper;
2927
fc32b0e2 2928
c9df406f
LB
2929 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2930 BUG_ON(!res);
2931 dev->irq = res->start;
1da177e4 2932
ea8a8642
LB
2933 dev->netdev_ops = &mv643xx_eth_netdev_ops;
2934
c9df406f
LB
2935 dev->watchdog_timeo = 2 * HZ;
2936 dev->base_addr = 0;
1da177e4 2937
4d48d589
EG
2938 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2939 dev->vlan_features = dev->features;
2940
2941 dev->features |= NETIF_F_RXCSUM;
2942 dev->hw_features = dev->features;
1da177e4 2943
01789349
JP
2944 dev->priv_flags |= IFF_UNICAST_FLT;
2945
fc32b0e2 2946 SET_NETDEV_DEV(dev, &pdev->dev);
8f518703 2947
c9df406f 2948 if (mp->shared->win_protect)
fc32b0e2 2949 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
1da177e4 2950
a5fe3616
LB
2951 netif_carrier_off(dev);
2952
b5e86db4
LB
2953 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
2954
4fb0a54a 2955 set_rx_coal(mp, 250);
a5fe3616
LB
2956 set_tx_coal(mp, 0);
2957
c9df406f
LB
2958 err = register_netdev(dev);
2959 if (err)
2960 goto out;
1da177e4 2961
7542db8b
JP
2962 netdev_notice(dev, "port %d with MAC address %pM\n",
2963 mp->port_num, dev->dev_addr);
1da177e4 2964
13d64285 2965 if (mp->tx_desc_sram_size > 0)
7542db8b 2966 netdev_notice(dev, "configured with sram\n");
1da177e4 2967
c9df406f 2968 return 0;
1da177e4 2969
c9df406f 2970out:
20922486 2971 if (!IS_ERR(mp->clk))
baffab28 2972 clk_disable_unprepare(mp->clk);
c9df406f 2973 free_netdev(dev);
1da177e4 2974
c9df406f 2975 return err;
1da177e4
LT
2976}
2977
c9df406f 2978static int mv643xx_eth_remove(struct platform_device *pdev)
1da177e4 2979{
fc32b0e2 2980 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
1da177e4 2981
fc32b0e2 2982 unregister_netdev(mp->dev);
ed94493f 2983 if (mp->phy != NULL)
cec753f5 2984 phy_disconnect(mp->phy);
23f333a2 2985 cancel_work_sync(&mp->tx_timeout_task);
452503eb 2986
20922486 2987 if (!IS_ERR(mp->clk))
452503eb 2988 clk_disable_unprepare(mp->clk);
9a43a026 2989
fc32b0e2 2990 free_netdev(mp->dev);
c9df406f 2991
c9df406f 2992 return 0;
1da177e4
LT
2993}
2994
c9df406f 2995static void mv643xx_eth_shutdown(struct platform_device *pdev)
d0412d96 2996{
fc32b0e2 2997 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
d0412d96 2998
c9df406f 2999 /* Mask all interrupts on ethernet port */
37a6084f
LB
3000 wrlp(mp, INT_MASK, 0);
3001 rdlp(mp, INT_MASK);
c9df406f 3002
fc32b0e2
LB
3003 if (netif_running(mp->dev))
3004 port_reset(mp);
d0412d96
JC
3005}
3006
c9df406f 3007static struct platform_driver mv643xx_eth_driver = {
fc32b0e2
LB
3008 .probe = mv643xx_eth_probe,
3009 .remove = mv643xx_eth_remove,
3010 .shutdown = mv643xx_eth_shutdown,
c9df406f 3011 .driver = {
fc32b0e2 3012 .name = MV643XX_ETH_NAME,
c9df406f
LB
3013 .owner = THIS_MODULE,
3014 },
3015};
3016
e5371493 3017static int __init mv643xx_eth_init_module(void)
d0412d96 3018{
c9df406f 3019 int rc;
d0412d96 3020
c9df406f
LB
3021 rc = platform_driver_register(&mv643xx_eth_shared_driver);
3022 if (!rc) {
3023 rc = platform_driver_register(&mv643xx_eth_driver);
3024 if (rc)
3025 platform_driver_unregister(&mv643xx_eth_shared_driver);
3026 }
fc32b0e2 3027
c9df406f 3028 return rc;
d0412d96 3029}
fc32b0e2 3030module_init(mv643xx_eth_init_module);
d0412d96 3031
e5371493 3032static void __exit mv643xx_eth_cleanup_module(void)
d0412d96 3033{
c9df406f
LB
3034 platform_driver_unregister(&mv643xx_eth_driver);
3035 platform_driver_unregister(&mv643xx_eth_shared_driver);
d0412d96 3036}
e5371493 3037module_exit(mv643xx_eth_cleanup_module);
1da177e4 3038
45675bc6
LB
3039MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
3040 "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
c9df406f 3041MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
fc32b0e2 3042MODULE_LICENSE("GPL");
c9df406f 3043MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
fc32b0e2 3044MODULE_ALIAS("platform:" MV643XX_ETH_NAME);