/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009-2012 Cavium, Inc
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/* Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

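/* With a power-of-two ring size, the compiler can turn the
 * "(i + 1) % SIZE" index updates used throughout this driver into a
 * simple bitwise AND with (SIZE - 1).
 */
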
/* Allow 8 bytes for vlan and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
	u64 d64;
	struct {
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
		/* Physical address of the buffer */
		u64 addr:40;
	} s;
};

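/* The d64 member overlays the bitfields above, so a ring slot can be
 * read or written as a single 64-bit value; this is the layout the MIX
 * DMA engine consumes directly from the rings in memory.
 */
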
#define MIX_ORING1	0x0
#define MIX_ORING2	0x8
#define MIX_IRING1	0x10
#define MIX_IRING2	0x18
#define MIX_CTL		0x20
#define MIX_IRHWM	0x28
#define MIX_IRCNT	0x30
#define MIX_ORHWM	0x38
#define MIX_ORCNT	0x40
#define MIX_ISR		0x48
#define MIX_INTENA	0x50
#define MIX_REMCNT	0x58
#define MIX_BIST	0x78

#define AGL_GMX_PRT_CFG			0x10
#define AGL_GMX_RX_FRM_CTL		0x18
#define AGL_GMX_RX_FRM_MAX		0x30
#define AGL_GMX_RX_JABBER		0x38
#define AGL_GMX_RX_STATS_CTL		0x50

#define AGL_GMX_RX_STATS_PKTS_DRP	0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP	0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD	0xc0

#define AGL_GMX_RX_ADR_CTL		0x100
#define AGL_GMX_RX_ADR_CAM_EN		0x108
#define AGL_GMX_RX_ADR_CAM0		0x180
#define AGL_GMX_RX_ADR_CAM1		0x188
#define AGL_GMX_RX_ADR_CAM2		0x190
#define AGL_GMX_RX_ADR_CAM3		0x198
#define AGL_GMX_RX_ADR_CAM4		0x1a0
#define AGL_GMX_RX_ADR_CAM5		0x1a8

#define AGL_GMX_TX_CLK			0x208
#define AGL_GMX_TX_STATS_CTL		0x268
#define AGL_GMX_TX_CTL			0x270
#define AGL_GMX_TX_STAT0		0x280
#define AGL_GMX_TX_STAT1		0x288
#define AGL_GMX_TX_STAT2		0x290
#define AGL_GMX_TX_STAT3		0x298
#define AGL_GMX_TX_STAT4		0x2a0
#define AGL_GMX_TX_STAT5		0x2a8
#define AGL_GMX_TX_STAT6		0x2b0
#define AGL_GMX_TX_STAT7		0x2b8
#define AGL_GMX_TX_STAT8		0x2c0
#define AGL_GMX_TX_STAT9		0x2c8

struct octeon_mgmt {
	struct net_device *netdev;
	u64 mix;
	u64 agl;
	u64 agl_prt_ctl;
	int port;
	int irq;
	bool has_rx_tstamp;
	u64 *tx_ring;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;

	/* RX variables only touched in napi_poll. No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;

	spinlock_t lock;
	unsigned int last_duplex;
	unsigned int last_link;
	unsigned int last_speed;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct phy_device *phydev;
	struct device_node *phy_np;
	resource_size_t mix_phys;
	resource_size_t mix_size;
	resource_size_t agl_phys;
	resource_size_t agl_size;
	resource_size_t agl_prt_ctl_phys;
	resource_size_t agl_prt_ctl_size;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}

static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 0);
}

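/* The rings are never filled completely: a margin of 8 unused entries
 * keeps the software fill index from catching up with entries the
 * hardware has not yet consumed. The exact size of the margin is a
 * driver choice, not something the surrounding code mandates.
 */
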
static unsigned int ring_max_fill(unsigned int ring_size)
{
	return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
	return ring_size * sizeof(union mgmt_port_ring_entry);
}

static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding. */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring. */
		p->rx_ring[p->rx_next_fill] = re.d64;
		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell. */
		cvmx_write_csr(p->mix + MIX_IRING2, 1);
	}
}

static ktime_t ptp_to_ktime(u64 ptptime)
{
	ktime_t ktimebase;
	u64 ptpbase;
	unsigned long flags;

	local_irq_save(flags);
	/* Fill the icache with the code */
	ktime_get_real();
	/* Flush all pending operations */
	mb();
	/* Read the time and PTP clock as close together as
	 * possible. It is important that this sequence take the same
	 * amount of time to reduce jitter
	 */
	ktimebase = ktime_get_real();
	ptpbase = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_HI);
	local_irq_restore(flags);

	return ktime_sub_ns(ktimebase, ptpbase - ptptime);
}

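/* The subtraction above converts a raw PTP counter sample to wall-clock
 * time: now - (ptp_now - ptp_sample) is the system time at the instant
 * the hardware latched ptptime.
 */
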
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer. */
		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);

		/* Read the hardware TX timestamp if one was recorded */
		if (unlikely(re.s.tstamp)) {
			struct skb_shared_hwtstamps ts;
			/* Read the timestamp */
			u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
			/* Remove the timestamp from the FIFO */
			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
			/* Tell the kernel about the timestamp */
			ts.syststamp = ptp_to_ktime(ns);
			ts.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ts);
		}

		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	}

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}

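/* TX cleanup is deferred to a tasklet so the hard IRQ handler can
 * return quickly; the tasklet re-enables the TX interrupt once the ring
 * has been drained.
 */
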
static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	u64 drop, bad;

	/* These reads also clear the count registers. */
	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

	if (drop || bad) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.rx_errors += bad;
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	union cvmx_agl_gmx_txx_stat0 s0;
	union cvmx_agl_gmx_txx_stat1 s1;

	/* These reads also clear the count registers. */
	s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
	s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);

	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

/*
 * Dequeue a receive skb and its corresponding ring entry. The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;

	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
				DMA_BIDIRECTIONAL);

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	*pskb = __skb_dequeue(&p->rx_list);

	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
			 DMA_FROM_DEVICE);

	return re.d64;
}

static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;

	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up. */
		skb_put(skb, re.s.len);
good:
		/* Process the RX timestamp if it was recorded */
		if (p->has_rx_tstamp) {
			/* The first 8 bytes are the timestamp */
			u64 ns = *(u64 *)skb->data;
			struct skb_shared_hwtstamps *ts;
			ts = skb_hwtstamps(skb);
			ts->hwtstamp = ns_to_ktime(ns);
			ts->syststamp = ptp_to_ktime(ns);
			__skb_pull(skb, 8);
		}
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/* Packet split across skbs. This can happen if we
		 * increase the MTU. Buffers that are already in the
		 * rx ring can then end up being too small. As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * non-split case.
		 */
		skb_put(skb, re.s.len);
		do {
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
			    && re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2, re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it. */
		dev_kfree_skb_any(skb);
		/* Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess. */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;

done:
	/* Tell the hardware we processed a packet. */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
	return rc;
}

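/* The MIX count registers (MIX_IRCNT/MIX_ORCNT) appear to be
 * subtract-on-write: writing 1 acknowledges a single processed entry,
 * which is why each packet is acked individually above. (Inferred from
 * the ack pattern in this driver; the hardware reference manual is the
 * authority here.)
 */
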
static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
	unsigned int work_done = 0;
	union cvmx_mixx_ircnt mix_ircnt;
	int rc;

	mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	while (work_done < budget && mix_ircnt.s.ircnt) {

		rc = octeon_mgmt_receive_one(p);
		if (!rc)
			work_done++;

		/* Check for more packets. */
		mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	}

	octeon_mgmt_rx_fill_ring(p->netdev);

	return work_done;
}

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available. */
		napi_complete(napi);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);

	return work_done;
}

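/* Standard NAPI contract: napi_complete() and the RX interrupt are only
 * re-armed when less than the full budget was used, i.e. the ring was
 * drained; otherwise the poll function will be invoked again.
 */
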
/* Reset the hardware to clean state. */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	mix_ctl.u64 = 0;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
	} while (mix_ctl.s.busy);
	mix_ctl.s.reset = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	cvmx_read_csr(p->mix + MIX_CTL);
	octeon_io_clk_delay(64);

	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}

struct octeon_mgmt_cam_state {
	u64 cam[6];
	u64 cam_mask;
	int cam_index;
};

static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
				      unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
	cs->cam_mask |= (1ULL << cs->cam_index);
	cs->cam_index++;
}

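/* CAM layout, as implied by the loop above: each of the six CAM
 * registers holds one byte-plane of up to eight MAC addresses, so byte
 * i of CAM entry j lives at bits [8*j+7:8*j] of cam[i]; cam_mask
 * enables individual entries.
 */
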
static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/* One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast. */
		else
			multicast_mode = 0; /* 0 - Use CAM. */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1; /* Allow broadcast */

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
	int r = eth_mac_addr(netdev, addr);

	if (r)
		return r;

	octeon_mgmt_set_rx_filtering(netdev);

	return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;

	/* Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 16383 bytes.
	 */
	if (size_without_fcs < 64 || size_without_fcs > 16383) {
		dev_warn(p->dev, "MTU must be between %d and %d.\n",
			 64 - OCTEON_MGMT_RX_HEADROOM,
			 16383 - OCTEON_MGMT_RX_HEADROOM);
		return -EINVAL;
	}

	netdev->mtu = new_mtu;

	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
		       (size_without_fcs + 7) & 0xfff8);

	return 0;
}

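/* The jabber threshold written above is rounded up to a multiple of 8
 * bytes: (x + 7) & 0xfff8 adds 7 and then clears the low three bits.
 */
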
static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
	cvmx_read_csr(p->mix + MIX_ISR);

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}

static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
				      struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct hwtstamp_config config;
	union cvmx_mio_ptp_clock_cfg ptp;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	bool have_hw_timestamps = false;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Check the status of hardware for timestamps */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		/* Get the current state of the PTP clock */
		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
		if (!ptp.s.ext_clk_en) {
			/* The clock has not been configured to use an
			 * external source. Program it to use the main clock
			 * reference.
			 */
			u64 clock_comp = (NSEC_PER_SEC << 32) /
				octeon_get_io_clock_rate();
			if (!ptp.s.ptp_en)
				cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
			pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
				(NSEC_PER_SEC << 32) / clock_comp);
		} else {
			/* The clock is already programmed to use a GPIO */
			u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
			pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
				ptp.s.ext_clk_in,
				(NSEC_PER_SEC << 32) / clock_comp);
		}

		/* Enable the clock if it wasn't done already */
		if (!ptp.s.ptp_en) {
			ptp.s.ptp_en = 1;
			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
		}
		have_hw_timestamps = true;
	}

	if (!have_hw_timestamps)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		p->has_rx_tstamp = false;
		rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
		rxx_frm_ctl.s.ptp_mode = 0;
		cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		p->has_rx_tstamp = have_hw_timestamps;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		if (p->has_rx_tstamp) {
			rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
			rxx_frm_ctl.s.ptp_mode = 1;
			cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		}
		break;
	default:
		return -ERANGE;
	}

	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
	default:
		if (p->phydev)
			return phy_mii_ioctl(p->phydev, rq, cmd);
		return -EINVAL;
	}
}

static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Disable GMX before we make any changes. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.en = 0;
	prtx_cfg.s.tx_en = 0;
	prtx_cfg.s.rx_en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		int i;
		for (i = 0; i < 10; i++) {
			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
			if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
				break;
			mdelay(1);
		}
	}
}

static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Restore the GMX enable state only if link is set */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
}

static void octeon_mgmt_update_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (!p->phydev->link)
		prtx_cfg.s.duplex = 1;
	else
		prtx_cfg.s.duplex = p->phydev->duplex;

	switch (p->phydev->speed) {
	case 10:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 1;
		}
		break;
	case 100:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 0;
		}
		break;
	case 1000:
		/* 1000 MBits is only supported on 6XXX chips */
		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.speed = 1;
			prtx_cfg.s.speed_msb = 0;
			/* Only matters for half-duplex */
			prtx_cfg.s.slottime = 1;
			prtx_cfg.s.burst = p->phydev->duplex;
		}
		break;
	case 0: /* No link */
	default:
		break;
	}

	/* Write the new GMX setting with the port still disabled. */
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	/* Read GMX CFG again to make sure the config is completed. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		union cvmx_agl_gmx_txx_clk agl_clk;
		union cvmx_agl_prtx_ctl prtx_ctl;

		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
		/* MII (both speeds) and RGMII 1000 speed. */
		agl_clk.s.clk_cnt = 1;
		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
			if (p->phydev->speed == 10)
				agl_clk.s.clk_cnt = 50;
			else if (p->phydev->speed == 100)
				agl_clk.s.clk_cnt = 5;
		}
		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
	}
}

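/* In RGMII mode the TX clock is derived from the 125 MHz reference:
 * divide by 1 for 1000 Mbps (125 MHz), by 5 for 100 Mbps (25 MHz) and
 * by 50 for 10 Mbps (2.5 MHz), matching the clk_cnt values above.
 */
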
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	int link_changed = 0;

	if (!p->phydev)
		return;

	spin_lock_irqsave(&p->lock, flags);

	if (!p->phydev->link && p->last_link)
		link_changed = -1;

	if (p->phydev->link
	    && (p->last_duplex != p->phydev->duplex
		|| p->last_link != p->phydev->link
		|| p->last_speed != p->phydev->speed)) {
		octeon_mgmt_disable_link(p);
		link_changed = 1;
		octeon_mgmt_update_link(p);
		octeon_mgmt_enable_link(p);
	}

	p->last_link = p->phydev->link;
	p->last_speed = p->phydev->speed;
	p->last_duplex = p->phydev->duplex;

	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0) {
			pr_info("%s: Link is up - %d/%s\n", netdev->name,
				p->phydev->speed,
				DUPLEX_FULL == p->phydev->duplex ?
				"Full" : "Half");
		} else {
			pr_info("%s: Link is down\n", netdev->name);
		}
	}
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (octeon_is_simulation() || p->phy_np == NULL) {
		/* No PHYs in the simulator. */
		netif_carrier_on(netdev);
		return 0;
	}

	p->phydev = of_phy_connect(netdev, p->phy_np,
				   octeon_mgmt_adjust_link, 0,
				   PHY_INTERFACE_MODE_MII);

	if (!p->phydev)
		return -ENODEV;

	return 0;
}

static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers. */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (p->tx_ring == NULL)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (p->rx_ring == NULL)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (p->port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

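	/* The ring base registers hold a 64-bit-word address: the DMA
	 * handles are 8-byte aligned, so the low three bits are dropped
	 * with the ">> 3" when programming obase/ibase above.
	 */
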
	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/* Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;	/* Strip the ending CRC */
	mix_ctl.s.en = 1;		/* Enable the port */
	mix_ctl.s.nbtarb = 0;		/* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
#endif
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	/* Read the PHY to find the mode of the interface. */
	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
		goto err_noirq;
	}

	/* Set the mode of the interface, RGMII/MII. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
		union cvmx_agl_prtx_ctl agl_prtx_ctl;
		int rgmii_mode = (p->phydev->supported &
				  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;

		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* MII clocks counts are based on the 125Mhz
		 * reference, which has an 8nS period. So our delays
		 * need to be multiplied by this factor.
		 */
#define NS_PER_PHY_CLK 8

		/* Take the DLL and clock tree out of reset */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.clkrst = 0;
		if (rgmii_mode) {
			agl_prtx_ctl.s.dllrst = 0;
			agl_prtx_ctl.s.clktx_byp = 0;
		}
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

		/* Wait for the DLL to lock. External 125 MHz
		 * reference clock must be stable at this point.
		 */
		ndelay(256 * NS_PER_PHY_CLK);

		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);

		/* Enable the compensation controller */
		agl_prtx_ctl.s.comp = 1;
		agl_prtx_ctl.s.drv_byp = 0;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		/* Force write out before wait. */
		cvmx_read_csr(p->agl_prt_ctl);

		/* Wait for the compensation state to lock. */
		ndelay(1040 * NS_PER_PHY_CLK);

		/* Some Ethernet switches cannot handle standard
		 * Interframe Gap, increase to 16 bytes.
		 */
		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean. */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 0;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);

	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
	rxx_frm_ctl.s.pre_align = 1;
	/* When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assigned Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/* This port is configured to send PREAMBLE+SFD to begin every
	 * frame. GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Configure the port duplex, speed and enables */
	octeon_mgmt_disable_link(p);
	if (p->phydev)
		octeon_mgmt_update_link(p);
	octeon_mgmt_enable_link(p);

	p->last_link = 0;
	p->last_speed = 0;
	/* PHY is not present in simulator. The carrier is enabled
	 * while initializing the phy for simulator, leave it enabled.
	 */
	if (p->phydev) {
		netif_carrier_off(netdev);
		phy_start_aneg(p->phydev);
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}

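/* Teardown mirrors octeon_mgmt_open() in reverse: quiesce NAPI and the
 * TX queue, detach the PHY, reset the hardware, release the IRQ, then
 * free the queued skbs and the rings.
 */
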
static int octeon_mgmt_stop(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	if (p->phydev)
		phy_disconnect(p->phydev);
	p->phydev = NULL;

	netif_carrier_off(netdev);

	octeon_mgmt_reset_hw(p);

	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything. */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);

	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);

	return 0;
}

static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union mgmt_port_ring_entry re;
	unsigned long flags;
	int rv = NETDEV_TX_BUSY;

	re.d64 = 0;
	re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,
				   skb->len,
				   DMA_TO_DEVICE);

	spin_lock_irqsave(&p->tx_list.lock, flags);

	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		netif_stop_queue(netdev);
		spin_lock_irqsave(&p->tx_list.lock, flags);
	}

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		goto out;
	}

	__skb_queue_tail(&p->tx_list, skb);

	/* Put it in the ring. */
	p->tx_ring[p->tx_next] = re.d64;
	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
	p->tx_current_fill++;

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
				   DMA_BIDIRECTIONAL);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	/* Ring the bell. */
	cvmx_write_csr(p->mix + MIX_ORING2, 1);

	netdev->trans_start = jiffies;
	rv = NETDEV_TX_OK;
out:
	octeon_mgmt_update_tx_stats(netdev);

	return rv;
}

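/* MIX_ORING2 above acts as the output-ring doorbell: writing n tells
 * the hardware n freshly written descriptors are ready to fetch.
 * (Inferred from its use here and from the matching MIX_IRING2 write in
 * the RX fill path; consult the hardware manual for the exact field.)
 */
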
#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	octeon_mgmt_receive_packets(p, 16);
	octeon_mgmt_update_rx_stats(netdev);
}
#endif

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
	info->n_stats = 0;
	info->testinfo_len = 0;
	info->regdump_len = 0;
	info->eedump_len = 0;
}

static int octeon_mgmt_get_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (p->phydev)
		return phy_ethtool_gset(p->phydev, cmd);

	return -EOPNOTSUPP;
}

static int octeon_mgmt_set_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (p->phydev)
		return phy_ethtool_sset(p->phydev, cmd);

	return -EOPNOTSUPP;
}

static int octeon_mgmt_nway_reset(struct net_device *dev)
{
	struct octeon_mgmt *p = netdev_priv(dev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (p->phydev)
		return phy_start_aneg(p->phydev);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo = octeon_mgmt_get_drvinfo,
	.get_settings = octeon_mgmt_get_settings,
	.set_settings = octeon_mgmt_set_settings,
	.nway_reset = octeon_mgmt_nway_reset,
	.get_link = ethtool_op_get_link,
};

static const struct net_device_ops octeon_mgmt_ops = {
	.ndo_open =		octeon_mgmt_open,
	.ndo_stop =		octeon_mgmt_stop,
	.ndo_start_xmit =	octeon_mgmt_xmit,
	.ndo_set_rx_mode =	octeon_mgmt_set_rx_filtering,
	.ndo_set_mac_address =	octeon_mgmt_set_mac_address,
	.ndo_do_ioctl =		octeon_mgmt_ioctl,
	.ndo_change_mtu =	octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =	octeon_mgmt_poll_controller,
#endif
};

*pdev
)
1424 struct net_device
*netdev
;
1425 struct octeon_mgmt
*p
;
1428 struct resource
*res_mix
;
1429 struct resource
*res_agl
;
1430 struct resource
*res_agl_prt_ctl
;
1434 netdev
= alloc_etherdev(sizeof(struct octeon_mgmt
));
1438 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
1440 dev_set_drvdata(&pdev
->dev
, netdev
);
1441 p
= netdev_priv(netdev
);
1442 netif_napi_add(netdev
, &p
->napi
, octeon_mgmt_napi_poll
,
1443 OCTEON_MGMT_NAPI_WEIGHT
);
1446 p
->dev
= &pdev
->dev
;
1447 p
->has_rx_tstamp
= false;
1449 data
= of_get_property(pdev
->dev
.of_node
, "cell-index", &len
);
1450 if (data
&& len
== sizeof(*data
)) {
1451 p
->port
= be32_to_cpup(data
);
1453 dev_err(&pdev
->dev
, "no 'cell-index' property\n");
1458 snprintf(netdev
->name
, IFNAMSIZ
, "mgmt%d", p
->port
);
1460 result
= platform_get_irq(pdev
, 0);
1466 res_mix
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1467 if (res_mix
== NULL
) {
1468 dev_err(&pdev
->dev
, "no 'reg' resource\n");
1473 res_agl
= platform_get_resource(pdev
, IORESOURCE_MEM
, 1);
1474 if (res_agl
== NULL
) {
1475 dev_err(&pdev
->dev
, "no 'reg' resource\n");
1480 res_agl_prt_ctl
= platform_get_resource(pdev
, IORESOURCE_MEM
, 3);
1481 if (res_agl_prt_ctl
== NULL
) {
1482 dev_err(&pdev
->dev
, "no 'reg' resource\n");
1487 p
->mix_phys
= res_mix
->start
;
1488 p
->mix_size
= resource_size(res_mix
);
1489 p
->agl_phys
= res_agl
->start
;
1490 p
->agl_size
= resource_size(res_agl
);
1491 p
->agl_prt_ctl_phys
= res_agl_prt_ctl
->start
;
1492 p
->agl_prt_ctl_size
= resource_size(res_agl_prt_ctl
);
1495 if (!devm_request_mem_region(&pdev
->dev
, p
->mix_phys
, p
->mix_size
,
1497 dev_err(&pdev
->dev
, "request_mem_region (%s) failed\n",
1503 if (!devm_request_mem_region(&pdev
->dev
, p
->agl_phys
, p
->agl_size
,
1506 dev_err(&pdev
->dev
, "request_mem_region (%s) failed\n",
1511 if (!devm_request_mem_region(&pdev
->dev
, p
->agl_prt_ctl_phys
,
1512 p
->agl_prt_ctl_size
, res_agl_prt_ctl
->name
)) {
1514 dev_err(&pdev
->dev
, "request_mem_region (%s) failed\n",
1515 res_agl_prt_ctl
->name
);
1519 p
->mix
= (u64
)devm_ioremap(&pdev
->dev
, p
->mix_phys
, p
->mix_size
);
1520 p
->agl
= (u64
)devm_ioremap(&pdev
->dev
, p
->agl_phys
, p
->agl_size
);
1521 p
->agl_prt_ctl
= (u64
)devm_ioremap(&pdev
->dev
, p
->agl_prt_ctl_phys
,
1522 p
->agl_prt_ctl_size
);
1523 spin_lock_init(&p
->lock
);
1525 skb_queue_head_init(&p
->tx_list
);
1526 skb_queue_head_init(&p
->rx_list
);
1527 tasklet_init(&p
->tx_clean_tasklet
,
1528 octeon_mgmt_clean_tx_tasklet
, (unsigned long)p
);
1530 netdev
->priv_flags
|= IFF_UNICAST_FLT
;
1532 netdev
->netdev_ops
= &octeon_mgmt_ops
;
1533 netdev
->ethtool_ops
= &octeon_mgmt_ethtool_ops
;
1535 mac
= of_get_mac_address(pdev
->dev
.of_node
);
1537 if (mac
&& is_valid_ether_addr(mac
))
1538 memcpy(netdev
->dev_addr
, mac
, ETH_ALEN
);
1540 eth_hw_addr_random(netdev
);
1542 p
->phy_np
= of_parse_phandle(pdev
->dev
.of_node
, "phy-handle", 0);
1544 pdev
->dev
.coherent_dma_mask
= DMA_BIT_MASK(64);
1545 pdev
->dev
.dma_mask
= &pdev
->dev
.coherent_dma_mask
;
1547 netif_carrier_off(netdev
);
1548 result
= register_netdev(netdev
);
1552 dev_info(&pdev
->dev
, "Version " DRV_VERSION
"\n");
1556 free_netdev(netdev
);
static int octeon_mgmt_remove(struct platform_device *pdev)
{
	struct net_device *netdev = dev_get_drvdata(&pdev->dev);

	unregister_netdev(netdev);
	free_netdev(netdev);
	return 0;
}

static struct of_device_id octeon_mgmt_match[] = {
	{
		.compatible = "cavium,octeon-5750-mix",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_mgmt_match);

static struct platform_driver octeon_mgmt_driver = {
	.driver = {
		.name		= "octeon_mgmt",
		.owner		= THIS_MODULE,
		.of_match_table = octeon_mgmt_match,
	},
	.probe		= octeon_mgmt_probe,
	.remove		= octeon_mgmt_remove,
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
	/* Force our mdiobus driver module to be loaded first. */
	octeon_mdiobus_force_mod_depencency();
	return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
	platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);