/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"
/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = readl_relaxed(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,	\
				 u32 val, u32 off)			\
{									\
	writel_relaxed(val, priv->base + offset + off);			\
}
BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
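
/* For illustration only (not part of the driver): the "intrl2_0"
 * instantiation above expands to a pair of accessors roughly equivalent to:
 *
 *	static inline u32 intrl2_0_readl(struct bcm_sysport_priv *priv, u32 off)
 *	{
 *		return readl_relaxed(priv->base + SYS_PORT_INTRL2_0_OFFSET + off);
 *	}
 *	static inline void intrl2_0_writel(struct bcm_sysport_priv *priv,
 *					   u32 val, u32 off)
 *	{
 *		writel_relaxed(val, priv->base + SYS_PORT_INTRL2_0_OFFSET + off);
 *	}
 */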
/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved by 4 bytes up, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}
static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}
static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite) {
		return BIT(bit);
	} else {
		if (bit >= ACB_ALGO)
			return BIT(bit + 1);
		else
			return BIT(bit);
	}
}
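
/* Note (inferred from the TSB_SWAP1 comment in bcm_sysport_init_tx_ring
 * below): SYSTEMPORT Lite inserted an extra TSB_SWAP1 bit into TDMA_CONTROL,
 * so any control bit at or above ACB_ALGO sits one position higher on Lite
 * parts; tdma_control_bit() hides that remapping from callers, e.g.
 * tdma_control_bit(priv, TSB_EN) yields BIT(TSB_EN) or BIT(TSB_EN + 1).
 */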
/* L2-interrupt masking/unmasking helpers, does automatic saving of the applied
 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)
/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the check for 64-bits explicit here to save
 * one register write per-packet on 32-bits platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		       d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}
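
/* A descriptor here is two 32-bit words: DESC_ADDR_LO carries the low
 * address bits, while DESC_ADDR_HI_STATUS_LEN packs the high address bits
 * together with length/status (see how bcm_sysport_xmit() builds
 * addr_status_len below). On a 32-bit phys_addr_t build the #ifdef above
 * compiles the high-word write out entirely, which is the per-packet
 * register write the comment refers to.
 */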
static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}
/* Ethtool operations */
static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-bytes Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}
static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}
static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)
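
/* Example (illustrative): with the dynamic per-queue entries appended by
 * bcm_sysport_get_strings() below, `ethtool -S` output ends with two
 * counters per TX queue, in queue order:
 *
 *	txq0_packets: ...
 *	txq0_bytes:   ...
 *	txq1_packets: ...
 *	...
 */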
static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}
static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}
static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}
static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}
static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}
static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}
static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}
static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}
static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else {
			data[i] = *(u32 *)p;
		}
		j++;
	}

	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}
static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	/* Return the programmed SecureOn password */
	reg = umac_readl(priv, UMAC_PSW_MS);
	put_unaligned_be16(reg, &wol->sopass[0]);
	reg = umac_readl(priv, UMAC_PSW_LS);
	put_unaligned_be32(reg, &wol->sopass[2]);
}
static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	/* Program the SecureOn password */
	if (wol->wolopts & WAKE_MAGICSECURE) {
		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
			    UMAC_PSW_LS);
	}

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}
static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;

	return 0;
}
static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* Base system clock is 125Mhz, DMA timeout is this reference clock
	 * divided by 1024, which yield roughly 8.192 us, our maximum value has
	 * to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
		reg &= ~(RING_INTR_THRESH_MASK |
			 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
		reg |= ec->tx_max_coalesced_frames;
		reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
			 RING_TIMEOUT_SHIFT;
		tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
	}

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= ec->rx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
			    RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);

	return 0;
}
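
/* Worked example of the timeout conversion above: one DMA timeout tick is
 * 1024 / 125 MHz = 8.192 us. Requesting tx_coalesce_usecs = 100 programs
 * DIV_ROUND_UP(100 * 1000, 8192) = 13 ticks, i.e. roughly 106.5 us of
 * actual coalescing delay.
 */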
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}
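
/* Note on the refill scheme above: a replacement SKB is allocated and
 * DMA-mapped *before* the old one is unmapped and handed back, so a ring
 * slot never transiently points at an unmapped buffer. On allocation or
 * mapping failure the old SKB simply stays on the ring and the incoming
 * packet is dropped by the caller.
 */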
static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}
/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT Lite
	 * groups the producer and consumer indexes into the same 32-bit
	 * which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			  DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2bytes before Ethernet
		 * header plus we have the Receive Status Block, strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	return processed;
}
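
/* Worked example of the index arithmetic above, assuming the 16-bit
 * consumer-index mask from bcmsysport.h: with rx_c_index = 0xfffe and a
 * hardware producer index that has wrapped to 0x0001,
 * (0x0001 - 0xfffe) & 0xffff = 3, so exactly three new packets are
 * processed despite the wraparound.
 */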
static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}
/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}
/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}
/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}
static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}
static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}
static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index, producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is active
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	return work_done;
}
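
/* On SYSTEMPORT Lite the single RDMA_CONS_INDEX register holds both halves
 * of the index pair: the shifts in the code imply the consumer index lives
 * in the upper 16 bits (hence the << 16 write above) while the
 * hardware-maintained producer index occupies the lower bits, which is why
 * bcm_sysport_desc_rx() reads the producer index from RDMA_CONS_INDEX on
 * Lite parts.
 */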
static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	u32 reg;

	/* Stop monitoring MPD interrupt */
	intrl2_0_mask_set(priv, INTRL2_0_MPD);

	/* Clear the MagicPacket detection logic */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (priv->irq0_stat & INTRL2_0_MPD) {
		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
		bcm_sysport_resume_from_wol(priv);
	}

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}
/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}
static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, priv);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, priv);
		enable_irq(priv->irq1);
	}
}
#endif
static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	u16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		if (!nskb) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_consume_skb_any(skb);
		skb = nskb;
	}

	tsb = skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}
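
/* Worked example (illustrative) of the csum_info encoding above for an
 * IPv4/TCP frame: the TCP header starts 34 bytes into the frame (14-byte
 * Ethernet header plus 20-byte IP header), so after subtracting the 8-byte
 * TSB that was just pushed, csum_start = 34. TCP's csum_offset is 16, so
 * the low bits carry the checksum field pointer 34 + 16 = 50, csum_start
 * is placed at L4_PTR_SHIFT, and L4_LENGTH_VALID is set.
 */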
static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
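
/* The len_status word built above packs three things into one 32-bit value:
 * the upper physical-address bits, the frame length at DESC_LEN_SHIFT, and
 * the SOP/EOP/APP_CRC flags (plus DESC_L4_CSUM when checksum offload
 * applies) at DESC_STATUS_SHIFT. Per the "Ports are latched" comment on
 * tdma_port_write_desc_addr(), the HI half containing this word is written
 * first and held by the write port until the LO address write completes
 * the descriptor handoff.
 */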
static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}
/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	if (priv->is_lite)
		goto out;

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}
out:
	if (changed)
		phy_print_status(phydev);
}
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->clean_index = 0;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));

	/* Configure QID and port mapping */
	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
	if (ring->inspect) {
		reg |= ring->switch_queue & RING_QID_MASK;
		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
	} else {
		reg |= RING_IGNORE_STATUS;
	}
	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Enable ACB algorithm 2 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	reg |= tdma_control_bit(priv, ACB_ALGO);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
	 * with the original definition of ACB_ALGO
	 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->is_lite)
		reg &= ~BIT(TSB_SWAP1);
	/* Set a correct TSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= tdma_control_bit(priv, TSB_SWAP0);
	else
		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n",
		  ring->size, ring->desc_cpu, ring->switch_queue,
		  ring->switch_port);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}
/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= tdma_control_bit(priv, TDMA_EN);
	else
		reg &= ~tdma_control_bit(priv, TDMA_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	u32 reg;
	int ret;
	int i;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
				GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	rdma_writel(priv, 1, RDMA_MBDONE_INTR);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}
static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}
static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}
static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	if (!priv->is_lite) {
		reg = umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		umac_writel(priv, reg, UMAC_CMD);
	} else {
		reg = gib_readl(priv, GIB_CONTROL);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		gib_writel(priv, reg, GIB_CONTROL);
	}

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}
static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		    addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	if (!priv->is_lite) {
		umac_writel(priv, mac0, UMAC_MAC0);
		umac_writel(priv, mac1, UMAC_MAC1);
	} else {
		gib_writel(priv, mac0, GIB_MAC0);
		gib_writel(priv, mac1, GIB_MAC1);
	}
}
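
/* Worked example: for the address 00:10:18:36:22:13 the packing above
 * yields mac0 = 0x00101836 (bytes 0-3) and mac1 = 0x00002213 (bytes 4-5
 * in the low 16 bits of the second register).
 */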
static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}
static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}
static void bcm_sysport_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}
static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the TXQs */
	if (!priv->is_lite)
		intrl2_1_mask_clear(priv, 0xffffffff);
	else
		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);
}
static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	/* Set a correct RSB format on SYSTEMPORT Lite */
	if (priv->is_lite)
		reg &= ~RBUF_RSB_SWAP1;

	/* Set a correct RSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= RBUF_RSB_SWAP0;
	else
		reg &= ~RBUF_RSB_SWAP0;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}
static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	if (!priv->is_lite) {
		intrl2_1_mask_set(priv, 0xffffffff);
		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	}
}
static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = gib_readl(priv, GIB_CONTROL);
	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
	if (netdev_uses_dsa(priv->netdev)) {
		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
	}
	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
	reg |= 12 << GIB_IPG_LEN_SHIFT;
	gib_writel(priv, reg, GIB_CONTROL);
}
static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2bytes alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
				   GIB_FCS_STRIP);

	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	if (!phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	bcm_sysport_mask_all_intrs(priv);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	if (!priv->is_lite) {
		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
				  dev->name, dev);
		if (ret) {
			netdev_err(dev, "failed to request TX interrupt\n");
			goto out_free_irq0;
		}
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
	return ret;
}
static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	bcm_sysport_mask_all_intrs(priv);
}
static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	return 0;
}
static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
	.get_coalesce		= bcm_sysport_get_coalesce,
	.set_coalesce		= bcm_sysport_set_coalesce,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
				    void *accel_priv,
				    select_queue_fallback_t fallback)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb);
	struct bcm_sysport_tx_ring *tx_ring;
	unsigned int q, port;

	if (!netdev_uses_dsa(dev))
		return fallback(dev, skb);

	/* DSA tagging layer will have configured the correct queue */
	q = BRCM_TAG_GET_QUEUE(queue);
	port = BRCM_TAG_GET_PORT(queue);
	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];

	if (unlikely(!tx_ring))
		return fallback(dev, skb);

	return tx_ring->index;
}
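
/* Example of the mapping above, assuming the DSA Broadcom tagger encodes
 * the switch port and queue into skb->queue_mapping (the values that
 * BRCM_TAG_GET_PORT()/BRCM_TAG_GET_QUEUE() recover): a frame carrying
 * (port = 2, queue = 1) selects
 * priv->ring_map[1 + 2 * priv->per_port_num_tx_queues], i.e. the TX ring
 * that bcm_sysport_map_queues() below reserved for switch port 2, queue 1.
 */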
static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
	.ndo_get_stats64	= bcm_sysport_get_stats64,
	.ndo_select_queue	= bcm_sysport_select_queue,
};
static int bcm_sysport_map_queues(struct net_device *dev,
				  struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *ring;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	unsigned int q, start, port;

	/* We can't be setting up queue inspection for non directly attached
	 * switches
	 */
	if (info->switch_number)
		return 0;

	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	/* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
	 * 1:1 mapping, we can only do a 2:1 mapping. By reducing the number of
	 * per-port (slave_dev) network device queues, we achieve just that.
	 * This needs to happen now, before any slave network device is used,
	 * such that it accurately reflects the number of real TX queues.
	 */
	if (priv->is_lite)
		netif_set_real_num_tx_queues(slave_dev,
					     slave_dev->num_tx_queues / 2);
	num_tx_queues = slave_dev->real_num_tx_queues;

	if (priv->per_port_num_tx_queues &&
	    priv->per_port_num_tx_queues != num_tx_queues)
		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");

	priv->per_port_num_tx_queues = num_tx_queues;

	start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
	for (q = 0; q < num_tx_queues; q++) {
		ring = &priv->tx_rings[q + start];

		/* Just remember the mapping, actual programming is done
		 * during bcm_sysport_init_tx_ring
		 */
		ring->switch_queue = q;
		ring->switch_port = port;
		ring->inspect = true;
		priv->ring_map[q + port * num_tx_queues] = ring;

		/* Set all queues as being used now */
		set_bit(q + start, &priv->queue_bitmap);
	}

	return 0;
}
static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct dsa_notifier_register_info *info;

	if (event != DSA_PORT_REGISTER)
		return NOTIFY_DONE;

	info = ptr;

	return notifier_from_errno(bcm_sysport_map_queues(info->master, info));
}
#define REV_FMT	"v%2x.%02x"

static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
	[SYSTEMPORT] = {
		.is_lite = false,
		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
	},
	[SYSTEMPORT_LITE] = {
		.is_lite = true,
		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
	},
};

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemportlite-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
	{ .compatible = "brcm,systemport-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ .compatible = "brcm,systemport",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
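
/* Illustrative device-tree node for this driver (property and compatible
 * names as parsed in bcm_sysport_probe() below; the unit address, register
 * range and interrupt specifiers are made up for the example):
 *
 *	ethernet@f04a0000 {
 *		compatible = "brcm,systemport-v1.00";
 *		reg = <0xf04a0000 0x4650>;
 *		interrupts = <0x0 0x16 0x0>, <0x0 0x17 0x0>;
 *		systemport,num-txq = <32>;
 *		systemport,num-rxq = <1>;
 *	};
 */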

static int bcm_sysport_probe(struct platform_device *pdev)
{
	const struct bcm_sysport_hw_params *params;
	const struct of_device_id *of_id = NULL;
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	of_id = of_match_node(bcm_sysport_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	/* Fairly quickly we need to know the type of adapter we have */
	params = of_id->data;

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	/* Sanity check the number of transmit queues */
	if (!txq || txq > TDMA_NUM_RINGS)
		return -EINVAL;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	/* Allocate number of TX rings */
	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
				      sizeof(struct bcm_sysport_tx_ring),
				      GFP_KERNEL);
	if (!priv->tx_rings)
		return -ENOMEM;

	priv->is_lite = params->is_lite;
	priv->num_rx_desc_words = params->num_rx_desc_words;

	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite) {
		priv->irq1 = platform_get_irq(pdev, 1);
		priv->wol_irq = platform_get_irq(pdev, 2);
	} else {
		priv->wol_irq = platform_get_irq(pdev, 1);
	}
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err_free_netdev;
	}
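
	/* SYSTEMPORT Lite multiplexes the RX and TX DMA interrupts onto the
	 * first (and only) INTRL2 instance, which is why irq1 is neither
	 * looked up nor validated on Lite parts.
	 */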

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err_free_netdev;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err_free_netdev;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);
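
	/* Failing to get the WoL interrupt is deliberately not fatal: the
	 * device simply does not advertise wakeup capability and probing
	 * carries on without Wake-on-LAN support.
	 */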

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);
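
	/* The 8-byte transmit status block (TSB) is prepended in front of
	 * every transmitted frame when TX checksum offload is in use, hence
	 * the extra headroom reserved here once for the device's lifetime.
	 */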

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	u64_stats_init(&priv->syncp);

	priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;

	ret = register_dsa_notifier(&priv->dsa_notifier);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DSA notifier\n");
		goto err_deregister_fixed_link;
	}

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_notifier;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT%s" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 priv->is_lite ? " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;

err_deregister_notifier:
	unregister_dsa_notifier(&priv->dsa_notifier);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}
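
/* The error labels above unwind in reverse order of registration: the DSA
 * notifier first, then the fixed-link PHY, and finally the net_device
 * allocation itself; everything devm_-allocated is released automatically.
 */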

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_dsa_notifier(&priv->dsa_notifier);
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	u32 reg;

	/* Password has already been programmed */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE)
		reg |= PSW_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	/* Make sure RBUF entered WoL mode as result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		reg = umac_readl(priv, UMAC_MPD_CTRL);
		reg &= ~MPD_EN;
		umac_writel(priv, reg, UMAC_MPD_CTRL);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	/* Enable the interrupt wake-up source */
	intrl2_0_mask_clear(priv, INTRL2_0_MPD);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

static int bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	bcm_sysport_netif_stop(dev);

	phy_suspend(dev->phydev);

	netif_device_detach(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}
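
/* Quiesce order matters on suspend: UniMAC RX is disabled and the RX pipe
 * flushed before TX so that in-flight frames can drain, and the 2-3ms sleep
 * lets the last transmitted frame reach a packet boundary before TX is cut.
 */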

static int bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	netif_device_attach(dev);

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Re-enable RXCHK if it was enabled before suspend */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg |= RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */
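
/* The resume path is essentially the datapath bring-up from ndo_open replayed
 * in the same order (rings, RDMA, RBUF/RXCHK, UniMAC, TDMA), minus the PHY
 * connection, which survives suspend and is only phy_resume()d.
 */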

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
			 bcm_sysport_suspend, bcm_sysport_resume);
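
/* SIMPLE_DEV_PM_OPS is shorthand for defining a struct dev_pm_ops; roughly:
 *
 *	static const struct dev_pm_ops bcm_sysport_pm_ops = {
 *		.suspend = bcm_sysport_suspend,
 *		.resume  = bcm_sysport_resume,
 *		(plus the freeze/thaw/poweroff/restore hooks, all routed to
 *		the same pair when CONFIG_PM_SLEEP is enabled)
 *	};
 */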

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");