/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

#include "bcmsysport.h"
/* I/O accessor register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset)				\
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = readl_relaxed(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,		\
				 u32 val, u32 off)			\
{									\
	writel_relaxed(val, priv->base + offset + off);			\
}
BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved by 4 bytes up, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;

	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}
static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;

	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}
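/* Worked example (a sketch, not from the original source): on a Lite part,
 * rdma_readl(priv, RDMA_STATUS) actually reads
 * priv->base + SYS_PORT_RDMA_OFFSET + RDMA_STATUS + 4, so the 4-byte layout
 * shift is hidden from every caller of these two helpers.
 */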
static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
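/* Note: on SYSTEMPORT Lite the TDMA_CONTROL bit positions at and above
 * ACB_ALGO are renumbered (shifted up by one), which is the translation this
 * helper hides from callers such as tdma_control_bit(priv, TSB_EN) below.
 */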
/* L2-interrupt masking/unmasking helpers that automatically save the applied
 * mask in a software copy, to avoid CPU_MASK_STATUS reads in hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}
BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)
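/* Usage sketch: intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE) writes the bit
 * to INTRL2_CPU_MASK_SET and mirrors it into priv->irq0_mask, so code that
 * later needs the currently-applied mask can consult the software copy
 * instead of reading CPU_MASK_STATUS back from the hardware.
 */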
/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the check for 64-bits explicit here to save
 * one register write per-packet on 32-bits platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		       d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}
static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}
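/* The WRITE_PORT_HI/LO pair is latched by hardware: the HI write parks the
 * status/length and upper address bits, and the subsequent LO write is
 * (presumably) what hands the complete descriptor to the port, hence the
 * ordering above.
 */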
/* Ethtool operations */
static void bcm_sysport_set_rx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-bytes Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);
}
static void bcm_sysport_set_tx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);
}
static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
				   GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

	bcm_sysport_set_rx_csum(dev, features);
	bcm_sysport_set_tx_csum(dev, features);

	return 0;
}
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)
static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}
static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}
static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}
static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}
static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}
static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}
*priv
)
394 for (i
= 0; i
< BCM_SYSPORT_STATS_LEN
; i
++) {
395 const struct bcm_sysport_stats
*s
;
400 s
= &bcm_sysport_gstrings_stats
[i
];
402 case BCM_SYSPORT_STAT_NETDEV
:
403 case BCM_SYSPORT_STAT_NETDEV64
:
404 case BCM_SYSPORT_STAT_SOFT
:
406 case BCM_SYSPORT_STAT_MIB_RX
:
407 case BCM_SYSPORT_STAT_MIB_TX
:
408 case BCM_SYSPORT_STAT_RUNT
:
412 if (s
->type
!= BCM_SYSPORT_STAT_MIB_RX
)
413 offset
= UMAC_MIB_STAT_OFFSET
;
414 val
= umac_readl(priv
, UMAC_MIB_START
+ j
+ offset
);
416 case BCM_SYSPORT_STAT_RXCHK
:
417 val
= rxchk_readl(priv
, s
->reg_offset
);
419 rxchk_writel(priv
, 0, s
->reg_offset
);
421 case BCM_SYSPORT_STAT_RBUF
:
422 val
= rbuf_readl(priv
, s
->reg_offset
);
424 rbuf_writel(priv
, 0, s
->reg_offset
);
429 p
= (char *)priv
+ s
->stat_offset
;
433 netif_dbg(priv
, hw
, priv
->netdev
, "updated MIB counters\n");
static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}
static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[j] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else {
			data[j] = *(u32 *)p;
		}
		j++;
	}

	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}
static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGICSECURE)
		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}
static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
					unsigned int usecs, unsigned int pkts)
{
	u32 reg;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= pkts;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}
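/* Conversion sketch: one timeout tick is 1024 cycles of the 125 MHz system
 * clock, i.e. 1024 / 125e6 = 8.192 us, so DIV_ROUND_UP(usecs * 1000, 8192)
 * turns microseconds into ticks (e.g. 50 us -> 50000 / 8192 -> 7 ticks,
 * roughly 57 us of real time).
 */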
static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
					struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = ring->priv;
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
	reg &= ~(RING_INTR_THRESH_MASK |
		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
	reg |= ec->tx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
		 RING_TIMEOUT_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}
static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

	return 0;
}
static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct net_dim_cq_moder moder;
	u32 usecs, pkts;
	unsigned int i;

	/* Base system clock is 125Mhz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192 us, and our maximum
	 * value has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
	    ec->use_adaptive_tx_coalesce)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

	/* Apply desired coalescing parameters */
	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

	return 0;
}
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}
static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}
/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	unsigned int processed_bytes = 0;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT Lite
	 * groups the producer and consumer indexes into the same 32-bit
	 * register, which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			  DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2bytes before Ethernet
		 * header plus we have the Receive Status Block, strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);
		processed_bytes += len;
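		/* Layout sketch of the buffer just consumed (assuming struct
		 * bcm_rsb is 8 bytes): [RSB][2-byte pad][Ethernet frame][FCS
		 * if crc_fwd], which is why sizeof(*rsb) + 2 was stripped
		 * above and the FCS may still need trimming below.
		 */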
		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	priv->dim.packets = processed;
	priv->dim.bytes = processed_bytes;

	return processed;
}
static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}
/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}
/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}
/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}
static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}
static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}
static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	struct net_dim_sample dim_sample;
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index, producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is active.
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
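	/* On Lite the 32-bit RDMA_CONS_INDEX register packs the HW-owned
	 * producer index in the low half, so the consumer index is shifted
	 * into the upper 16 bits to leave the producer half untouched.
	 */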
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	if (priv->dim.use_dim) {
		net_dim_sample(priv->dim.event_ctr, priv->dim.packets,
			       priv->dim.bytes, &dim_sample);
		net_dim(&priv->dim.dim, dim_sample);
	}

	return work_done;
}
static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
{
	u32 reg, bit;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (enable)
		reg |= MPD_EN;
	else
		reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->is_lite)
		bit = RBUF_ACPI_EN_LITE;
	else
		bit = RBUF_ACPI_EN;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	if (enable)
		reg |= bit;
	else
		reg &= ~bit;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}
static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	unsigned int index;
	u32 reg;

	/* Disable RXCHK, active filters and Broadcom tag matching */
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
	rxchk_writel(priv, reg, RXCHK_CONTROL);

	/* Make sure we restore correct CID index in case HW lost
	 * its context during deep idle state
	 */
	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		rxchk_writel(priv, priv->filters_loc[index] <<
			     RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
		rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
	}

	/* Clear the MagicPacket detection logic */
	mpd_enable_set(priv, false);

	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
	if (reg & INTRL2_0_MPD)
		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");

	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
				  RXCHK_BRCM_TAG_MATCH_MASK;
		netdev_info(priv->netdev,
			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
	}

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
static void bcm_sysport_dim_work(struct work_struct *work)
{
	struct net_dim *dim = container_of(work, struct net_dim, work);
	struct bcm_sysport_net_dim *ndim =
			container_of(dim, struct bcm_sysport_net_dim, dim);
	struct bcm_sysport_priv *priv =
			container_of(ndim, struct bcm_sysport_priv, dim);
	struct net_dim_cq_moder cur_profile =
			net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
	dim->state = NET_DIM_START_MEASURE;
}
/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		priv->dim.event_ctr++;
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}
/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}
static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, priv);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, priv);
		enable_irq(priv->irq1);
	}
}
#endif
static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	__be16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		if (!nskb) {
			dev_kfree_skb_any(skb);
			priv->mib.tx_realloc_tsb_failed++;
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_consume_skb_any(skb);
		skb = nskb;
		priv->mib.tx_realloc_tsb++;
	}

	tsb = skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		switch (ip_ver) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP &&
			    ip_ver == htons(ETH_P_IP))
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}
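/* For reference, the csum_info word built in bcm_sysport_insert_tsb packs:
 * the checksum store offset in the low L4_CSUM_PTR_MASK bits, the L4 header
 * start at L4_PTR_SHIFT, plus the L4_LENGTH_VALID and L4_UDP qualifiers that
 * tell the hardware how to fill the checksum in.
 */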
static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}
/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	if (priv->is_lite)
		goto out;

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}
out:
	if (changed)
		phy_print_status(phydev);
}
static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
				 void (*cb)(struct work_struct *work))
{
	struct bcm_sysport_net_dim *dim = &priv->dim;

	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;
}
static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_net_dim *dim = &priv->dim;
	struct net_dim_cq_moder moder;
	u32 usecs, pkts;

	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
}
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
			       GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->clean_index = 0;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));

	/* Configure QID and port mapping */
	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
	if (ring->inspect) {
		reg |= ring->switch_queue & RING_QID_MASK;
		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
	} else {
		reg |= RING_IGNORE_STATUS;
	}
	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Enable ACB algorithm 2 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	reg |= tdma_control_bit(priv, ACB_ALGO);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
	 * with the original definition of ACB_ALGO
	 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->is_lite)
		reg &= ~BIT(TSB_SWAP1);
	/* Set a correct TSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= tdma_control_bit(priv, TSB_SWAP0);
	else
		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n",
		  ring->size, ring->desc_cpu, ring->switch_queue,
		  ring->switch_port);

	return 0;
}
static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);
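	/* Note: RDMA_DISABLED tracks the *disabled* state, so the test above
	 * waits for the bit to clear when enabling and to set when disabling,
	 * letting a single poll loop serve both directions.
	 */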
	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= tdma_control_bit(priv, TDMA_EN);
	else
		reg &= ~tdma_control_bit(priv, TDMA_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	u32 reg;
	int ret;
	int i;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}
static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}
static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}
static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	if (!priv->is_lite) {
		reg = umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		umac_writel(priv, reg, UMAC_CMD);
	} else {
		reg = gib_readl(priv, GIB_CONTROL);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		gib_writel(priv, reg, GIB_CONTROL);
	}

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}
static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		    addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	if (!priv->is_lite) {
		umac_writel(priv, mac0, UMAC_MAC0);
		umac_writel(priv, mac1, UMAC_MAC1);
	} else {
		gib_writel(priv, mac0, GIB_MAC0);
		gib_writel(priv, mac1, GIB_MAC1);
	}
}
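/* Packing example (a sketch): for the address 00:10:18:aa:bb:cc the
 * registers receive mac0 = 0x001018aa (first four octets) and
 * mac1 = 0x0000bbcc (last two octets).
 */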
static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}
static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}
static void bcm_sysport_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}
static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
	bcm_sysport_init_rx_coalesce(priv);
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the TXQs */
	if (!priv->is_lite)
		intrl2_1_mask_clear(priv, 0xffffffff);
	else
		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
}
static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	/* Set a correct RSB format on SYSTEMPORT Lite */
	if (priv->is_lite)
		reg &= ~RBUF_RSB_SWAP1;

	/* Set a correct RSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= RBUF_RSB_SWAP0;
	else
		reg &= ~RBUF_RSB_SWAP0;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}
static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	if (!priv->is_lite) {
		intrl2_1_mask_set(priv, 0xffffffff);
		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	}
}
static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = gib_readl(priv, GIB_CONTROL);
	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
	if (netdev_uses_dsa(priv->netdev)) {
		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
	}
	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
	reg |= 12 << GIB_IPG_LEN_SHIFT;
	gib_writel(priv, reg, GIB_CONTROL);
}
static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2bytes alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Apply features again in case we changed them while interface was
	 * down
	 */
	bcm_sysport_set_features(dev, dev->features);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	if (!phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	bcm_sysport_mask_all_intrs(priv);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	if (!priv->is_lite) {
		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
				  dev->name, dev);
		if (ret) {
			netdev_err(dev, "failed to request TX interrupt\n");
			goto out_free_irq0;
		}
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	netif_tx_start_all_queues(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
	return ret;
}
static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_disable(dev);
	napi_disable(&priv->napi);
	cancel_work_sync(&priv->dim.dim.work);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	bcm_sysport_mask_all_intrs(priv);
}
static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	return 0;
}
static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
				 u64 location)
{
	unsigned int index;
	u32 reg;

	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
		reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
		reg &= RXCHK_BRCM_TAG_CID_MASK;
		if (reg == location)
			return index;
	}

	return -EINVAL;
}
*priv
,
2144 struct ethtool_rxnfc
*nfc
)
2148 /* This is not a rule that we know about */
2149 index
= bcm_sysport_rule_find(priv
, nfc
->fs
.location
);
2153 nfc
->fs
.ring_cookie
= RX_CLS_FLOW_WAKE
;
static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	unsigned int index;
	u32 reg;

	/* We cannot match locations greater than what the classification ID
	 * permits (256 entries)
	 */
	if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
		return -E2BIG;

	/* We cannot support flows that are not destined for a wake-up */
	if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
		return -EOPNOTSUPP;

	/* All filters are already in use, we cannot match more rules */
	if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
	    RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	/* find_first_zero_bit() returns the bitmap size when no zero bit is
	 * found, hence the >= check
	 */
	index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
	if (index >= RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	/* Location is the classification ID, and index is the position
	 * within one of our 8 possible filters to be programmed
	 */
	reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
	reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
	reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
	rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
	rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));

	priv->filters_loc[index] = nfc->fs.location;
	set_bit(index, priv->filters);

	return 0;
}
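/* Usage sketch (illustrative, not part of the driver): rules reach
 * bcm_sysport_rule_set() through the ethtool rxnfc interface, and only
 * rules whose ring_cookie is RX_CLS_FLOW_WAKE are accepted. With a recent
 * ethtool, something along these lines should install a wake-up filter
 * (the exact CLI syntax may vary between ethtool versions):
 *
 *	ethtool --config-ntuple eth0 flow-type tcp4 dst-port 9 action -2 loc 100
 *
 * where "loc 100" ends up in nfc->fs.location, i.e. the Broadcom tag
 * classification ID programmed into RXCHK_BRCM_TAG(index) above.
 */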
static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
				u64 location)
{
	int index;

	/* This is not a rule that we know about */
	index = bcm_sysport_rule_find(priv, location);
	if (index < 0)
		return -EOPNOTSUPP;

	/* No need to disable this filter if it was enabled, this will
	 * be taken care of during suspend time by bcm_sysport_suspend_to_wol
	 */
	clear_bit(index, priv->filters);
	priv->filters_loc[index] = 0;

	return 0;
}
static int bcm_sysport_get_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sysport_rule_get(priv, nfc);
		break;
	default:
		break;
	}

	return ret;
}
static int bcm_sysport_set_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sysport_rule_set(priv, nfc);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sysport_rule_del(priv, nfc->fs.location);
		break;
	default:
		break;
	}

	return ret;
}
static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
	.get_coalesce		= bcm_sysport_get_coalesce,
	.set_coalesce		= bcm_sysport_set_coalesce,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_rxnfc		= bcm_sysport_get_rxnfc,
	.set_rxnfc		= bcm_sysport_set_rxnfc,
};
static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
				    struct net_device *sb_dev,
				    select_queue_fallback_t fallback)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb);
	struct bcm_sysport_tx_ring *tx_ring;
	unsigned int q, port;

	if (!netdev_uses_dsa(dev))
		return fallback(dev, skb, NULL);

	/* DSA tagging layer will have configured the correct queue */
	q = BRCM_TAG_GET_QUEUE(queue);
	port = BRCM_TAG_GET_PORT(queue);
	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];

	if (unlikely(!tx_ring))
		return fallback(dev, skb, NULL);

	return tx_ring->index;
}
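/* Worked example (illustrative numbers): the DSA tagger encodes both the
 * switch port and the switch queue into the skb queue mapping. Assuming
 * BRCM_TAG_GET_PORT() yields 1 and BRCM_TAG_GET_QUEUE() yields 2 on a
 * device with per_port_num_tx_queues = 4, the lookup above resolves to
 * ring_map[2 + 1 * 4] = ring_map[6]: rings are laid out in consecutive
 * per-port blocks of per_port_num_tx_queues entries each.
 */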
static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
	.ndo_get_stats64	= bcm_sysport_get_stats64,
	.ndo_select_queue	= bcm_sysport_select_queue,
};
static int bcm_sysport_map_queues(struct notifier_block *nb,
				  struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_priv *priv;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	unsigned int q, qp, port;
	struct net_device *dev;

	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
	if (priv->netdev != info->master)
		return 0;

	dev = info->master;

	/* We can't be setting up queue inspection for non directly
	 * attached switches
	 */
	if (info->switch_number)
		return 0;

	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	/* On SYSTEMPORT Lite we have half as many queues, so a 1:1 mapping
	 * is not possible, only a 2:1 mapping. By reducing the number of
	 * queues of the per-port (slave_dev) network devices, we achieve
	 * just that. This needs to happen now, before any slave network
	 * device is used, so that it accurately reflects the number of
	 * real TX queues.
	 */
	if (priv->is_lite)
		netif_set_real_num_tx_queues(slave_dev,
					     slave_dev->num_tx_queues / 2);

	num_tx_queues = slave_dev->real_num_tx_queues;

	if (priv->per_port_num_tx_queues &&
	    priv->per_port_num_tx_queues != num_tx_queues)
		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");

	priv->per_port_num_tx_queues = num_tx_queues;

	for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
	     q++) {
		ring = &priv->tx_rings[q];

		if (ring->inspect)
			continue;

		/* Just remember the mapping here; the actual programming is
		 * done during bcm_sysport_init_tx_ring()
		 */
		ring->switch_queue = qp;
		ring->switch_port = port;
		ring->inspect = true;
		/* Index by switch queue so that the lookup done in
		 * bcm_sysport_select_queue() finds this ring again
		 */
		priv->ring_map[qp + port * num_tx_queues] = ring;
		qp++;
	}

	return 0;
}
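/* Illustration (hypothetical numbers): on SYSTEMPORT Lite, a slave device
 * created with 8 TX queues is trimmed to real_num_tx_queues = 4 before any
 * traffic flows. Two directly attached switch ports then occupy:
 *
 *	port 0, switch queues 0..3 -> ring_map[0..3]
 *	port 1, switch queues 0..3 -> ring_map[4..7]
 *
 * matching the qp + port * num_tx_queues indexing used above and the
 * lookup performed in bcm_sysport_select_queue().
 */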
static int bcm_sysport_unmap_queues(struct notifier_block *nb,
				    struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_priv *priv;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	struct net_device *dev;
	unsigned int q, port;

	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
	if (priv->netdev != info->master)
		return 0;

	dev = info->master;

	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	num_tx_queues = slave_dev->real_num_tx_queues;

	for (q = 0; q < dev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];

		if (ring->switch_port != port)
			continue;

		if (!ring->inspect)
			continue;

		ring->inspect = false;
		/* Clear the slot this ring was registered at; the index is
		 * based on the switch queue, mirroring bcm_sysport_map_queues()
		 */
		priv->ring_map[ring->switch_queue + port * num_tx_queues] = NULL;
	}

	return 0;
}
static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	int ret = NOTIFY_DONE;

	switch (event) {
	case DSA_PORT_REGISTER:
		ret = bcm_sysport_map_queues(nb, ptr);
		break;
	case DSA_PORT_UNREGISTER:
		ret = bcm_sysport_unmap_queues(nb, ptr);
		break;
	}

	return notifier_from_errno(ret);
}
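/* Note: bcm_sysport_map_queues()/bcm_sysport_unmap_queues() return 0 or a
 * negative errno, and notifier_from_errno() translates that into the
 * NOTIFY_DONE/NOTIFY_BAD-style values the DSA notifier chain expects.
 */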
#define REV_FMT	"v%2x.%02x"

static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
	[SYSTEMPORT] = {
		.is_lite = false,
		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
	},
	[SYSTEMPORT_LITE] = {
		.is_lite = true,
		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
	},
};
static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemportlite-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
	{ .compatible = "brcm,systemport-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ .compatible = "brcm,systemport",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
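/* Example device tree node (illustrative sketch; the authoritative binding
 * lives in Documentation/devicetree/bindings/net/brcm,systemport.txt, and
 * the address and interrupt numbers below are made up):
 *
 *	ethernet@f04a0000 {
 *		compatible = "brcm,systemport-v1.00", "brcm,systemport";
 *		reg = <0xf04a0000 0x4650>;
 *		interrupts = <0x0 0x16 0x0>, <0x0 0x17 0x0>;
 *		local-mac-address = [ 00 11 22 33 44 55 ];
 *		phy-mode = "gmii";
 *		systemport,num-txq = <32>;
 *		systemport,num-rxq = <1>;
 *	};
 *
 * The optional systemport,num-txq/num-rxq properties feed the txq/rxq
 * values read in bcm_sysport_probe() below; when absent, the driver falls
 * back to TDMA_NUM_RINGS TX queues and a single RX queue.
 */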
static int bcm_sysport_probe(struct platform_device *pdev)
{
	const struct bcm_sysport_hw_params *params;
	const struct of_device_id *of_id = NULL;
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	of_id = of_match_node(bcm_sysport_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	/* Fairly quickly we need to know the type of adapter we have */
	params = of_id->data;

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	/* Sanity check the number of transmit queues */
	if (!txq || txq > TDMA_NUM_RINGS)
		return -EINVAL;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	/* Allocate number of TX rings */
	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
				      sizeof(struct bcm_sysport_tx_ring),
				      GFP_KERNEL);
	if (!priv->tx_rings) {
		ret = -ENOMEM;
		goto err_free_netdev;
	}

	priv->is_lite = params->is_lite;
	priv->num_rx_desc_words = params->num_rx_desc_words;

	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite) {
		priv->irq1 = platform_get_irq(pdev, 1);
		priv->wol_irq = platform_get_irq(pdev, 2);
	} else {
		priv->wol_irq = platform_get_irq(pdev, 1);
	}
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err_free_netdev;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err_free_netdev;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err_free_netdev;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
			 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	priv->rx_max_coalesced_frames = 1;
	u64_stats_init(&priv->syncp);

	priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;

	ret = register_dsa_notifier(&priv->dsa_notifier);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DSA notifier\n");
		goto err_deregister_fixed_link;
	}

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_notifier;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT%s" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 priv->is_lite ? " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;

err_deregister_notifier:
	unregister_dsa_notifier(&priv->dsa_notifier);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}
static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_dsa_notifier(&priv->dsa_notifier);
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	unsigned int index, i = 0;
	u32 reg;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
			    UMAC_PSW_LS);
		reg |= PSW_EN;
	}
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->wolopts & WAKE_FILTER) {
		/* Turn on ACPI matching to steal packets from RBUF */
		reg = rbuf_readl(priv, RBUF_CONTROL);
		if (priv->is_lite)
			reg |= RBUF_ACPI_EN_LITE;
		else
			reg |= RBUF_ACPI_EN;
		rbuf_writel(priv, reg, RBUF_CONTROL);

		/* Enable RXCHK, active filters and Broadcom tag matching */
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
			 RXCHK_BRCM_TAG_MATCH_SHIFT);
		for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
			reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
			i++;
		}
		reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Make sure RBUF entered WoL mode as result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		mpd_enable_set(priv, false);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}
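/* Usage sketch (illustrative): wake-up is armed from userspace before
 * suspending, e.g. via ethtool on a hypothetical "eth0":
 *
 *	ethtool -s eth0 wol g                               (WAKE_MAGIC)
 *	ethtool -s eth0 wol gs sopass 00:11:22:33:44:55     (WAKE_MAGICSECURE)
 *
 * With WAKE_MAGICSECURE, the 6-byte SecureOn password lands in
 * priv->sopass and is split above into a 16-bit UMAC_PSW_MS write and a
 * 32-bit UMAC_PSW_LS write.
 */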
static int __maybe_unused bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcm_sysport_netif_stop(dev);

	phy_suspend(dev->phydev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}
static int __maybe_unused bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Restore enabled features */
	bcm_sysport_set_features(dev, dev->features);

	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);

	netif_device_attach(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
			 bcm_sysport_suspend, bcm_sysport_resume);

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");