// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"
/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = readl_relaxed(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,		\
				  u32 val, u32 off)			\
{									\
	writel_relaxed(val, priv->base + offset + off);			\
}
BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
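/* Illustrative expansion (editorial note, not part of the driver): each
 * invocation above generates a readl/writel pair for one register block,
 * e.g.:
 *
 *   u32 reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
 *   intrl2_0_writel(priv, reg, INTRL2_CPU_CLEAR);
 *
 * both of which resolve to readl_relaxed()/writel_relaxed() at
 * priv->base + SYS_PORT_INTRL2_0_OFFSET + off.
 */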
/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved by 4 bytes up, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}
static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite) {
		return BIT(bit);
	} else {
		if (bit >= ACB_ALGO)
			return BIT(bit + 1);
		else
			return BIT(bit);
	}
}
/* L2-interrupt masking/unmasking helpers, which automatically save the applied
 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)
/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the check for 64-bits explicit here to save
 * one register write per-packet on 32-bits platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		       d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}
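/* Editorial note: on CONFIG_PHYS_ADDR_T_64BIT kernels the descriptor takes
 * two writes, with the upper address bits sharing the status/length word
 * (DESC_ADDR_HI_STATUS_LEN); 32-bit platforms write only the low word,
 * saving one GISB/RBUS access per packet as the comment above explains.
 */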
/* Ethtool operations */
static void bcm_sysport_set_rx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	/* Clear L2 header checks, which would prevent BPDUs
	 * from being received.
	 */
	reg &= ~RXCHK_L2_HDR_DIS;
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-bytes Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);
}
static void bcm_sysport_set_tx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);
}
static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
				  GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

	bcm_sysport_set_rx_csum(dev, features);
	bcm_sysport_set_tx_csum(dev, features);

	return 0;
}
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)
static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}
static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}
static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}
static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}
static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}
static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}
static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else
			data[i] = *(u32 *)p;
		j++;
	}

	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}
static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGICSECURE)
		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}
static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
					u32 usecs, u32 pkts)
{
	u32 reg;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= pkts;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}
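/* Worked example (illustrative): the timeout field counts ticks of the
 * 125 MHz system clock divided by 1024, i.e. ~8.192 us per tick. For
 * usecs = 50, DIV_ROUND_UP(50 * 1000, 8192) = 7 ticks, so the interrupt
 * fires after ~57.3 us when fewer than 'pkts' packets have arrived.
 */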
static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
					struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = ring->priv;
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
	reg &= ~(RING_INTR_THRESH_MASK |
		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
	reg |= ec->tx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
			    RING_TIMEOUT_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}
static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

	return 0;
}
static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct dim_cq_moder moder;
	u32 usecs, pkts;
	unsigned int i;

	/* Base system clock is 125Mhz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192 us, our maximum value
	 * has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
	    ec->use_adaptive_tx_coalesce)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

	/* Apply desired coalescing parameters */
	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

	return 0;
}
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}
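/* Design note (editorial): the refill allocates and maps the replacement
 * buffer *before* handing back the received one, so a cb either carries
 * both a valid skb and DMA mapping or neither; on allocation or mapping
 * failure the old buffer stays on the ring and the packet is dropped by
 * the caller instead of leaving a hole in the ring.
 */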
static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}
/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	unsigned int processed_bytes = 0;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT Lite
	 * groups the producer and consumer indexes into the same 32-bit
	 * which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			  DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before Ethernet
		 * header plus we have the Receive Status Block, strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);
		processed_bytes += len;

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	priv->dim.packets = processed;
	priv->dim.bytes = processed_bytes;

	return processed;
}
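/* Worked example (illustrative): the (p_index - rx_c_index) subtraction
 * above relies on unsigned modular arithmetic, so index wraparound is
 * handled for free. With a 16-bit mask, p_index = 3 and rx_c_index =
 * 0xfffe gives (3 - 0xfffe) & RDMA_CONS_INDEX_MASK = 5 packets to process.
 */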
static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}
/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}
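/* Design note (editorial): the TX path uses the same modular-index trick
 * as the RX path: (c_index - ring->c_index) & RING_CONS_INDEX_MASK stays
 * correct across hardware consumer-index wraparound without any explicit
 * overflow check.
 */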
/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}
/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}
static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}
static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}
static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	struct dim_sample dim_sample = {};
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index, producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is enabled, so write our copy of the consumer index accordingly.
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	if (priv->dim.use_dim) {
		dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
				  priv->dim.bytes, &dim_sample);
		net_dim(&priv->dim.dim, dim_sample);
	}

	return work_done;
}
static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
{
	u32 reg, bit;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (enable)
		reg |= MPD_EN;
	else
		reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->is_lite)
		bit = RBUF_ACPI_EN_LITE;
	else
		bit = RBUF_ACPI_EN;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	if (enable)
		reg |= bit;
	else
		reg &= ~bit;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}
static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	unsigned int index;
	u32 reg;

	/* Disable RXCHK, active filters and Broadcom tag matching */
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
	rxchk_writel(priv, reg, RXCHK_CONTROL);

	/* Make sure we restore correct CID index in case HW lost
	 * its context during deep idle state
	 */
	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		rxchk_writel(priv, priv->filters_loc[index] <<
			     RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
		rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
	}

	/* Clear the MagicPacket detection logic */
	mpd_enable_set(priv, false);

	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
	if (reg & INTRL2_0_MPD)
		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");

	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
				  RXCHK_BRCM_TAG_MATCH_MASK;
		netdev_info(priv->netdev,
			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
	}

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
static void bcm_sysport_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct bcm_sysport_net_dim *ndim =
			container_of(dim, struct bcm_sysport_net_dim, dim);
	struct bcm_sysport_priv *priv =
			container_of(ndim, struct bcm_sysport_priv, dim);
	struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
								    dim->profile_ix);

	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
	dim->state = DIM_START_MEASURE;
}
/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		priv->dim.event_ctr++;
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}
/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}
static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* The RX/TX ISRs expect the net_device as their dev_id cookie */
	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, dev);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, dev);
		enable_irq(priv->irq1);
	}
}
#endif
static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	__be16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		if (!nskb) {
			dev_kfree_skb_any(skb);
			priv->mib.tx_realloc_tsb_failed++;
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_consume_skb_any(skb);
		skb = nskb;
		priv->mib.tx_realloc_tsb++;
	}

	tsb = skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		switch (ip_ver) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP &&
			    ip_ver == htons(ETH_P_IP))
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}
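/* Worked example (illustrative, assuming the L4_* shifts/masks from
 * bcmsysport.h): for a TCP/IPv4 frame behind a 14-byte Ethernet header,
 * skb_checksum_start_offset() is 34, so csum_start = 34 - sizeof(*tsb),
 * and the packed checksum pointer becomes csum_start +
 * offsetof(struct tcphdr, check) = csum_start + 16 in the low bits, with
 * the L4 start offset in the bits above L4_PTR_SHIFT.
 */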
static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	u32 len_status, addr_lo;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ports are latched, so write upper address first */
	tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
	tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
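/* Design note (editorial): per the "Ports are latched" comment above, the
 * TDMA write port holds the HI word until the LO word arrives, so
 * len_status (carrying the high address bits, length and flags) must be
 * written first; the addr_lo write then commits the complete descriptor to
 * the hardware ring.
 */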
static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}
/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	if (priv->is_lite)
		goto out;

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}
out:
	if (changed)
		phy_print_status(phydev);
}
static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
				 void (*cb)(struct work_struct *work))
{
	struct bcm_sysport_net_dim *dim = &priv->dim;

	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;
}
static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_net_dim *dim = &priv->dim;
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
}
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	size_t size;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->clean_index = 0;
	ring->alloc_size = ring->size;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));

	/* Configure QID and port mapping */
	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
	if (ring->inspect) {
		reg |= ring->switch_queue & RING_QID_MASK;
		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
	} else {
		reg |= RING_IGNORE_STATUS;
	}
	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Enable ACB algorithm 2 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	reg |= tdma_control_bit(priv, ACB_ALGO);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
	 * with the original definition of ACB_ALGO
	 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->is_lite)
		reg &= ~BIT(TSB_SWAP1);
	/* Set a correct TSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= tdma_control_bit(priv, TSB_SWAP0);
	else
		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, switch q=%d,port=%d\n",
		  ring->size, ring->switch_queue,
		  ring->switch_port);

	return 0;
}
static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= tdma_control_bit(priv, TDMA_EN);
	else
		reg &= ~tdma_control_bit(priv, TDMA_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	int ret;
	u32 reg;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
				GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}
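/* Design note (editorial): in ring mode the RDMA start/end "addresses" are
 * expressed in descriptor words rather than bytes, hence END_ADDR_LO is
 * num_rx_desc_words - 1, and the descriptors themselves live in on-chip
 * memory at priv->base + SYS_PORT_RDMA_OFFSET rather than in DMA-coherent
 * system RAM.
 */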
static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}
static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}
static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	if (!priv->is_lite) {
		reg = umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		umac_writel(priv, reg, UMAC_CMD);
	} else {
		reg = gib_readl(priv, GIB_CONTROL);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		gib_writel(priv, reg, GIB_CONTROL);
	}

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}
static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		    addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	if (!priv->is_lite) {
		umac_writel(priv, mac0, UMAC_MAC0);
		umac_writel(priv, mac1, UMAC_MAC1);
	} else {
		gib_writel(priv, mac0, GIB_MAC0);
		gib_writel(priv, mac1, GIB_MAC1);
	}
}
static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}
static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}
static void bcm_sysport_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}
static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
	bcm_sysport_init_rx_coalesce(priv);
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the TXQs */
	if (!priv->is_lite)
		intrl2_1_mask_clear(priv, 0xffffffff);
	else
		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
}
static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	/* Set a correct RSB format on SYSTEMPORT Lite */
	if (priv->is_lite)
		reg &= ~RBUF_RSB_SWAP1;

	/* Set a correct RSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= RBUF_RSB_SWAP0;
	else
		reg &= ~RBUF_RSB_SWAP0;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}
static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	if (!priv->is_lite) {
		intrl2_1_mask_set(priv, 0xffffffff);
		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	}
}
static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = gib_readl(priv, GIB_CONTROL);
	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
	if (netdev_uses_dsa(priv->netdev)) {
		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
	}
	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
	reg |= 12 << GIB_IPG_LEN_SHIFT;
	gib_writel(priv, reg, GIB_CONTROL);
}
static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2 bytes alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Apply features again in case we changed them while interface was
	 * down
	 */
	bcm_sysport_set_features(dev, dev->features);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	if (!phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	bcm_sysport_mask_all_intrs(priv);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	if (!priv->is_lite) {
		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
				  dev->name, dev);
		if (ret) {
			netdev_err(dev, "failed to request TX interrupt\n");
			goto out_free_irq0;
		}
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	netif_tx_start_all_queues(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
	return ret;
}
static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_disable(dev);
	napi_disable(&priv->napi);
	cancel_work_sync(&priv->dim.dim.work);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	bcm_sysport_mask_all_intrs(priv);
}
static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	return 0;
}
static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
				 u64 location)
{
	unsigned int index;
	u32 reg;

	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
		reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
		reg &= RXCHK_BRCM_TAG_CID_MASK;
		if (reg == location)
			return index;
	}

	return -EINVAL;
}
static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	int index;

	/* This is not a rule that we know about */
	index = bcm_sysport_rule_find(priv, nfc->fs.location);
	if (index < 0)
		return -EOPNOTSUPP;

	nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;

	return 0;
}
static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	unsigned int index;
	u32 reg;

	/* We cannot match locations greater than what the classification ID
	 * permits (256 entries)
	 */
	if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
		return -E2BIG;

	/* We cannot support flows that are not destined for a wake-up */
	if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
		return -EOPNOTSUPP;

	/* All filters are already in use, we cannot match more rules */
	if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
	    RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
	if (index >= RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	/* Location is the classification ID, and index is the position
	 * within one of our 8 possible filters to be programmed
	 */
	reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
	reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
	reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
	rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
	rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));

	priv->filters_loc[index] = nfc->fs.location;
	set_bit(index, priv->filters);

	return 0;
}
static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
				u64 location)
{
	int index;

	/* This is not a rule that we know about */
	index = bcm_sysport_rule_find(priv, location);
	if (index < 0)
		return -EOPNOTSUPP;

	/* No need to disable this filter if it was enabled, this will
	 * be taken care of during suspend time by bcm_sysport_suspend_to_wol
	 */
	clear_bit(index, priv->filters);
	priv->filters_loc[index] = 0;

	return 0;
}
static int bcm_sysport_get_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sysport_rule_get(priv, nfc);
		break;
	default:
		break;
	}

	return ret;
}

static int bcm_sysport_set_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sysport_rule_set(priv, nfc);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sysport_rule_del(priv, nfc->fs.location);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
	.get_coalesce		= bcm_sysport_get_coalesce,
	.set_coalesce		= bcm_sysport_set_coalesce,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_rxnfc		= bcm_sysport_get_rxnfc,
	.set_rxnfc		= bcm_sysport_set_rxnfc,
};

static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
				    struct net_device *sb_dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb);
	struct bcm_sysport_tx_ring *tx_ring;
	unsigned int q, port;

	if (!netdev_uses_dsa(dev))
		return netdev_pick_tx(dev, skb, NULL);

	/* DSA tagging layer will have configured the correct queue */
	q = BRCM_TAG_GET_QUEUE(queue);
	port = BRCM_TAG_GET_PORT(queue);
	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];

	if (unlikely(!tx_ring))
		return netdev_pick_tx(dev, skb, NULL);

	return tx_ring->index;
}
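
/* Worked example for the lookup above, with hypothetical numbers: if
 * priv->per_port_num_tx_queues is 4 and the Broadcom tag carries switch
 * port 2, queue 1, the transmit ring is priv->ring_map[1 + 2 * 4], i.e.
 * entry 9, which bcm_sysport_map_queues() reserved for that (port, queue)
 * pair.
 */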

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
	.ndo_get_stats64	= bcm_sysport_get_stats64,
	.ndo_select_queue	= bcm_sysport_select_queue,
};

static int bcm_sysport_map_queues(struct notifier_block *nb,
				  struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_priv *priv;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	unsigned int q, qp, port;
	struct net_device *dev;

	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
	if (priv->netdev != info->master)
		return 0;

	dev = info->master;

	/* We can't be setting up queue inspection for non directly attached
	 * switches
	 */
	if (info->switch_number)
		return 0;

	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	/* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
	 * 1:1 mapping, only a 2:1 mapping. We achieve that by halving the
	 * number of queues on the per-port (slave_dev) network devices. This
	 * needs to happen now, before any slave network device is used, so
	 * that it accurately reflects the number of real TX queues.
	 */
	if (priv->is_lite)
		netif_set_real_num_tx_queues(slave_dev,
					     slave_dev->num_tx_queues / 2);

	num_tx_queues = slave_dev->real_num_tx_queues;

	if (priv->per_port_num_tx_queues &&
	    priv->per_port_num_tx_queues != num_tx_queues)
		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");

	priv->per_port_num_tx_queues = num_tx_queues;

	for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
	     q++) {
		ring = &priv->tx_rings[q];

		if (ring->inspect)
			continue;

		/* Just remember the mapping here; the actual programming is
		 * done during bcm_sysport_init_tx_ring()
		 */
		ring->switch_queue = qp;
		ring->switch_port = port;
		ring->inspect = true;
		priv->ring_map[q + port * num_tx_queues] = ring;
		qp++;
	}

	return 0;
}
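
/* Example with hypothetical numbers: with 32 MAC TX rings and 8 TX queues
 * per switch port, four ports can each claim a dedicated ring per switch
 * queue. A SYSTEMPORT Lite part with half the rings first halves every
 * slave device to 4 queues, so the same number of ports still fits.
 */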

static int bcm_sysport_unmap_queues(struct notifier_block *nb,
				    struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_priv *priv;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	struct net_device *dev;
	unsigned int q, port;

	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
	if (priv->netdev != info->master)
		return 0;

	dev = info->master;

	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	num_tx_queues = slave_dev->real_num_tx_queues;

	for (q = 0; q < dev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];

		if (ring->switch_port != port)
			continue;

		if (!ring->inspect)
			continue;

		ring->inspect = false;
		priv->ring_map[q + port * num_tx_queues] = NULL;
	}

	return 0;
}

static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	int ret = NOTIFY_DONE;

	switch (event) {
	case DSA_PORT_REGISTER:
		ret = bcm_sysport_map_queues(nb, ptr);
		break;
	case DSA_PORT_UNREGISTER:
		ret = bcm_sysport_unmap_queues(nb, ptr);
		break;
	}

	return notifier_from_errno(ret);
}
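
/* The handlers return 0 or a negative errno; notifier_from_errno() maps
 * 0 to NOTIFY_OK and an errno to a NOTIFY_BAD-style value. Since ret
 * starts out as NOTIFY_DONE (0), events we do not handle also report
 * success back to the DSA core.
 */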

#define REV_FMT	"v%2x.%02x"

static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
	[SYSTEMPORT] = {
		.is_lite = false,
		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
	},
	[SYSTEMPORT_LITE] = {
		.is_lite = true,
		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
	},
};

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemportlite-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
	{ .compatible = "brcm,systemport-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ .compatible = "brcm,systemport",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
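
/* A matching device tree node might look like this (addresses, interrupt
 * specifiers and queue counts are hypothetical; only the compatible
 * strings and the "systemport,num-txq"/"systemport,num-rxq" properties
 * come from the tables and probe code in this file):
 *
 *	ethernet@f04a0000 {
 *		compatible = "brcm,systemport-v1.00";
 *		reg = <0xf04a0000 0x4650>;
 *		interrupts = <0x0 0x18 0x0>, <0x0 0x19 0x0>;
 *		systemport,num-txq = <32>;
 *		systemport,num-rxq = <1>;
 *	};
 */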

static int bcm_sysport_probe(struct platform_device *pdev)
{
	const struct bcm_sysport_hw_params *params;
	const struct of_device_id *of_id = NULL;
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	of_id = of_match_node(bcm_sysport_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	/* Fairly quickly we need to know the type of adapter we have */
	params = of_id->data;

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	/* Sanity check the number of transmit queues */
	if (!txq || txq > TDMA_NUM_RINGS)
		return -EINVAL;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	/* Allocate number of TX rings */
	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
				      sizeof(struct bcm_sysport_tx_ring),
				      GFP_KERNEL);
	if (!priv->tx_rings)
		return -ENOMEM;

	priv->is_lite = params->is_lite;
	priv->num_rx_desc_words = params->num_rx_desc_words;

	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite) {
		priv->irq1 = platform_get_irq(pdev, 1);
		priv->wol_irq = platform_get_irq(pdev, 2);
	} else {
		priv->wol_irq = platform_get_irq(pdev, 1);
	}
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err_free_netdev;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err_free_netdev;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err_free_netdev;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (IS_ERR(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
			 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	priv->rx_max_coalesced_frames = 1;
	u64_stats_init(&priv->syncp);

	priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;

	ret = register_dsa_notifier(&priv->dsa_notifier);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DSA notifier\n");
		goto err_deregister_fixed_link;
	}

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_notifier;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT%s " REV_FMT
		 " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 priv->is_lite ? " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->irq0, priv->irq1, txq, rxq);

	return 0;

err_deregister_notifier:
	unregister_dsa_notifier(&priv->dsa_notifier);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}
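
/* REV_FMT decoding in the dev_info() above: with REV_MASK applied, a
 * REV_CNTL readout of e.g. 0x0102 is printed as "v 1.02", the major
 * revision living in the upper byte of priv->rev and the minor in the
 * lower byte.
 */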

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_dsa_notifier(&priv->dsa_notifier);
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	unsigned int index, i = 0;
	u32 reg;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
			    UMAC_PSW_LS);
		reg |= PSW_EN;
	}
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->wolopts & WAKE_FILTER) {
		/* Turn on ACPI matching to steal packets from RBUF */
		reg = rbuf_readl(priv, RBUF_CONTROL);
		if (priv->is_lite)
			reg |= RBUF_ACPI_EN_LITE;
		else
			reg |= RBUF_ACPI_EN;
		rbuf_writel(priv, reg, RBUF_CONTROL);

		/* Enable RXCHK, active filters and Broadcom tag matching */
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
			 RXCHK_BRCM_TAG_MATCH_SHIFT);
		for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
			reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
			i++;
		}
		reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Make sure RBUF entered WoL mode as result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		mpd_enable_set(priv, false);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

static int __maybe_unused bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcm_sysport_netif_stop(dev);

	phy_suspend(dev->phydev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}

static int __maybe_unused bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Restore enabled features */
	bcm_sysport_set_features(dev, dev->features);

	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);

	netif_device_attach(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
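
/* Resume mirrors bcm_sysport_suspend() in reverse: the rings are rebuilt,
 * the RX pipe (RDMA, RBUF, UniMAC RX) is brought up before the TX pipe
 * (UniMAC TX, TDMA), and only then are the PHY and the network interface
 * restarted.
 */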

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
			 bcm_sysport_suspend, bcm_sysport_resume);

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");