drivers/net/ethernet/broadcom/bcmsysport.c
80105bef
FF
1/*
2 * Broadcom BCM7xxx System Port Ethernet MAC driver
3 *
4 * Copyright (C) 2014 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/platform_device.h>
20#include <linux/of.h>
21#include <linux/of_net.h>
22#include <linux/of_mdio.h>
23#include <linux/phy.h>
24#include <linux/phy_fixed.h>
c6e970a0 25#include <net/dsa.h>
80105bef
FF
26#include <net/ip.h>
27#include <net/ipv6.h>
28
29#include "bcmsysport.h"
30
 31/* I/O register accessor helpers */
32#define BCM_SYSPORT_IO_MACRO(name, offset) \
33static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
34{ \
35 u32 reg = __raw_readl(priv->base + offset + off); \
36 return reg; \
37} \
38static inline void name##_writel(struct bcm_sysport_priv *priv, \
39 u32 val, u32 off) \
40{ \
41 __raw_writel(val, priv->base + offset + off); \
42} \
43
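/* For reference, expanding the macro for one block, e.g.
 * BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET), yields accessors
 * equivalent to:
 *
 *   static inline u32 intrl2_0_readl(struct bcm_sysport_priv *priv, u32 off)
 *   {
 *           return __raw_readl(priv->base + SYS_PORT_INTRL2_0_OFFSET + off);
 *   }
 *   static inline void intrl2_0_writel(struct bcm_sysport_priv *priv,
 *                                      u32 val, u32 off)
 *   {
 *           __raw_writel(val, priv->base + SYS_PORT_INTRL2_0_OFFSET + off);
 *   }
 *
 * so each register block below gets readl/writel helpers relative to that
 * block's base offset.
 */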
44BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
45BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
46BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
44a4524c 47BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
80105bef 48BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
80105bef
FF
49BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
50BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
51BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
52BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
53BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
54
44a4524c
FF
 55/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 56 * same layout, except it has been moved up by 4 bytes, *sigh*
 57 */
58static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
59{
60 if (priv->is_lite && off >= RDMA_STATUS)
61 off += 4;
62 return __raw_readl(priv->base + SYS_PORT_RDMA_OFFSET + off);
63}
64
65static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
66{
67 if (priv->is_lite && off >= RDMA_STATUS)
68 off += 4;
69 __raw_writel(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
70}
71
72static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
73{
74 if (!priv->is_lite) {
75 return BIT(bit);
76 } else {
77 if (bit >= ACB_ALGO)
78 return BIT(bit + 1);
79 else
80 return BIT(bit);
81 }
82}
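/* For example, tdma_enable_set() below toggles tdma_control_bit(priv, TDMA_EN)
 * so the same code works on both full SYSTEMPORT (plain BIT()) and
 * SYSTEMPORT Lite, where TDMA_CONTROL bits at or above ACB_ALGO are shifted
 * up by one position.
 */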
83
80105bef
FF
 84/* L2-interrupt masking/unmasking helpers, which automatically save the applied
 85 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot paths.
 86 */
87#define BCM_SYSPORT_INTR_L2(which) \
88static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
89 u32 mask) \
90{ \
80105bef 91 priv->irq##which##_mask &= ~(mask); \
9a0a5c4c 92 intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
80105bef
FF
93} \
94static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
95 u32 mask) \
96{ \
 97	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
98 priv->irq##which##_mask |= (mask); \
99} \
100
101BCM_SYSPORT_INTR_L2(0)
102BCM_SYSPORT_INTR_L2(1)
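/* These expand into intrl2_0_mask_{set,clear}() and intrl2_1_mask_{set,clear}().
 * For example, intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE) unmasks
 * (re-enables) the RX done interrupt, as done at the end of
 * bcm_sysport_poll().
 */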
103
 104/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 105 * nanoseconds), so keep the check for 64-bit explicit here to save
 106 * one register write per packet on 32-bit platforms.
 107 */
108static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
109 void __iomem *d,
110 dma_addr_t addr)
111{
112#ifdef CONFIG_PHYS_ADDR_T_64BIT
113 __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
23acb2fc 114 d + DESC_ADDR_HI_STATUS_LEN);
80105bef
FF
115#endif
116 __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
117}
118
119static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
23acb2fc
FF
120 struct dma_desc *desc,
121 unsigned int port)
80105bef
FF
122{
123 /* Ports are latched, so write upper address first */
124 tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
125 tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
126}
127
128/* Ethtool operations */
80105bef 129static int bcm_sysport_set_rx_csum(struct net_device *dev,
23acb2fc 130 netdev_features_t wanted)
80105bef
FF
131{
132 struct bcm_sysport_priv *priv = netdev_priv(dev);
133 u32 reg;
134
9d34c1cb 135 priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
80105bef 136 reg = rxchk_readl(priv, RXCHK_CONTROL);
9d34c1cb 137 if (priv->rx_chk_en)
80105bef
FF
138 reg |= RXCHK_EN;
139 else
140 reg &= ~RXCHK_EN;
141
142 /* If UniMAC forwards CRC, we need to skip over it to get
143 * a valid CHK bit to be set in the per-packet status word
144 */
9d34c1cb 145 if (priv->rx_chk_en && priv->crc_fwd)
80105bef
FF
146 reg |= RXCHK_SKIP_FCS;
147 else
148 reg &= ~RXCHK_SKIP_FCS;
149
d09d3038
FF
 150	/* If Broadcom tags are enabled (e.g. when using a switch), make
 151	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
 152	 * tag after the Ethernet MAC Source Address.
 153	 */
154 if (netdev_uses_dsa(dev))
155 reg |= RXCHK_BRCM_TAG_EN;
156 else
157 reg &= ~RXCHK_BRCM_TAG_EN;
158
80105bef
FF
159 rxchk_writel(priv, reg, RXCHK_CONTROL);
160
161 return 0;
162}
163
164static int bcm_sysport_set_tx_csum(struct net_device *dev,
23acb2fc 165 netdev_features_t wanted)
80105bef
FF
166{
167 struct bcm_sysport_priv *priv = netdev_priv(dev);
168 u32 reg;
169
170 /* Hardware transmit checksum requires us to enable the Transmit status
171 * block prepended to the packet contents
172 */
173 priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
174 reg = tdma_readl(priv, TDMA_CONTROL);
175 if (priv->tsb_en)
44a4524c 176 reg |= tdma_control_bit(priv, TSB_EN);
80105bef 177 else
44a4524c 178 reg &= ~tdma_control_bit(priv, TSB_EN);
80105bef
FF
179 tdma_writel(priv, reg, TDMA_CONTROL);
180
181 return 0;
182}
183
184static int bcm_sysport_set_features(struct net_device *dev,
23acb2fc 185 netdev_features_t features)
80105bef
FF
186{
187 netdev_features_t changed = features ^ dev->features;
188 netdev_features_t wanted = dev->wanted_features;
189 int ret = 0;
190
191 if (changed & NETIF_F_RXCSUM)
192 ret = bcm_sysport_set_rx_csum(dev, wanted);
193 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
194 ret = bcm_sysport_set_tx_csum(dev, wanted);
195
196 return ret;
197}
198
199/* Hardware counters must be kept in sync because the order/offset
200 * is important here (order in structure declaration = order in hardware)
201 */
202static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
203 /* general stats */
204 STAT_NETDEV(rx_packets),
205 STAT_NETDEV(tx_packets),
206 STAT_NETDEV(rx_bytes),
207 STAT_NETDEV(tx_bytes),
208 STAT_NETDEV(rx_errors),
209 STAT_NETDEV(tx_errors),
210 STAT_NETDEV(rx_dropped),
211 STAT_NETDEV(tx_dropped),
212 STAT_NETDEV(multicast),
213 /* UniMAC RSV counters */
214 STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
215 STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
216 STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
217 STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
218 STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
219 STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
220 STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
221 STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
222 STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
223 STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
224 STAT_MIB_RX("rx_pkts", mib.rx.pkt),
225 STAT_MIB_RX("rx_bytes", mib.rx.bytes),
226 STAT_MIB_RX("rx_multicast", mib.rx.mca),
227 STAT_MIB_RX("rx_broadcast", mib.rx.bca),
228 STAT_MIB_RX("rx_fcs", mib.rx.fcs),
229 STAT_MIB_RX("rx_control", mib.rx.cf),
230 STAT_MIB_RX("rx_pause", mib.rx.pf),
231 STAT_MIB_RX("rx_unknown", mib.rx.uo),
232 STAT_MIB_RX("rx_align", mib.rx.aln),
233 STAT_MIB_RX("rx_outrange", mib.rx.flr),
234 STAT_MIB_RX("rx_code", mib.rx.cde),
235 STAT_MIB_RX("rx_carrier", mib.rx.fcr),
236 STAT_MIB_RX("rx_oversize", mib.rx.ovr),
237 STAT_MIB_RX("rx_jabber", mib.rx.jbr),
238 STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
239 STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
240 STAT_MIB_RX("rx_unicast", mib.rx.uc),
241 STAT_MIB_RX("rx_ppp", mib.rx.ppp),
242 STAT_MIB_RX("rx_crc", mib.rx.rcrc),
243 /* UniMAC TSV counters */
244 STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
245 STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
246 STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
247 STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
248 STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
249 STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
250 STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
251 STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
252 STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
253 STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
254 STAT_MIB_TX("tx_pkts", mib.tx.pkts),
255 STAT_MIB_TX("tx_multicast", mib.tx.mca),
256 STAT_MIB_TX("tx_broadcast", mib.tx.bca),
257 STAT_MIB_TX("tx_pause", mib.tx.pf),
258 STAT_MIB_TX("tx_control", mib.tx.cf),
259 STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
260 STAT_MIB_TX("tx_oversize", mib.tx.ovr),
261 STAT_MIB_TX("tx_defer", mib.tx.drf),
262 STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
263 STAT_MIB_TX("tx_single_col", mib.tx.scl),
264 STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
265 STAT_MIB_TX("tx_late_col", mib.tx.lcl),
266 STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
267 STAT_MIB_TX("tx_frags", mib.tx.frg),
268 STAT_MIB_TX("tx_total_col", mib.tx.ncl),
269 STAT_MIB_TX("tx_jabber", mib.tx.jbr),
270 STAT_MIB_TX("tx_bytes", mib.tx.bytes),
271 STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
272 STAT_MIB_TX("tx_unicast", mib.tx.uc),
273 /* UniMAC RUNT counters */
274 STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
275 STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
276 STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
277 STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
278 /* RXCHK misc statistics */
279 STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
280 STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
23acb2fc 281 RXCHK_OTHER_DISC_CNTR),
80105bef
FF
282 /* RBUF misc statistics */
283 STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
284 STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
55ff4ea9
FF
285 STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
286 STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
287 STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
30defeb2 288 /* Per TX-queue statistics are dynamically appended */
80105bef
FF
289};
290
291#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
292
293static void bcm_sysport_get_drvinfo(struct net_device *dev,
23acb2fc 294 struct ethtool_drvinfo *info)
80105bef
FF
295{
296 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
297 strlcpy(info->version, "0.1", sizeof(info->version));
298 strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
80105bef
FF
299}
300
301static u32 bcm_sysport_get_msglvl(struct net_device *dev)
302{
303 struct bcm_sysport_priv *priv = netdev_priv(dev);
304
305 return priv->msg_enable;
306}
307
308static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
309{
310 struct bcm_sysport_priv *priv = netdev_priv(dev);
311
312 priv->msg_enable = enable;
313}
314
44a4524c
FF
315static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
316{
317 switch (type) {
318 case BCM_SYSPORT_STAT_NETDEV:
319 case BCM_SYSPORT_STAT_RXCHK:
320 case BCM_SYSPORT_STAT_RBUF:
321 case BCM_SYSPORT_STAT_SOFT:
322 return true;
323 default:
324 return false;
325 }
326}
327
80105bef
FF
328static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
329{
44a4524c
FF
330 struct bcm_sysport_priv *priv = netdev_priv(dev);
331 const struct bcm_sysport_stats *s;
332 unsigned int i, j;
333
80105bef
FF
334 switch (string_set) {
335 case ETH_SS_STATS:
44a4524c
FF
336 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
337 s = &bcm_sysport_gstrings_stats[i];
338 if (priv->is_lite &&
339 !bcm_sysport_lite_stat_valid(s->type))
340 continue;
341 j++;
342 }
30defeb2
FF
343 /* Include per-queue statistics */
344 return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
80105bef
FF
345 default:
346 return -EOPNOTSUPP;
347 }
348}
349
350static void bcm_sysport_get_strings(struct net_device *dev,
23acb2fc 351 u32 stringset, u8 *data)
80105bef 352{
44a4524c
FF
353 struct bcm_sysport_priv *priv = netdev_priv(dev);
354 const struct bcm_sysport_stats *s;
30defeb2 355 char buf[128];
44a4524c 356 int i, j;
80105bef
FF
357
358 switch (stringset) {
359 case ETH_SS_STATS:
44a4524c
FF
360 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
361 s = &bcm_sysport_gstrings_stats[i];
362 if (priv->is_lite &&
363 !bcm_sysport_lite_stat_valid(s->type))
364 continue;
365
366 memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
23acb2fc 367 ETH_GSTRING_LEN);
44a4524c 368 j++;
80105bef 369 }
30defeb2
FF
370
371 for (i = 0; i < dev->num_tx_queues; i++) {
372 snprintf(buf, sizeof(buf), "txq%d_packets", i);
373 memcpy(data + j * ETH_GSTRING_LEN, buf,
374 ETH_GSTRING_LEN);
375 j++;
376
377 snprintf(buf, sizeof(buf), "txq%d_bytes", i);
378 memcpy(data + j * ETH_GSTRING_LEN, buf,
379 ETH_GSTRING_LEN);
380 j++;
381 }
80105bef
FF
382 break;
383 default:
384 break;
385 }
386}
387
388static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
389{
390 int i, j = 0;
391
392 for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
393 const struct bcm_sysport_stats *s;
394 u8 offset = 0;
395 u32 val = 0;
396 char *p;
397
398 s = &bcm_sysport_gstrings_stats[i];
399 switch (s->type) {
400 case BCM_SYSPORT_STAT_NETDEV:
55ff4ea9 401 case BCM_SYSPORT_STAT_SOFT:
80105bef
FF
402 continue;
403 case BCM_SYSPORT_STAT_MIB_RX:
404 case BCM_SYSPORT_STAT_MIB_TX:
405 case BCM_SYSPORT_STAT_RUNT:
44a4524c
FF
406 if (priv->is_lite)
407 continue;
408
80105bef
FF
409 if (s->type != BCM_SYSPORT_STAT_MIB_RX)
410 offset = UMAC_MIB_STAT_OFFSET;
411 val = umac_readl(priv, UMAC_MIB_START + j + offset);
412 break;
413 case BCM_SYSPORT_STAT_RXCHK:
414 val = rxchk_readl(priv, s->reg_offset);
415 if (val == ~0)
416 rxchk_writel(priv, 0, s->reg_offset);
417 break;
418 case BCM_SYSPORT_STAT_RBUF:
419 val = rbuf_readl(priv, s->reg_offset);
420 if (val == ~0)
421 rbuf_writel(priv, 0, s->reg_offset);
422 break;
423 }
424
425 j += s->stat_sizeof;
426 p = (char *)priv + s->stat_offset;
427 *(u32 *)p = val;
428 }
429
430 netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
431}
432
433static void bcm_sysport_get_stats(struct net_device *dev,
23acb2fc 434 struct ethtool_stats *stats, u64 *data)
80105bef
FF
435{
436 struct bcm_sysport_priv *priv = netdev_priv(dev);
30defeb2 437 struct bcm_sysport_tx_ring *ring;
44a4524c 438 int i, j;
80105bef
FF
439
440 if (netif_running(dev))
441 bcm_sysport_update_mib_counters(priv);
442
44a4524c 443 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
80105bef
FF
444 const struct bcm_sysport_stats *s;
445 char *p;
446
447 s = &bcm_sysport_gstrings_stats[i];
448 if (s->type == BCM_SYSPORT_STAT_NETDEV)
449 p = (char *)&dev->stats;
450 else
451 p = (char *)priv;
452 p += s->stat_offset;
44a4524c
FF
453 data[j] = *(unsigned long *)p;
454 j++;
80105bef 455 }
30defeb2
FF
456
 457	/* For SYSTEMPORT Lite, since we have holes in our statistics, j would
 458	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
 459	 * needs to point to how many total statistics we have, minus the
 460	 * number of per TX-queue statistics.
 461	 */
462 j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
463 dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
464
465 for (i = 0; i < dev->num_tx_queues; i++) {
466 ring = &priv->tx_rings[i];
467 data[j] = ring->packets;
468 j++;
469 data[j] = ring->bytes;
470 j++;
471 }
80105bef
FF
472}
473
83e82f4c
FF
474static void bcm_sysport_get_wol(struct net_device *dev,
475 struct ethtool_wolinfo *wol)
476{
477 struct bcm_sysport_priv *priv = netdev_priv(dev);
478 u32 reg;
479
480 wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
481 wol->wolopts = priv->wolopts;
482
483 if (!(priv->wolopts & WAKE_MAGICSECURE))
484 return;
485
486 /* Return the programmed SecureOn password */
487 reg = umac_readl(priv, UMAC_PSW_MS);
488 put_unaligned_be16(reg, &wol->sopass[0]);
489 reg = umac_readl(priv, UMAC_PSW_LS);
490 put_unaligned_be32(reg, &wol->sopass[2]);
491}
492
493static int bcm_sysport_set_wol(struct net_device *dev,
23acb2fc 494 struct ethtool_wolinfo *wol)
83e82f4c
FF
495{
496 struct bcm_sysport_priv *priv = netdev_priv(dev);
497 struct device *kdev = &priv->pdev->dev;
498 u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;
499
500 if (!device_can_wakeup(kdev))
501 return -ENOTSUPP;
502
503 if (wol->wolopts & ~supported)
504 return -EINVAL;
505
506 /* Program the SecureOn password */
507 if (wol->wolopts & WAKE_MAGICSECURE) {
508 umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
23acb2fc 509 UMAC_PSW_MS);
83e82f4c 510 umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
23acb2fc 511 UMAC_PSW_LS);
83e82f4c
FF
512 }
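	/* The 6-byte SecureOn password is split across two registers: the
	 * first two bytes are programmed into UMAC_PSW_MS and the remaining
	 * four into UMAC_PSW_LS, matching how bcm_sysport_get_wol() reads
	 * them back above.
	 */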
513
514 /* Flag the device and relevant IRQ as wakeup capable */
515 if (wol->wolopts) {
516 device_set_wakeup_enable(kdev, 1);
61b423a8
FF
517 if (priv->wol_irq_disabled)
518 enable_irq_wake(priv->wol_irq);
83e82f4c
FF
519 priv->wol_irq_disabled = 0;
520 } else {
521 device_set_wakeup_enable(kdev, 0);
522 /* Avoid unbalanced disable_irq_wake calls */
523 if (!priv->wol_irq_disabled)
524 disable_irq_wake(priv->wol_irq);
525 priv->wol_irq_disabled = 1;
526 }
527
528 priv->wolopts = wol->wolopts;
529
530 return 0;
531}
532
b1a15e86
FF
533static int bcm_sysport_get_coalesce(struct net_device *dev,
534 struct ethtool_coalesce *ec)
535{
536 struct bcm_sysport_priv *priv = netdev_priv(dev);
537 u32 reg;
538
539 reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
540
541 ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
542 ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
543
d0634868
FF
544 reg = rdma_readl(priv, RDMA_MBDONE_INTR);
545
546 ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
547 ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
548
b1a15e86
FF
549 return 0;
550}
551
552static int bcm_sysport_set_coalesce(struct net_device *dev,
553 struct ethtool_coalesce *ec)
554{
555 struct bcm_sysport_priv *priv = netdev_priv(dev);
556 unsigned int i;
557 u32 reg;
558
d0634868
FF
 559	/* Base system clock is 125 MHz, the DMA timeout is this reference clock
 560	 * divided by 1024, which yields roughly 8.192 us; our maximum value has
 561	 * to fit in the RING_TIMEOUT_MASK (16 bits).
b1a15e86
FF
562 */
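	/* Worked example using the 8.192 us tick described above: a requested
	 * tx_coalesce_usecs of 100 is converted below with
	 * DIV_ROUND_UP(100 * 1000, 8192) = 13 ticks (~106.5 us), and is read
	 * back by bcm_sysport_get_coalesce() as 13 * 8192 / 1000 = 106 us.
	 */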
563 if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
d0634868
FF
564 ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
565 ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
566 ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
b1a15e86
FF
567 return -EINVAL;
568
d0634868
FF
569 if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
570 (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
b1a15e86
FF
571 return -EINVAL;
572
573 for (i = 0; i < dev->num_tx_queues; i++) {
574 reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
575 reg &= ~(RING_INTR_THRESH_MASK |
576 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
577 reg |= ec->tx_max_coalesced_frames;
578 reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
579 RING_TIMEOUT_SHIFT;
580 tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
581 }
582
d0634868
FF
583 reg = rdma_readl(priv, RDMA_MBDONE_INTR);
584 reg &= ~(RDMA_INTR_THRESH_MASK |
585 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
586 reg |= ec->rx_max_coalesced_frames;
587 reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
588 RDMA_TIMEOUT_SHIFT;
589 rdma_writel(priv, reg, RDMA_MBDONE_INTR);
590
b1a15e86
FF
591 return 0;
592}
593
80105bef
FF
594static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
595{
596 dev_kfree_skb_any(cb->skb);
597 cb->skb = NULL;
598 dma_unmap_addr_set(cb, dma_addr, 0);
599}
600
c73b0183
FF
601static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
602 struct bcm_sysport_cb *cb)
80105bef
FF
603{
604 struct device *kdev = &priv->pdev->dev;
605 struct net_device *ndev = priv->netdev;
c73b0183 606 struct sk_buff *skb, *rx_skb;
80105bef 607 dma_addr_t mapping;
80105bef 608
c73b0183
FF
609 /* Allocate a new SKB for a new packet */
610 skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
611 if (!skb) {
612 priv->mib.alloc_rx_buff_failed++;
80105bef 613 netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
c73b0183 614 return NULL;
80105bef
FF
615 }
616
c73b0183 617 mapping = dma_map_single(kdev, skb->data,
23acb2fc 618 RX_BUF_LENGTH, DMA_FROM_DEVICE);
c73b0183 619 if (dma_mapping_error(kdev, mapping)) {
60b4ea17 620 priv->mib.rx_dma_failed++;
c73b0183 621 dev_kfree_skb_any(skb);
80105bef 622 netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
c73b0183 623 return NULL;
80105bef
FF
624 }
625
c73b0183
FF
626 /* Grab the current SKB on the ring */
627 rx_skb = cb->skb;
628 if (likely(rx_skb))
629 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
630 RX_BUF_LENGTH, DMA_FROM_DEVICE);
631
632 /* Put the new SKB on the ring */
633 cb->skb = skb;
80105bef 634 dma_unmap_addr_set(cb, dma_addr, mapping);
baf387a8 635 dma_desc_set_addr(priv, cb->bd_addr, mapping);
80105bef
FF
636
637 netif_dbg(priv, rx_status, ndev, "RX refill\n");
638
c73b0183
FF
639 /* Return the current SKB to the caller */
640 return rx_skb;
80105bef
FF
641}
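/* bcm_sysport_alloc_rx_bufs() below primes every RX control block through
 * this swap scheme, and bcm_sysport_desc_rx() calls it for each received
 * packet, so a ring entry only ever gives up its old buffer once a newly
 * mapped replacement is in place.
 */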
642
643static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
644{
645 struct bcm_sysport_cb *cb;
c73b0183 646 struct sk_buff *skb;
80105bef
FF
647 unsigned int i;
648
649 for (i = 0; i < priv->num_rx_bds; i++) {
baf387a8 650 cb = &priv->rx_cbs[i];
c73b0183
FF
651 skb = bcm_sysport_rx_refill(priv, cb);
652 if (skb)
653 dev_kfree_skb(skb);
654 if (!cb->skb)
655 return -ENOMEM;
80105bef
FF
656 }
657
c73b0183 658 return 0;
80105bef
FF
659}
660
661/* Poll the hardware for up to budget packets to process */
662static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
663 unsigned int budget)
664{
80105bef
FF
665 struct net_device *ndev = priv->netdev;
666 unsigned int processed = 0, to_process;
667 struct bcm_sysport_cb *cb;
668 struct sk_buff *skb;
669 unsigned int p_index;
670 u16 len, status;
3afc557d 671 struct bcm_rsb *rsb;
80105bef 672
6baa785a
FF
673 /* Clear status before servicing to reduce spurious interrupts */
674 intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);
675
44a4524c
FF
 676	/* Determine how much we should process since the last call; SYSTEMPORT Lite
 677	 * groups the producer and consumer indexes into the same 32-bit register,
 678	 * which we access using RDMA_CONS_INDEX.
 679	 */
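	/* On SYSTEMPORT Lite the producer index appears to live in the low
	 * 16 bits (it is masked with RDMA_PROD_INDEX_MASK below), while the
	 * consumer index is written back into the upper 16 bits in
	 * bcm_sysport_poll().
	 */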
680 if (!priv->is_lite)
681 p_index = rdma_readl(priv, RDMA_PROD_INDEX);
682 else
683 p_index = rdma_readl(priv, RDMA_CONS_INDEX);
80105bef
FF
684 p_index &= RDMA_PROD_INDEX_MASK;
685
e9d7af78 686 to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
80105bef
FF
687
688 netif_dbg(priv, rx_status, ndev,
23acb2fc
FF
689 "p_index=%d rx_c_index=%d to_process=%d\n",
690 p_index, priv->rx_c_index, to_process);
80105bef 691
23acb2fc 692 while ((processed < to_process) && (processed < budget)) {
80105bef 693 cb = &priv->rx_cbs[priv->rx_read_ptr];
c73b0183 694 skb = bcm_sysport_rx_refill(priv, cb);
fe24ba08 695
fe24ba08
FF
696
 697		/* We do not have a backing SKB, so we do not have a corresponding
 698		 * DMA mapping for this incoming packet since
 699		 * bcm_sysport_rx_refill always either has both skb and mapping
 700		 * or none.
 701		 */
702 if (unlikely(!skb)) {
703 netif_err(priv, rx_err, ndev, "out of memory!\n");
704 ndev->stats.rx_dropped++;
705 ndev->stats.rx_errors++;
c73b0183 706 goto next;
fe24ba08
FF
707 }
708
80105bef 709 /* Extract the Receive Status Block prepended */
3afc557d 710 rsb = (struct bcm_rsb *)skb->data;
80105bef
FF
711 len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
712 status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
23acb2fc 713 DESC_STATUS_MASK;
80105bef 714
80105bef 715 netif_dbg(priv, rx_status, ndev,
23acb2fc
FF
716 "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
717 p_index, priv->rx_c_index, priv->rx_read_ptr,
718 len, status);
80105bef 719
25977ac7
FF
720 if (unlikely(len > RX_BUF_LENGTH)) {
721 netif_err(priv, rx_status, ndev, "oversized packet\n");
722 ndev->stats.rx_length_errors++;
723 ndev->stats.rx_errors++;
724 dev_kfree_skb_any(skb);
725 goto next;
726 }
727
80105bef
FF
728 if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
729 netif_err(priv, rx_status, ndev, "fragmented packet!\n");
730 ndev->stats.rx_dropped++;
731 ndev->stats.rx_errors++;
c73b0183
FF
732 dev_kfree_skb_any(skb);
733 goto next;
80105bef
FF
734 }
735
736 if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
737 netif_err(priv, rx_err, ndev, "error packet\n");
ad51c610 738 if (status & RX_STATUS_OVFLOW)
80105bef
FF
739 ndev->stats.rx_over_errors++;
740 ndev->stats.rx_dropped++;
741 ndev->stats.rx_errors++;
c73b0183
FF
742 dev_kfree_skb_any(skb);
743 goto next;
80105bef
FF
744 }
745
746 skb_put(skb, len);
747
748 /* Hardware validated our checksum */
749 if (likely(status & DESC_L4_CSUM))
750 skb->ip_summed = CHECKSUM_UNNECESSARY;
751
e0ea05d0
FF
 752		/* Hardware prepends 2 bytes before the Ethernet header, plus we
 753		 * have the Receive Status Block; strip all of this off
 754		 * the SKB.
80105bef
FF
755 */
756 skb_pull(skb, sizeof(*rsb) + 2);
757 len -= (sizeof(*rsb) + 2);
758
759 /* UniMAC may forward CRC */
760 if (priv->crc_fwd) {
761 skb_trim(skb, len - ETH_FCS_LEN);
762 len -= ETH_FCS_LEN;
763 }
764
765 skb->protocol = eth_type_trans(skb, ndev);
766 ndev->stats.rx_packets++;
767 ndev->stats.rx_bytes += len;
768
769 napi_gro_receive(&priv->napi, skb);
c73b0183
FF
770next:
771 processed++;
772 priv->rx_read_ptr++;
773
774 if (priv->rx_read_ptr == priv->num_rx_bds)
775 priv->rx_read_ptr = 0;
80105bef
FF
776 }
777
778 return processed;
779}
780
30defeb2 781static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
23acb2fc
FF
782 struct bcm_sysport_cb *cb,
783 unsigned int *bytes_compl,
784 unsigned int *pkts_compl)
80105bef 785{
30defeb2 786 struct bcm_sysport_priv *priv = ring->priv;
80105bef 787 struct device *kdev = &priv->pdev->dev;
80105bef
FF
788
789 if (cb->skb) {
30defeb2 790 ring->bytes += cb->skb->len;
80105bef
FF
791 *bytes_compl += cb->skb->len;
792 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
23acb2fc
FF
793 dma_unmap_len(cb, dma_len),
794 DMA_TO_DEVICE);
30defeb2 795 ring->packets++;
80105bef
FF
796 (*pkts_compl)++;
797 bcm_sysport_free_cb(cb);
798 /* SKB fragment */
799 } else if (dma_unmap_addr(cb, dma_addr)) {
30defeb2 800 ring->bytes += dma_unmap_len(cb, dma_len);
80105bef 801 dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
23acb2fc 802 dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
80105bef
FF
803 dma_unmap_addr_set(cb, dma_addr, 0);
804 }
805}
806
807/* Reclaim queued SKBs for transmission completion, lockless version */
808static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
809 struct bcm_sysport_tx_ring *ring)
810{
811 struct net_device *ndev = priv->netdev;
812 unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
813 unsigned int pkts_compl = 0, bytes_compl = 0;
814 struct bcm_sysport_cb *cb;
80105bef
FF
815 u32 hw_ind;
816
6baa785a
FF
817 /* Clear status before servicing to reduce spurious interrupts */
818 if (!ring->priv->is_lite)
819 intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
820 else
821 intrl2_0_writel(ring->priv, BIT(ring->index +
822 INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);
823
80105bef
FF
824 /* Compute how many descriptors have been processed since last call */
825 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
826 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
827 ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
828
829 last_c_index = ring->c_index;
830 num_tx_cbs = ring->size;
831
832 c_index &= (num_tx_cbs - 1);
833
834 if (c_index >= last_c_index)
835 last_tx_cn = c_index - last_c_index;
836 else
837 last_tx_cn = num_tx_cbs - last_c_index + c_index;
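	/* Example: with the 256-entry rings set up in
	 * bcm_sysport_init_tx_ring(), last_c_index = 250 and c_index = 5
	 * wrap around to last_tx_cn = 256 - 250 + 5 = 11 descriptors to
	 * reclaim.
	 */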
838
839 netif_dbg(priv, tx_done, ndev,
23acb2fc
FF
840 "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
841 ring->index, c_index, last_tx_cn, last_c_index);
80105bef
FF
842
843 while (last_tx_cn-- > 0) {
844 cb = ring->cbs + last_c_index;
30defeb2 845 bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
80105bef
FF
846
847 ring->desc_count++;
848 last_c_index++;
849 last_c_index &= (num_tx_cbs - 1);
850 }
851
852 ring->c_index = c_index;
853
80105bef 854 netif_dbg(priv, tx_done, ndev,
23acb2fc
FF
855 "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
856 ring->index, ring->c_index, pkts_compl, bytes_compl);
80105bef
FF
857
858 return pkts_compl;
859}
860
861/* Locked version of the per-ring TX reclaim routine */
862static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
863 struct bcm_sysport_tx_ring *ring)
864{
148d3d02 865 struct netdev_queue *txq;
80105bef 866 unsigned int released;
d8498088 867 unsigned long flags;
80105bef 868
148d3d02
FF
869 txq = netdev_get_tx_queue(priv->netdev, ring->index);
870
d8498088 871 spin_lock_irqsave(&ring->lock, flags);
80105bef 872 released = __bcm_sysport_tx_reclaim(priv, ring);
148d3d02
FF
873 if (released)
874 netif_tx_wake_queue(txq);
875
d8498088 876 spin_unlock_irqrestore(&ring->lock, flags);
80105bef
FF
877
878 return released;
879}
880
148d3d02
FF
881/* Locked version of the per-ring TX reclaim, but does not wake the queue */
882static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
883 struct bcm_sysport_tx_ring *ring)
884{
885 unsigned long flags;
886
887 spin_lock_irqsave(&ring->lock, flags);
888 __bcm_sysport_tx_reclaim(priv, ring);
889 spin_unlock_irqrestore(&ring->lock, flags);
890}
891
80105bef
FF
892static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
893{
894 struct bcm_sysport_tx_ring *ring =
895 container_of(napi, struct bcm_sysport_tx_ring, napi);
896 unsigned int work_done = 0;
897
898 work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
899
16f62d9b 900 if (work_done == 0) {
80105bef
FF
901 napi_complete(napi);
902 /* re-enable TX interrupt */
44a4524c
FF
903 if (!ring->priv->is_lite)
904 intrl2_1_mask_clear(ring->priv, BIT(ring->index));
905 else
906 intrl2_0_mask_clear(ring->priv, BIT(ring->index +
907 INTRL2_0_TDMA_MBDONE_SHIFT));
9dfa9a27
FF
908
909 return 0;
80105bef
FF
910 }
911
9dfa9a27 912 return budget;
80105bef
FF
913}
914
915static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
916{
917 unsigned int q;
918
919 for (q = 0; q < priv->netdev->num_tx_queues; q++)
920 bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
921}
922
923static int bcm_sysport_poll(struct napi_struct *napi, int budget)
924{
925 struct bcm_sysport_priv *priv =
926 container_of(napi, struct bcm_sysport_priv, napi);
927 unsigned int work_done = 0;
928
929 work_done = bcm_sysport_desc_rx(priv, budget);
930
931 priv->rx_c_index += work_done;
932 priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
44a4524c
FF
933
 934	/* SYSTEMPORT Lite groups the producer/consumer index; the producer is
 935	 * maintained by HW, but writes to it will be ignored while RDMA
 936	 * is active.
 937	 */
938 if (!priv->is_lite)
939 rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
940 else
941 rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
80105bef
FF
942
943 if (work_done < budget) {
c82f47ef 944 napi_complete_done(napi, work_done);
80105bef
FF
945 /* re-enable RX interrupts */
946 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
947 }
948
949 return work_done;
950}
951
83e82f4c
FF
952static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
953{
954 u32 reg;
955
956 /* Stop monitoring MPD interrupt */
957 intrl2_0_mask_set(priv, INTRL2_0_MPD);
958
959 /* Clear the MagicPacket detection logic */
960 reg = umac_readl(priv, UMAC_MPD_CTRL);
961 reg &= ~MPD_EN;
962 umac_writel(priv, reg, UMAC_MPD_CTRL);
963
964 netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
965}
80105bef
FF
966
967/* RX and misc interrupt routine */
968static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
969{
970 struct net_device *dev = dev_id;
971 struct bcm_sysport_priv *priv = netdev_priv(dev);
44a4524c
FF
972 struct bcm_sysport_tx_ring *txr;
973 unsigned int ring, ring_bit;
80105bef
FF
974
975 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
976 ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
977 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
978
979 if (unlikely(priv->irq0_stat == 0)) {
980 netdev_warn(priv->netdev, "spurious RX interrupt\n");
981 return IRQ_NONE;
982 }
983
984 if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
985 if (likely(napi_schedule_prep(&priv->napi))) {
986 /* disable RX interrupts */
987 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
ba90950c 988 __napi_schedule_irqoff(&priv->napi);
80105bef
FF
989 }
990 }
991
992 /* TX ring is full, perform a full reclaim since we do not know
993 * which one would trigger this interrupt
994 */
995 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
996 bcm_sysport_tx_reclaim_all(priv);
997
83e82f4c
FF
998 if (priv->irq0_stat & INTRL2_0_MPD) {
999 netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
1000 bcm_sysport_resume_from_wol(priv);
1001 }
1002
44a4524c
FF
1003 if (!priv->is_lite)
1004 goto out;
1005
1006 for (ring = 0; ring < dev->num_tx_queues; ring++) {
1007 ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
1008 if (!(priv->irq0_stat & ring_bit))
1009 continue;
1010
1011 txr = &priv->tx_rings[ring];
1012
1013 if (likely(napi_schedule_prep(&txr->napi))) {
1014 intrl2_0_mask_set(priv, ring_bit);
1015 __napi_schedule(&txr->napi);
1016 }
1017 }
1018out:
80105bef
FF
1019 return IRQ_HANDLED;
1020}
1021
1022/* TX interrupt service routine */
1023static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
1024{
1025 struct net_device *dev = dev_id;
1026 struct bcm_sysport_priv *priv = netdev_priv(dev);
1027 struct bcm_sysport_tx_ring *txr;
1028 unsigned int ring;
1029
1030 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
1031 ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
1032 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1033
1034 if (unlikely(priv->irq1_stat == 0)) {
1035 netdev_warn(priv->netdev, "spurious TX interrupt\n");
1036 return IRQ_NONE;
1037 }
1038
1039 for (ring = 0; ring < dev->num_tx_queues; ring++) {
1040 if (!(priv->irq1_stat & BIT(ring)))
1041 continue;
1042
1043 txr = &priv->tx_rings[ring];
1044
1045 if (likely(napi_schedule_prep(&txr->napi))) {
1046 intrl2_1_mask_set(priv, BIT(ring));
ba90950c 1047 __napi_schedule_irqoff(&txr->napi);
80105bef
FF
1048 }
1049 }
1050
1051 return IRQ_HANDLED;
1052}
1053
83e82f4c
FF
1054static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
1055{
1056 struct bcm_sysport_priv *priv = dev_id;
1057
1058 pm_wakeup_event(&priv->pdev->dev, 0);
1059
1060 return IRQ_HANDLED;
1061}
1062
6cec4f5e
FF
1063#ifdef CONFIG_NET_POLL_CONTROLLER
1064static void bcm_sysport_poll_controller(struct net_device *dev)
1065{
1066 struct bcm_sysport_priv *priv = netdev_priv(dev);
1067
1068 disable_irq(priv->irq0);
1069 bcm_sysport_rx_isr(priv->irq0, priv);
1070 enable_irq(priv->irq0);
1071
44a4524c
FF
1072 if (!priv->is_lite) {
1073 disable_irq(priv->irq1);
1074 bcm_sysport_tx_isr(priv->irq1, priv);
1075 enable_irq(priv->irq1);
1076 }
6cec4f5e
FF
1077}
1078#endif
1079
e87474a6
FF
1080static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
1081 struct net_device *dev)
80105bef
FF
1082{
1083 struct sk_buff *nskb;
3afc557d 1084 struct bcm_tsb *tsb;
80105bef
FF
1085 u32 csum_info;
1086 u8 ip_proto;
1087 u16 csum_start;
1088 u16 ip_ver;
1089
1090 /* Re-allocate SKB if needed */
1091 if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
1092 nskb = skb_realloc_headroom(skb, sizeof(*tsb));
1093 dev_kfree_skb(skb);
1094 if (!nskb) {
1095 dev->stats.tx_errors++;
1096 dev->stats.tx_dropped++;
e87474a6 1097 return NULL;
80105bef
FF
1098 }
1099 skb = nskb;
1100 }
1101
3afc557d 1102 tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
80105bef
FF
1103 /* Zero-out TSB by default */
1104 memset(tsb, 0, sizeof(*tsb));
1105
1106 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1107 ip_ver = htons(skb->protocol);
1108 switch (ip_ver) {
1109 case ETH_P_IP:
1110 ip_proto = ip_hdr(skb)->protocol;
1111 break;
1112 case ETH_P_IPV6:
1113 ip_proto = ipv6_hdr(skb)->nexthdr;
1114 break;
1115 default:
e87474a6 1116 return skb;
80105bef
FF
1117 }
1118
1119 /* Get the checksum offset and the L4 (transport) offset */
1120 csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
1121 csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
1122 csum_info |= (csum_start << L4_PTR_SHIFT);
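		/* The sizeof(*tsb) adjustment above keeps csum_start relative
		 * to the frame data that follows the TSB, since skb_push()
		 * grew the headroom by the TSB size.
		 */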
1123
1124 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
1125 csum_info |= L4_LENGTH_VALID;
1126 if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
1127 csum_info |= L4_UDP;
23acb2fc 1128 } else {
80105bef 1129 csum_info = 0;
23acb2fc 1130 }
80105bef
FF
1131
1132 tsb->l4_ptr_dest_map = csum_info;
1133 }
1134
e87474a6 1135 return skb;
80105bef
FF
1136}
1137
1138static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
1139 struct net_device *dev)
1140{
1141 struct bcm_sysport_priv *priv = netdev_priv(dev);
1142 struct device *kdev = &priv->pdev->dev;
1143 struct bcm_sysport_tx_ring *ring;
1144 struct bcm_sysport_cb *cb;
1145 struct netdev_queue *txq;
1146 struct dma_desc *desc;
dab531b4 1147 unsigned int skb_len;
d8498088 1148 unsigned long flags;
80105bef
FF
1149 dma_addr_t mapping;
1150 u32 len_status;
1151 u16 queue;
1152 int ret;
1153
1154 queue = skb_get_queue_mapping(skb);
1155 txq = netdev_get_tx_queue(dev, queue);
1156 ring = &priv->tx_rings[queue];
1157
d8498088
FF
1158 /* lock against tx reclaim in BH context and TX ring full interrupt */
1159 spin_lock_irqsave(&ring->lock, flags);
80105bef
FF
1160 if (unlikely(ring->desc_count == 0)) {
1161 netif_tx_stop_queue(txq);
1162 netdev_err(dev, "queue %d awake and ring full!\n", queue);
1163 ret = NETDEV_TX_BUSY;
1164 goto out;
1165 }
1166
dab531b4
FF
1167 /* The Ethernet switch we are interfaced with needs packets to be at
1168 * least 64 bytes (including FCS) otherwise they will be discarded when
1169 * they enter the switch port logic. When Broadcom tags are enabled, we
1170 * need to make sure that packets are at least 68 bytes
1171 * (including FCS and tag) because the length verification is done after
1172 * the Broadcom tag is stripped off the ingress packet.
1173 */
bb7da333 1174 if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
dab531b4
FF
1175 ret = NETDEV_TX_OK;
1176 goto out;
1177 }
1178
38e5a855
FF
1179 /* Insert TSB and checksum infos */
1180 if (priv->tsb_en) {
1181 skb = bcm_sysport_insert_tsb(skb, dev);
1182 if (!skb) {
1183 ret = NETDEV_TX_OK;
1184 goto out;
1185 }
1186 }
1187
bb7da333 1188 skb_len = skb->len;
dab531b4
FF
1189
1190 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
80105bef 1191 if (dma_mapping_error(kdev, mapping)) {
60b4ea17 1192 priv->mib.tx_dma_failed++;
80105bef 1193 netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
23acb2fc 1194 skb->data, skb_len);
80105bef
FF
1195 ret = NETDEV_TX_OK;
1196 goto out;
1197 }
1198
1199 /* Remember the SKB for future freeing */
1200 cb = &ring->cbs[ring->curr_desc];
1201 cb->skb = skb;
1202 dma_unmap_addr_set(cb, dma_addr, mapping);
dab531b4 1203 dma_unmap_len_set(cb, dma_len, skb_len);
80105bef
FF
1204
1205 /* Fetch a descriptor entry from our pool */
1206 desc = ring->desc_cpu;
1207
1208 desc->addr_lo = lower_32_bits(mapping);
1209 len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
dab531b4 1210 len_status |= (skb_len << DESC_LEN_SHIFT);
80105bef 1211 len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
23acb2fc 1212 DESC_STATUS_SHIFT;
80105bef
FF
1213 if (skb->ip_summed == CHECKSUM_PARTIAL)
1214 len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
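	/* At this point len_status packs into one 32-bit descriptor word the
	 * upper physical address bits (DESC_ADDR_HI_MASK), the frame length
	 * at DESC_LEN_SHIFT, and the SOP/EOP/APP_CRC/L4_CSUM flags at
	 * DESC_STATUS_SHIFT.
	 */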
1215
1216 ring->curr_desc++;
1217 if (ring->curr_desc == ring->size)
1218 ring->curr_desc = 0;
1219 ring->desc_count--;
1220
1221 /* Ensure write completion of the descriptor status/length
1222 * in DRAM before the System Port WRITE_PORT register latches
1223 * the value
1224 */
1225 wmb();
1226 desc->addr_status_len = len_status;
1227 wmb();
1228
1229 /* Write this descriptor address to the RING write port */
1230 tdma_port_write_desc_addr(priv, desc, ring->index);
1231
1232 /* Check ring space and update SW control flow */
1233 if (ring->desc_count == 0)
1234 netif_tx_stop_queue(txq);
1235
1236 netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
23acb2fc 1237 ring->index, ring->desc_count, ring->curr_desc);
80105bef
FF
1238
1239 ret = NETDEV_TX_OK;
1240out:
d8498088 1241 spin_unlock_irqrestore(&ring->lock, flags);
80105bef
FF
1242 return ret;
1243}
1244
1245static void bcm_sysport_tx_timeout(struct net_device *dev)
1246{
1247 netdev_warn(dev, "transmit timeout!\n");
1248
860e9538 1249 netif_trans_update(dev);
80105bef
FF
1250 dev->stats.tx_errors++;
1251
1252 netif_tx_wake_all_queues(dev);
1253}
1254
1255/* phylib adjust link callback */
1256static void bcm_sysport_adj_link(struct net_device *dev)
1257{
1258 struct bcm_sysport_priv *priv = netdev_priv(dev);
715a0227 1259 struct phy_device *phydev = dev->phydev;
80105bef
FF
1260 unsigned int changed = 0;
1261 u32 cmd_bits = 0, reg;
1262
1263 if (priv->old_link != phydev->link) {
1264 changed = 1;
1265 priv->old_link = phydev->link;
1266 }
1267
1268 if (priv->old_duplex != phydev->duplex) {
1269 changed = 1;
1270 priv->old_duplex = phydev->duplex;
1271 }
1272
44a4524c
FF
1273 if (priv->is_lite)
1274 goto out;
1275
80105bef
FF
1276 switch (phydev->speed) {
1277 case SPEED_2500:
1278 cmd_bits = CMD_SPEED_2500;
1279 break;
1280 case SPEED_1000:
1281 cmd_bits = CMD_SPEED_1000;
1282 break;
1283 case SPEED_100:
1284 cmd_bits = CMD_SPEED_100;
1285 break;
1286 case SPEED_10:
1287 cmd_bits = CMD_SPEED_10;
1288 break;
1289 default:
1290 break;
1291 }
1292 cmd_bits <<= CMD_SPEED_SHIFT;
1293
1294 if (phydev->duplex == DUPLEX_HALF)
1295 cmd_bits |= CMD_HD_EN;
1296
1297 if (priv->old_pause != phydev->pause) {
1298 changed = 1;
1299 priv->old_pause = phydev->pause;
1300 }
1301
1302 if (!phydev->pause)
1303 cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
1304
4a804c01
FF
1305 if (!changed)
1306 return;
1307
1308 if (phydev->link) {
d5e32cc7
FF
1309 reg = umac_readl(priv, UMAC_CMD);
1310 reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
80105bef
FF
1311 CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
1312 CMD_TX_PAUSE_IGNORE);
d5e32cc7
FF
1313 reg |= cmd_bits;
1314 umac_writel(priv, reg, UMAC_CMD);
d5e32cc7 1315 }
44a4524c
FF
1316out:
1317 if (changed)
1318 phy_print_status(phydev);
80105bef
FF
1319}
1320
1321static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1322 unsigned int index)
1323{
1324 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1325 struct device *kdev = &priv->pdev->dev;
1326 size_t size;
1327 void *p;
1328 u32 reg;
1329
1330 /* Simple descriptors partitioning for now */
1331 size = 256;
1332
1333 /* We just need one DMA descriptor which is DMA-able, since writing to
1334 * the port will allocate a new descriptor in its internal linked-list
1335 */
3e8fc38c
FF
1336 p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
1337 GFP_KERNEL);
80105bef
FF
1338 if (!p) {
1339 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
1340 return -ENOMEM;
1341 }
1342
40a8a317 1343 ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
80105bef
FF
1344 if (!ring->cbs) {
1345 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1346 return -ENOMEM;
1347 }
1348
1349 /* Initialize SW view of the ring */
1350 spin_lock_init(&ring->lock);
1351 ring->priv = priv;
d64b5e85 1352 netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
80105bef
FF
1353 ring->index = index;
1354 ring->size = size;
1355 ring->alloc_size = ring->size;
1356 ring->desc_cpu = p;
1357 ring->desc_count = ring->size;
1358 ring->curr_desc = 0;
1359
1360 /* Initialize HW ring */
1361 tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
1362 tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
1363 tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
1364 tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
1365 tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
1366 tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
1367
1368 /* Program the number of descriptors as MAX_THRESHOLD and half of
1369 * its size for the hysteresis trigger
1370 */
1371 tdma_writel(priv, ring->size |
1372 1 << RING_HYST_THRESH_SHIFT,
1373 TDMA_DESC_RING_MAX_HYST(index));
1374
1375 /* Enable the ring queue in the arbiter */
1376 reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
1377 reg |= (1 << index);
1378 tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
1379
1380 napi_enable(&ring->napi);
1381
1382 netif_dbg(priv, hw, priv->netdev,
23acb2fc
FF
1383 "TDMA cfg, size=%d, desc_cpu=%p\n",
1384 ring->size, ring->desc_cpu);
80105bef
FF
1385
1386 return 0;
1387}
1388
1389static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
23acb2fc 1390 unsigned int index)
80105bef
FF
1391{
1392 struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1393 struct device *kdev = &priv->pdev->dev;
1394 u32 reg;
1395
1396 /* Caller should stop the TDMA engine */
1397 reg = tdma_readl(priv, TDMA_STATUS);
1398 if (!(reg & TDMA_DISABLED))
1399 netdev_warn(priv->netdev, "TDMA not stopped!\n");
1400
914adb55
FF
1401 /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
1402 * fail, so by checking this pointer we know whether the TX ring was
1403 * fully initialized or not.
1404 */
1405 if (!ring->cbs)
1406 return;
1407
80105bef
FF
1408 napi_disable(&ring->napi);
1409 netif_napi_del(&ring->napi);
1410
148d3d02 1411 bcm_sysport_tx_clean(priv, ring);
80105bef
FF
1412
1413 kfree(ring->cbs);
1414 ring->cbs = NULL;
1415
1416 if (ring->desc_dma) {
3e8fc38c
FF
1417 dma_free_coherent(kdev, sizeof(struct dma_desc),
1418 ring->desc_cpu, ring->desc_dma);
80105bef
FF
1419 ring->desc_dma = 0;
1420 }
1421 ring->size = 0;
1422 ring->alloc_size = 0;
1423
1424 netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
1425}
1426
1427/* RDMA helper */
1428static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
23acb2fc 1429 unsigned int enable)
80105bef
FF
1430{
1431 unsigned int timeout = 1000;
1432 u32 reg;
1433
1434 reg = rdma_readl(priv, RDMA_CONTROL);
1435 if (enable)
1436 reg |= RDMA_EN;
1437 else
1438 reg &= ~RDMA_EN;
1439 rdma_writel(priv, reg, RDMA_CONTROL);
1440
 1441	/* Poll for RDMA disabling completion */
1442 do {
1443 reg = rdma_readl(priv, RDMA_STATUS);
1444 if (!!(reg & RDMA_DISABLED) == !enable)
1445 return 0;
1446 usleep_range(1000, 2000);
1447 } while (timeout-- > 0);
1448
1449 netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
1450
1451 return -ETIMEDOUT;
1452}
1453
1454/* TDMA helper */
1455static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
23acb2fc 1456 unsigned int enable)
80105bef
FF
1457{
1458 unsigned int timeout = 1000;
1459 u32 reg;
1460
1461 reg = tdma_readl(priv, TDMA_CONTROL);
1462 if (enable)
44a4524c 1463 reg |= tdma_control_bit(priv, TDMA_EN);
80105bef 1464 else
44a4524c 1465 reg &= ~tdma_control_bit(priv, TDMA_EN);
80105bef
FF
1466 tdma_writel(priv, reg, TDMA_CONTROL);
1467
 1469	/* Poll for TDMA disabling completion */
1469 do {
1470 reg = tdma_readl(priv, TDMA_STATUS);
1471 if (!!(reg & TDMA_DISABLED) == !enable)
1472 return 0;
1473
1474 usleep_range(1000, 2000);
1475 } while (timeout-- > 0);
1476
1477 netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
1478
1479 return -ETIMEDOUT;
1480}
1481
1482static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
1483{
baf387a8 1484 struct bcm_sysport_cb *cb;
80105bef
FF
1485 u32 reg;
1486 int ret;
baf387a8 1487 int i;
80105bef
FF
1488
1489 /* Initialize SW view of the RX ring */
44a4524c 1490 priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
80105bef 1491 priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
80105bef
FF
1492 priv->rx_c_index = 0;
1493 priv->rx_read_ptr = 0;
40a8a317
FF
1494 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
1495 GFP_KERNEL);
80105bef
FF
1496 if (!priv->rx_cbs) {
1497 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1498 return -ENOMEM;
1499 }
1500
baf387a8
FF
1501 for (i = 0; i < priv->num_rx_bds; i++) {
1502 cb = priv->rx_cbs + i;
1503 cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
1504 }
1505
80105bef
FF
1506 ret = bcm_sysport_alloc_rx_bufs(priv);
1507 if (ret) {
1508 netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
1509 return ret;
1510 }
1511
1512 /* Initialize HW, ensure RDMA is disabled */
1513 reg = rdma_readl(priv, RDMA_STATUS);
1514 if (!(reg & RDMA_DISABLED))
1515 rdma_enable_set(priv, 0);
1516
1517 rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
1518 rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
1519 rdma_writel(priv, 0, RDMA_PROD_INDEX);
1520 rdma_writel(priv, 0, RDMA_CONS_INDEX);
1521 rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
1522 RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
1523 /* Operate the queue in ring mode */
1524 rdma_writel(priv, 0, RDMA_START_ADDR_HI);
1525 rdma_writel(priv, 0, RDMA_START_ADDR_LO);
1526 rdma_writel(priv, 0, RDMA_END_ADDR_HI);
44a4524c 1527 rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
80105bef
FF
1528
1529 rdma_writel(priv, 1, RDMA_MBDONE_INTR);
1530
1531 netif_dbg(priv, hw, priv->netdev,
23acb2fc
FF
1532 "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
1533 priv->num_rx_bds, priv->rx_bds);
80105bef
FF
1534
1535 return 0;
1536}
1537
1538static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
1539{
1540 struct bcm_sysport_cb *cb;
1541 unsigned int i;
1542 u32 reg;
1543
1544 /* Caller should ensure RDMA is disabled */
1545 reg = rdma_readl(priv, RDMA_STATUS);
1546 if (!(reg & RDMA_DISABLED))
1547 netdev_warn(priv->netdev, "RDMA not stopped!\n");
1548
1549 for (i = 0; i < priv->num_rx_bds; i++) {
1550 cb = &priv->rx_cbs[i];
1551 if (dma_unmap_addr(cb, dma_addr))
1552 dma_unmap_single(&priv->pdev->dev,
23acb2fc
FF
1553 dma_unmap_addr(cb, dma_addr),
1554 RX_BUF_LENGTH, DMA_FROM_DEVICE);
80105bef
FF
1555 bcm_sysport_free_cb(cb);
1556 }
1557
1558 kfree(priv->rx_cbs);
1559 priv->rx_cbs = NULL;
1560
1561 netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
1562}
1563
1564static void bcm_sysport_set_rx_mode(struct net_device *dev)
1565{
1566 struct bcm_sysport_priv *priv = netdev_priv(dev);
1567 u32 reg;
1568
44a4524c
FF
1569 if (priv->is_lite)
1570 return;
1571
80105bef
FF
1572 reg = umac_readl(priv, UMAC_CMD);
1573 if (dev->flags & IFF_PROMISC)
1574 reg |= CMD_PROMISC;
1575 else
1576 reg &= ~CMD_PROMISC;
1577 umac_writel(priv, reg, UMAC_CMD);
1578
1579 /* No support for ALLMULTI */
1580 if (dev->flags & IFF_ALLMULTI)
1581 return;
1582}
1583
1584static inline void umac_enable_set(struct bcm_sysport_priv *priv,
23acb2fc 1585 u32 mask, unsigned int enable)
80105bef
FF
1586{
1587 u32 reg;
1588
44a4524c
FF
1589 if (!priv->is_lite) {
1590 reg = umac_readl(priv, UMAC_CMD);
1591 if (enable)
1592 reg |= mask;
1593 else
1594 reg &= ~mask;
1595 umac_writel(priv, reg, UMAC_CMD);
1596 } else {
1597 reg = gib_readl(priv, GIB_CONTROL);
1598 if (enable)
1599 reg |= mask;
1600 else
1601 reg &= ~mask;
1602 gib_writel(priv, reg, GIB_CONTROL);
1603 }
00b91c69
FF
1604
1605 /* UniMAC stops on a packet boundary, wait for a full-sized packet
1606 * to be processed (1 msec).
1607 */
1608 if (enable == 0)
1609 usleep_range(1000, 2000);
80105bef
FF
1610}
1611
412bce83 1612static inline void umac_reset(struct bcm_sysport_priv *priv)
80105bef 1613{
80105bef 1614 u32 reg;
80105bef 1615
44a4524c
FF
1616 if (priv->is_lite)
1617 return;
1618
412bce83
FF
1619 reg = umac_readl(priv, UMAC_CMD);
1620 reg |= CMD_SW_RESET;
1621 umac_writel(priv, reg, UMAC_CMD);
1622 udelay(10);
1623 reg = umac_readl(priv, UMAC_CMD);
1624 reg &= ~CMD_SW_RESET;
1625 umac_writel(priv, reg, UMAC_CMD);
80105bef
FF
1626}
1627
1628static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
23acb2fc 1629 unsigned char *addr)
80105bef 1630{
44a4524c
FF
1631 u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
1632 addr[3];
1633 u32 mac1 = (addr[4] << 8) | addr[5];
1634
1635 if (!priv->is_lite) {
1636 umac_writel(priv, mac0, UMAC_MAC0);
1637 umac_writel(priv, mac1, UMAC_MAC1);
1638 } else {
1639 gib_writel(priv, mac0, GIB_MAC0);
1640 gib_writel(priv, mac1, GIB_MAC1);
1641 }
80105bef
FF
1642}
1643
1644static void topctrl_flush(struct bcm_sysport_priv *priv)
1645{
1646 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
1647 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
1648 mdelay(1);
1649 topctrl_writel(priv, 0, RX_FLUSH_CNTL);
1650 topctrl_writel(priv, 0, TX_FLUSH_CNTL);
1651}
1652
fb3b596d
FF
1653static int bcm_sysport_change_mac(struct net_device *dev, void *p)
1654{
1655 struct bcm_sysport_priv *priv = netdev_priv(dev);
1656 struct sockaddr *addr = p;
1657
1658 if (!is_valid_ether_addr(addr->sa_data))
1659 return -EINVAL;
1660
1661 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1662
1663 /* interface is disabled, changes to MAC will be reflected on next
1664 * open call
1665 */
1666 if (!netif_running(dev))
1667 return 0;
1668
1669 umac_set_hw_addr(priv, dev->dev_addr);
1670
1671 return 0;
1672}
1673
30defeb2
FF
1674static struct net_device_stats *bcm_sysport_get_nstats(struct net_device *dev)
1675{
1676 struct bcm_sysport_priv *priv = netdev_priv(dev);
1677 unsigned long tx_bytes = 0, tx_packets = 0;
1678 struct bcm_sysport_tx_ring *ring;
1679 unsigned int q;
1680
1681 for (q = 0; q < dev->num_tx_queues; q++) {
1682 ring = &priv->tx_rings[q];
1683 tx_bytes += ring->bytes;
1684 tx_packets += ring->packets;
1685 }
1686
1687 dev->stats.tx_bytes = tx_bytes;
1688 dev->stats.tx_packets = tx_packets;
1689 return &dev->stats;
1690}
1691
b02e6d9b
FF
1692static void bcm_sysport_netif_start(struct net_device *dev)
1693{
1694 struct bcm_sysport_priv *priv = netdev_priv(dev);
1695
1696 /* Enable NAPI */
1697 napi_enable(&priv->napi);
1698
8edf0047
FF
1699 /* Enable RX interrupt and TX ring full interrupt */
1700 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1701
715a0227 1702 phy_start(dev->phydev);
b02e6d9b 1703
44a4524c
FF
1704 /* Enable TX interrupts for the TXQs */
1705 if (!priv->is_lite)
1706 intrl2_1_mask_clear(priv, 0xffffffff);
1707 else
1708 intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
b02e6d9b
FF
1709
1710 /* Last call before we start the real business */
1711 netif_tx_start_all_queues(dev);
1712}
1713
40755a0f
FF
1714static void rbuf_init(struct bcm_sysport_priv *priv)
1715{
1716 u32 reg;
1717
1718 reg = rbuf_readl(priv, RBUF_CONTROL);
1719 reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
44a4524c
FF
1720 /* Set a correct RSB format on SYSTEMPORT Lite */
1721 if (priv->is_lite) {
1722 reg &= ~RBUF_RSB_SWAP1;
1723 reg |= RBUF_RSB_SWAP0;
1724 }
40755a0f
FF
1725 rbuf_writel(priv, reg, RBUF_CONTROL);
1726}
1727
44a4524c
FF
1728static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
1729{
1730 intrl2_0_mask_set(priv, 0xffffffff);
1731 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1732 if (!priv->is_lite) {
1733 intrl2_1_mask_set(priv, 0xffffffff);
1734 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1735 }
1736}
1737
1738static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
1739{
1740 u32 __maybe_unused reg;
1741
1742 /* Include Broadcom tag in pad extension */
1743 if (netdev_uses_dsa(priv->netdev)) {
1744 reg = gib_readl(priv, GIB_CONTROL);
1745 reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
1746 reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
1747 gib_writel(priv, reg, GIB_CONTROL);
1748 }
1749}
1750
80105bef
FF
static int bcm_sysport_open(struct net_device *dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct phy_device *phydev;
        unsigned int i;
        int ret;

        /* Reset UniMAC */
        umac_reset(priv);

        /* Flush TX and RX FIFOs at TOPCTRL level */
        topctrl_flush(priv);

        /* Disable the UniMAC RX/TX */
        umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

        /* Enable RBUF 2bytes alignment and Receive Status Block */
        rbuf_init(priv);

        /* Set maximum frame length */
        if (!priv->is_lite)
                umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
        else
                gib_set_pad_extension(priv);

        /* Set MAC address */
        umac_set_hw_addr(priv, dev->dev_addr);

        /* Read CRC forward */
        if (!priv->is_lite)
                priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
        else
                priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
                                   GIB_FCS_STRIP);

        phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
                                0, priv->phy_interface);
        if (!phydev) {
                netdev_err(dev, "could not attach to PHY\n");
                return -ENODEV;
        }

        /* Reset housekeeping link status */
        priv->old_duplex = -1;
        priv->old_link = -1;
        priv->old_pause = -1;

        /* mask all interrupts and request them */
        bcm_sysport_mask_all_intrs(priv);

        ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
        if (ret) {
                netdev_err(dev, "failed to request RX interrupt\n");
                goto out_phy_disconnect;
        }

        if (!priv->is_lite) {
                ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
                                  dev->name, dev);
                if (ret) {
                        netdev_err(dev, "failed to request TX interrupt\n");
                        goto out_free_irq0;
                }
        }

        /* Initialize both hardware and software rings */
        for (i = 0; i < dev->num_tx_queues; i++) {
                ret = bcm_sysport_init_tx_ring(priv, i);
                if (ret) {
                        netdev_err(dev, "failed to initialize TX ring %d\n",
                                   i);
                        goto out_free_tx_ring;
                }
        }

        /* Initialize linked-list */
        tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

        /* Initialize RX ring */
        ret = bcm_sysport_init_rx_ring(priv);
        if (ret) {
                netdev_err(dev, "failed to initialize RX ring\n");
                goto out_free_rx_ring;
        }

        /* Turn on RDMA */
        ret = rdma_enable_set(priv, 1);
        if (ret)
                goto out_free_rx_ring;

        /* Turn on TDMA */
        ret = tdma_enable_set(priv, 1);
        if (ret)
                goto out_clear_rx_int;

        /* Turn on UniMAC TX/RX */
        umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

        bcm_sysport_netif_start(dev);

        return 0;

out_clear_rx_int:
        intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
        bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
        for (i = 0; i < dev->num_tx_queues; i++)
                bcm_sysport_fini_tx_ring(priv, i);
        if (!priv->is_lite)
                free_irq(priv->irq1, dev);
out_free_irq0:
        free_irq(priv->irq0, dev);
out_phy_disconnect:
        phy_disconnect(phydev);
        return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);

        /* stop all software from updating hardware */
        netif_tx_stop_all_queues(dev);
        napi_disable(&priv->napi);
        phy_stop(dev->phydev);

        /* mask all interrupts */
        bcm_sysport_mask_all_intrs(priv);
}

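/* Tear down in roughly the reverse order of bcm_sysport_open(): stop the
 * receiver, drain and disable both DMA engines, stop the transmitter, then
 * release rings, interrupts and the PHY.
 */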
static int bcm_sysport_stop(struct net_device *dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        unsigned int i;
        int ret;

        bcm_sysport_netif_stop(dev);

        /* Disable UniMAC RX */
        umac_enable_set(priv, CMD_RX_EN, 0);

        ret = tdma_enable_set(priv, 0);
        if (ret) {
                netdev_err(dev, "timeout disabling TDMA\n");
                return ret;
        }

        /* Wait for a maximum packet size to be drained */
        usleep_range(2000, 3000);

        ret = rdma_enable_set(priv, 0);
        if (ret) {
                netdev_err(dev, "timeout disabling RDMA\n");
                return ret;
        }

        /* Disable UniMAC TX */
        umac_enable_set(priv, CMD_TX_EN, 0);

        /* Free RX/TX rings SW structures */
        for (i = 0; i < dev->num_tx_queues; i++)
                bcm_sysport_fini_tx_ring(priv, i);
        bcm_sysport_fini_rx_ring(priv);

        free_irq(priv->irq0, dev);
        if (!priv->is_lite)
                free_irq(priv->irq1, dev);

        /* Disconnect from PHY */
        phy_disconnect(dev->phydev);

        return 0;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
        .get_drvinfo = bcm_sysport_get_drvinfo,
        .get_msglevel = bcm_sysport_get_msglvl,
        .set_msglevel = bcm_sysport_set_msglvl,
        .get_link = ethtool_op_get_link,
        .get_strings = bcm_sysport_get_strings,
        .get_ethtool_stats = bcm_sysport_get_stats,
        .get_sset_count = bcm_sysport_get_sset_count,
        .get_wol = bcm_sysport_get_wol,
        .set_wol = bcm_sysport_set_wol,
        .get_coalesce = bcm_sysport_get_coalesce,
        .set_coalesce = bcm_sysport_set_coalesce,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops bcm_sysport_netdev_ops = {
        .ndo_start_xmit = bcm_sysport_xmit,
        .ndo_tx_timeout = bcm_sysport_tx_timeout,
        .ndo_open = bcm_sysport_open,
        .ndo_stop = bcm_sysport_stop,
        .ndo_set_features = bcm_sysport_set_features,
        .ndo_set_rx_mode = bcm_sysport_set_rx_mode,
        .ndo_set_mac_address = bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = bcm_sysport_poll_controller,
#endif
        .ndo_get_stats = bcm_sysport_get_nstats,
};

#define REV_FMT "v%2x.%02x"

static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
        [SYSTEMPORT] = {
                .is_lite = false,
                .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
        },
        [SYSTEMPORT_LITE] = {
                .is_lite = true,
                .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
        },
};

static const struct of_device_id bcm_sysport_of_match[] = {
        { .compatible = "brcm,systemportlite-v1.00",
          .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
        { .compatible = "brcm,systemport-v1.00",
          .data = &bcm_sysport_params[SYSTEMPORT] },
        { .compatible = "brcm,systemport",
          .data = &bcm_sysport_params[SYSTEMPORT] },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);

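/* Purely illustrative device tree node (the unit address, register size and
 * interrupt specifiers are made up, not taken from any real board), showing
 * the optional "systemport,num-txq"/"systemport,num-rxq" properties parsed
 * by bcm_sysport_probe() below:
 *
 *      ethernet@f04a0000 {
 *              compatible = "brcm,systemport-v1.00";
 *              reg = <0xf04a0000 0x4650>;
 *              interrupts = <0 24 0>, <0 25 0>, <0 26 0>;
 *              systemport,num-txq = <16>;
 *              systemport,num-rxq = <1>;
 *      };
 */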
static int bcm_sysport_probe(struct platform_device *pdev)
{
        const struct bcm_sysport_hw_params *params;
        const struct of_device_id *of_id = NULL;
        struct bcm_sysport_priv *priv;
        struct device_node *dn;
        struct net_device *dev;
        const void *macaddr;
        struct resource *r;
        u32 txq, rxq;
        int ret;

        dn = pdev->dev.of_node;
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        of_id = of_match_node(bcm_sysport_of_match, dn);
        if (!of_id || !of_id->data)
                return -EINVAL;

        /* Fairly quickly we need to know the type of adapter we have */
        params = of_id->data;

        /* Read the Transmit/Receive Queue properties */
        if (of_property_read_u32(dn, "systemport,num-txq", &txq))
                txq = TDMA_NUM_RINGS;
        if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
                rxq = 1;

        /* Sanity check the number of transmit queues */
        if (!txq || txq > TDMA_NUM_RINGS)
                return -EINVAL;

        dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
        if (!dev)
                return -ENOMEM;

        /* Initialize private members */
        priv = netdev_priv(dev);

        /* Allocate number of TX rings */
        priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
                                      sizeof(struct bcm_sysport_tx_ring),
                                      GFP_KERNEL);
        if (!priv->tx_rings)
                return -ENOMEM;

        priv->is_lite = params->is_lite;
        priv->num_rx_desc_words = params->num_rx_desc_words;

        priv->irq0 = platform_get_irq(pdev, 0);
        if (!priv->is_lite) {
                priv->irq1 = platform_get_irq(pdev, 1);
                priv->wol_irq = platform_get_irq(pdev, 2);
        } else {
                priv->wol_irq = platform_get_irq(pdev, 1);
        }
        if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
                dev_err(&pdev->dev, "invalid interrupts\n");
                ret = -EINVAL;
                goto err_free_netdev;
        }

        priv->base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(priv->base)) {
                ret = PTR_ERR(priv->base);
                goto err_free_netdev;
        }

        priv->netdev = dev;
        priv->pdev = pdev;

        priv->phy_interface = of_get_phy_mode(dn);
        /* Default to GMII interface mode */
        if (priv->phy_interface < 0)
                priv->phy_interface = PHY_INTERFACE_MODE_GMII;

        /* In the case of a fixed PHY, the DT node associated
         * to the PHY is the Ethernet MAC DT node.
         */
        if (of_phy_is_fixed_link(dn)) {
                ret = of_phy_register_fixed_link(dn);
                if (ret) {
                        dev_err(&pdev->dev, "failed to register fixed PHY\n");
                        goto err_free_netdev;
                }

                priv->phy_dn = dn;
        }

        /* Initialize netdevice members */
        macaddr = of_get_mac_address(dn);
        if (!macaddr || !is_valid_ether_addr(macaddr)) {
                dev_warn(&pdev->dev, "using random Ethernet MAC\n");
                eth_hw_addr_random(dev);
        } else {
                ether_addr_copy(dev->dev_addr, macaddr);
        }

        SET_NETDEV_DEV(dev, &pdev->dev);
        dev_set_drvdata(&pdev->dev, dev);
        dev->ethtool_ops = &bcm_sysport_ethtool_ops;
        dev->netdev_ops = &bcm_sysport_netdev_ops;
        netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

        /* HW supported features, none enabled by default */
        dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
                            NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

        /* Request the WOL interrupt and advertise suspend if available */
        priv->wol_irq_disabled = 1;
        ret = devm_request_irq(&pdev->dev, priv->wol_irq,
                               bcm_sysport_wol_isr, 0, dev->name, priv);
        if (!ret)
                device_set_wakeup_capable(&pdev->dev, 1);

        /* Set the needed headroom once and for all */
        BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
        dev->needed_headroom += sizeof(struct bcm_tsb);

        /* libphy will adjust the link state accordingly */
        netif_carrier_off(dev);

        ret = register_netdev(dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to register net_device\n");
                goto err_deregister_fixed_link;
        }

        priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
        dev_info(&pdev->dev,
                 "Broadcom SYSTEMPORT%s" REV_FMT
                 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
                 priv->is_lite ? " Lite" : "",
                 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
                 priv->base, priv->irq0, priv->irq1, txq, rxq);

        return 0;

err_deregister_fixed_link:
        if (of_phy_is_fixed_link(dn))
                of_phy_deregister_fixed_link(dn);
err_free_netdev:
        free_netdev(dev);
        return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
        struct net_device *dev = dev_get_drvdata(&pdev->dev);
        struct device_node *dn = pdev->dev.of_node;

        /* Not much to do, ndo_close has been called
         * and we use managed allocations
         */
        unregister_netdev(dev);
        if (of_phy_is_fixed_link(dn))
                of_phy_deregister_fixed_link(dn);
        free_netdev(dev);
        dev_set_drvdata(&pdev->dev, NULL);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
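/* Arm UniMAC Magic Packet Detection (plus the password match when
 * WAKE_MAGICSECURE is requested), verify that RBUF switched to WoL mode,
 * and leave the receiver and the MPD interrupt enabled as wake-up sources.
 */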
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
        struct net_device *ndev = priv->netdev;
        unsigned int timeout = 1000;
        u32 reg;

        /* Password has already been programmed */
        reg = umac_readl(priv, UMAC_MPD_CTRL);
        reg |= MPD_EN;
        reg &= ~PSW_EN;
        if (priv->wolopts & WAKE_MAGICSECURE)
                reg |= PSW_EN;
        umac_writel(priv, reg, UMAC_MPD_CTRL);

        /* Make sure RBUF entered WoL mode as a result */
        do {
                reg = rbuf_readl(priv, RBUF_STATUS);
                if (reg & RBUF_WOL_MODE)
                        break;

                udelay(10);
        } while (timeout-- > 0);

        /* Do not leave the UniMAC RBUF matching only MPD packets */
        if (!timeout) {
                reg = umac_readl(priv, UMAC_MPD_CTRL);
                reg &= ~MPD_EN;
                umac_writel(priv, reg, UMAC_MPD_CTRL);
                netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
                return -ETIMEDOUT;
        }

        /* UniMAC receive needs to be turned on */
        umac_enable_set(priv, CMD_RX_EN, 1);

        /* Enable the interrupt wake-up source */
        intrl2_0_mask_clear(priv, INTRL2_0_MPD);

        netif_dbg(priv, wol, ndev, "entered WOL mode\n");

        return 0;
}

static int bcm_sysport_suspend(struct device *d)
{
        struct net_device *dev = dev_get_drvdata(d);
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        unsigned int i;
        int ret = 0;
        u32 reg;

        if (!netif_running(dev))
                return 0;

        bcm_sysport_netif_stop(dev);

        phy_suspend(dev->phydev);

        netif_device_detach(dev);

        /* Disable UniMAC RX */
        umac_enable_set(priv, CMD_RX_EN, 0);

        ret = rdma_enable_set(priv, 0);
        if (ret) {
                netdev_err(dev, "RDMA timeout!\n");
                return ret;
        }

        /* Disable RXCHK if enabled */
        if (priv->rx_chk_en) {
                reg = rxchk_readl(priv, RXCHK_CONTROL);
                reg &= ~RXCHK_EN;
                rxchk_writel(priv, reg, RXCHK_CONTROL);
        }

        /* Flush RX pipe */
        if (!priv->wolopts)
                topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

        ret = tdma_enable_set(priv, 0);
        if (ret) {
                netdev_err(dev, "TDMA timeout!\n");
                return ret;
        }

        /* Wait for a packet boundary */
        usleep_range(2000, 3000);

        umac_enable_set(priv, CMD_TX_EN, 0);

        topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

        /* Free RX/TX rings SW structures */
        for (i = 0; i < dev->num_tx_queues; i++)
                bcm_sysport_fini_tx_ring(priv, i);
        bcm_sysport_fini_rx_ring(priv);

        /* Get prepared for Wake-on-LAN */
        if (device_may_wakeup(d) && priv->wolopts)
                ret = bcm_sysport_suspend_to_wol(priv);

        return ret;
}

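/* Resume re-runs most of the hardware programming done in ndo_open; the
 * RX/TX interrupts requested at open time are kept across suspend, so only
 * the rings, DMA engines, MAC state and PHY need to be brought back up.
 */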
static int bcm_sysport_resume(struct device *d)
{
        struct net_device *dev = dev_get_drvdata(d);
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        unsigned int i;
        u32 reg;
        int ret;

        if (!netif_running(dev))
                return 0;

        umac_reset(priv);

        /* We may have been suspended and never received a WOL event that
         * would turn off MPD detection; take care of that now
         */
        bcm_sysport_resume_from_wol(priv);

        /* Initialize both hardware and software rings */
        for (i = 0; i < dev->num_tx_queues; i++) {
                ret = bcm_sysport_init_tx_ring(priv, i);
                if (ret) {
                        netdev_err(dev, "failed to initialize TX ring %d\n",
                                   i);
                        goto out_free_tx_rings;
                }
        }

        /* Initialize linked-list */
        tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

        /* Initialize RX ring */
        ret = bcm_sysport_init_rx_ring(priv);
        if (ret) {
                netdev_err(dev, "failed to initialize RX ring\n");
                goto out_free_rx_ring;
        }

        netif_device_attach(dev);

        /* RX pipe enable */
        topctrl_writel(priv, 0, RX_FLUSH_CNTL);

        ret = rdma_enable_set(priv, 1);
        if (ret) {
                netdev_err(dev, "failed to enable RDMA\n");
                goto out_free_rx_ring;
        }

        /* Enable RXCHK if it was enabled before suspend */
        if (priv->rx_chk_en) {
                reg = rxchk_readl(priv, RXCHK_CONTROL);
                reg |= RXCHK_EN;
                rxchk_writel(priv, reg, RXCHK_CONTROL);
        }

        rbuf_init(priv);

        /* Set maximum frame length */
        if (!priv->is_lite)
                umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
        else
                gib_set_pad_extension(priv);

        /* Set MAC address */
        umac_set_hw_addr(priv, dev->dev_addr);

        umac_enable_set(priv, CMD_RX_EN, 1);

        /* TX pipe enable */
        topctrl_writel(priv, 0, TX_FLUSH_CNTL);

        umac_enable_set(priv, CMD_TX_EN, 1);

        ret = tdma_enable_set(priv, 1);
        if (ret) {
                netdev_err(dev, "TDMA timeout!\n");
                goto out_free_rx_ring;
        }

        phy_resume(dev->phydev);

        bcm_sysport_netif_start(dev);

        return 0;

out_free_rx_ring:
        bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
        for (i = 0; i < dev->num_tx_queues; i++)
                bcm_sysport_fini_tx_ring(priv, i);
        return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
                         bcm_sysport_suspend, bcm_sysport_resume);

static struct platform_driver bcm_sysport_driver = {
        .probe = bcm_sysport_probe,
        .remove = bcm_sysport_remove,
        .driver = {
                .name = "brcm-systemport",
                .of_match_table = bcm_sysport_of_match,
                .pm = &bcm_sysport_pm_ops,
        },
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");