/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#define pr_fmt(fmt)				"bcmgenet: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/version.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"
/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_DEFAULT_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt)
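
/* Worked example (illustrative, matching the queue layout described in
 * bcmgenet_init_multiq() below): with TOTAL_DESC = 256 and four priority
 * queues of 32 descriptors each, the default ring keeps
 * 256 - 4 * 32 = 128 descriptors.
 */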
#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
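
/* Illustrative note: on a GENET_HAS_40BITS platform a 40-bit bus address
 * such as 0x1_2345_6780 is split as LO = 0x23456780 and HI = 0x1; without
 * CONFIG_PHYS_ADDR_T_64BIT the HI access is compiled out entirely.
 */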
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)
static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These macros are defined to deal with register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY,
	DMA_RING_PRIORITY,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY]		= 0x30,
	[DMA_RING_PRIORITY]	= 0x38,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY]		= 0x34,
	[DMA_RING_PRIORITY]	= 0x3C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY]		= 0x34,
	[DMA_RING_PRIORITY]	= 0x3C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;
static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}
static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI,
};

/* GENET v4 supports 40-bits pointer addressing
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_READ_PTR_HI]	= 0x04,
	[TDMA_CONS_INDEX]	= 0x08,
	[TDMA_PROD_INDEX]	= 0x0C,
	[DMA_RING_BUF_SIZE]	= 0x10,
	[DMA_START_ADDR]	= 0x14,
	[DMA_START_ADDR_HI]	= 0x18,
	[DMA_END_ADDR]		= 0x1C,
	[DMA_END_ADDR_HI]	= 0x20,
	[DMA_MBUF_DONE_THRESH]	= 0x24,
	[TDMA_FLOW_PERIOD]	= 0x28,
	[TDMA_WRITE_PTR]	= 0x2C,
	[TDMA_WRITE_PTR_HI]	= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_CONS_INDEX]	= 0x04,
	[TDMA_PROD_INDEX]	= 0x08,
	[DMA_RING_BUF_SIZE]	= 0x0C,
	[DMA_START_ADDR]	= 0x10,
	[DMA_END_ADDR]		= 0x14,
	[DMA_MBUF_DONE_THRESH]	= 0x18,
	[TDMA_FLOW_PERIOD]	= 0x1C,
	[TDMA_WRITE_PTR]	= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring,
					     u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring,
					     u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
static int bcmgenet_get_settings(struct net_device *dev,
		struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcmgenet_set_settings(struct net_device *dev,
		struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}

static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}

static int bcmgenet_set_features(struct net_device *dev,
		netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}
static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
};
#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)

static void bcmgenet_get_drvinfo(struct net_device *dev,
		struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
	info->n_stats = BCMGENET_STATS_LEN;
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev,
		u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				bcmgenet_gstrings_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
		break;
	}
}
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
			continue;
		case BCMGENET_STAT_MIB_RX:
		case BCMGENET_STAT_MIB_TX:
		case BCMGENET_STAT_RUNT:
			if (s->type != BCMGENET_STAT_MIB_RX)
				offset = BCMGENET_STAT_OFFSET;
			val = bcmgenet_umac_readl(priv, UMAC_MIB_START +
						  j + offset);
			break;
		case BCMGENET_STAT_MISC:
			val = bcmgenet_umac_readl(priv, s->reg_offset);
			/* clear if overflowed */
			if (val == ~0)
				bcmgenet_umac_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats,
		u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}
/* standard ethtool support functions. */
static struct ethtool_ops bcmgenet_ethtool_ops = {
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_settings		= bcmgenet_get_settings,
	.set_settings		= bcmgenet_set_settings,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
};
/* Power down the unimac, based on mode. */
static void bcmgenet_power_down(struct bcmgenet_priv *priv,
				enum bcmgenet_power_mode mode)
{
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		bcmgenet_mii_reset(priv->dev);
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	default:
		break;
	}
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
				enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_BIAS);
		/* fallthrough */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	bcmgenet_mii_reset(priv->dev);
}
/* ioctl handle special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
	tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE;
	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}
/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
						  struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
			UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
			INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
						 struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
			UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
			INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
					       struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv,
			(1 << ring->index), INTRL2_CPU_MASK_CLEAR);
	priv->int1_mask &= ~(1 << ring->index);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
						struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv,
			(1 << ring->index), INTRL2_CPU_MASK_SET);
	priv->int1_mask |= (1 << ring->index);
}
/* Unlocked version of the reclaim routine */
static void __bcmgenet_tx_reclaim(struct net_device *dev,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int last_tx_cn, last_c_index, num_tx_bds;
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int c_index;

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	txq = netdev_get_tx_queue(dev, ring->queue);

	last_c_index = ring->c_index;
	num_tx_bds = ring->size;

	c_index &= (num_tx_bds - 1);

	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_bds - last_c_index + c_index;

	netif_dbg(priv, tx_done, dev,
			"%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
			__func__, ring->index,
			c_index, last_tx_cn, last_c_index);

	/* Reclaim transmitted buffers */
	while (last_tx_cn-- > 0) {
		tx_cb_ptr = ring->cbs + last_c_index;
		if (tx_cb_ptr->skb) {
			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
			dma_unmap_single(&dev->dev,
					dma_unmap_addr(tx_cb_ptr, dma_addr),
					tx_cb_ptr->skb->len,
					DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dev->stats.tx_bytes +=
				dma_unmap_len(tx_cb_ptr, dma_len);
			dma_unmap_page(&dev->dev,
					dma_unmap_addr(tx_cb_ptr, dma_addr),
					dma_unmap_len(tx_cb_ptr, dma_len),
					DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}
		dev->stats.tx_packets++;
		ring->free_bds += 1;

		last_c_index++;
		last_c_index &= (num_tx_bds - 1);
	}

	if (ring->free_bds > (MAX_SKB_FRAGS + 1))
		ring->int_disable(priv, ring);

	if (netif_tx_queue_stopped(txq))
		netif_tx_wake_queue(txq);

	ring->c_index = c_index;
}
static void bcmgenet_tx_reclaim(struct net_device *dev,
		struct bcmgenet_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
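
/* Worked example of the reclaim arithmetic above (illustrative): with
 * num_tx_bds = 256, a previous c_index of 250 and a hardware consumer
 * index of 4, the masked comparison takes the wraparound branch and
 * reclaims 256 - 250 + 4 = 10 transmitted buffers.
 */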
/* Transmits a single SKB (either head of a fragment or a single SKB)
 * caller must hold priv->lock
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = skb;

	skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= 1;
	ring->prod_index += 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	return 0;
}
/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
				skb_frag_t *frag,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();
	tx_cb_ptr->skb = NULL;

	mapping = skb_frag_dma_map(kdev, frag, 0,
		skb_frag_size(frag), DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
				__func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
			(frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	ring->free_bds -= 1;
	ring->prod_index += 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	return 0;
}
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return -ENOMEM;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return 0;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else
			tx_csum_info = 0;

		status->tx_csum_info = tx_csum_info;
	}

	return 0;
}
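
/* Illustrative example (not from the original source): for a TCP segment
 * in an IPv4 frame, the checksum start sits 14 (Ethernet) + 20 (IP) = 34
 * bytes into the frame. After the 64-byte status block is pushed,
 * skb_checksum_start_offset() returns 98, so offset = 98 - 64 = 34, and
 * with skb->csum_offset = 16 the packed value becomes
 * (34 << STATUS_TX_CSUM_START_SHIFT) | 50, with STATUS_TX_CSUM_LV set.
 */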
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet xmited through ring16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	nr_frags = skb_shinfo(skb)->nr_frags;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= nr_frags + 1) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
				__func__, index, ring->queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		ret = bcmgenet_put_tx_csum(dev, skb);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragments */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
				&skb_shinfo(skb)->frags[i],
				(i == nr_frags - 1) ? DMA_EOP : 0, ring);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_tx_timestamp(skb);

	/* we kept a software copy of how much we should advance the TDMA
	 * producer index, now write it down to the hardware
	 */
	bcmgenet_tdma_ring_writel(priv, ring->index,
			ring->prod_index, TDMA_PROD_INDEX);

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);
		ring->int_enable(priv, ring);
	}

out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}
static int bcmgenet_rx_refill(struct bcmgenet_priv *priv,
				struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret;

	skb = netdev_alloc_skb(priv->dev,
				priv->rx_buf_len + SKB_ALIGNMENT);
	if (!skb)
		return -ENOMEM;

	/* a caller did not release this control block */
	WARN_ON(cb->skb != NULL);
	cb->skb = skb;
	mapping = dma_map_single(kdev, skb->data,
			priv->rx_buf_len, DMA_FROM_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		bcmgenet_free_cb(cb);
		netif_err(priv, rx_err, priv->dev,
				"%s DMA map failed\n", __func__);
		return ret;
	}

	dma_unmap_addr_set(cb, dma_addr, mapping);
	/* assign packet, prepare descriptor, and advance pointer */

	dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);

	/* turn on the newly assigned BD for DMA to use */
	priv->rx_bd_assign_index++;
	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);

	priv->rx_bd_assign_ptr = priv->rx_bds +
		(priv->rx_bd_assign_index * DMA_DESC_SIZE);

	return 0;
}
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
				     unsigned int budget)
{
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len, err;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index;
	unsigned int chksum_ok = 0;

	p_index = bcmgenet_rdma_ring_readl(priv,
			DESC_INDEX, RDMA_PROD_INDEX);
	p_index &= DMA_P_INDEX_MASK;

	if (p_index < priv->rx_c_index)
		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		rxpkttoprocess = p_index - priv->rx_c_index;

	netif_dbg(priv, rx_status, dev,
		"RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
			(rxpktprocessed < budget)) {

		/* Unmap the packet contents such that we can use the
		 * RSV from the 64 bytes descriptor when enabled and save
		 * a 32-bits register read
		 */
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = cb->skb;
		dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
				priv->rx_buf_len, DMA_FROM_DEVICE);

		if (!priv->desc_64b_en) {
			dma_length_status = dmadesc_get_length_status(priv,
							priv->rx_bds +
							(priv->rx_read_ptr *
							 DMA_DESC_SIZE));
		} else {
			struct status_64 *status;
			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			"%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			__func__, p_index, priv->rx_c_index, priv->rx_read_ptr,
			dma_length_status);

		rxpktprocessed++;

		priv->rx_read_ptr++;
		priv->rx_read_ptr &= (priv->num_rx_bds - 1);

		/* out of memory, just drop packets at the hardware level */
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			goto refill;
		}

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
					"Dropping fragmented packet!\n");
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			goto refill;
		}
		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
						DMA_RX_OV |
						DMA_RX_NO |
						DMA_RX_LG |
						DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
					(unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;

			/* discard the packet and advance consumer index.*/
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			goto refill;
		} /* error packet */

		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
				priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* remove hardware 2bytes added for IP alignment */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		/*Finish setting up the received SKB and send it to the kernel*/
		skb->protocol = eth_type_trans(skb, priv->dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&priv->napi, skb);
		cb->skb = NULL;
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

		/* refill RX path on the current control block */
refill:
		err = bcmgenet_rx_refill(priv, cb);
		if (err)
			netif_err(priv, rx_err, dev, "Rx refill failed\n");
	}

	return rxpktprocessed;
}
/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int ret = 0;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);

	/* loop here for each buffer needing assign */
	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
		cb->bd_addr = priv->rx_bd_assign_ptr;

		/* set the DMA descriptor length once and for all
		 * it will only change if we support dynamically sizing
		 * priv->rx_buf_len, but we do not
		 */
		dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
				priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);

		ret = bcmgenet_rx_refill(priv, cb);
		if (ret)
			break;
	}

	return ret;
}
static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		if (dma_unmap_addr(cb, dma_addr)) {
			dma_unmap_single(&priv->dev->dev,
					dma_unmap_addr(cb, dma_addr),
					priv->rx_buf_len, DMA_FROM_DEVICE);
			dma_unmap_addr_set(cb, dma_addr, 0);
		}

		if (cb->skb)
			bcmgenet_free_cb(cb);
	}
}
static int reset_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	unsigned int timeout = 0;
	u32 reg;

	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset, wait for it to complete */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			return 0;

		udelay(1);
	}

	if (timeout == 1000) {
		dev_err(kdev,
			"timeout waiting for MAC to come out of reset\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static int init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	int ret;
	u32 reg, cpu_mask_clear;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	ret = reset_umac(priv);
	if (ret)
		return ret;

	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
		MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	/* Mask all interrupts.*/
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);

	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;

	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);

	/* Monitor cable plug/unplugged event for internal PHY */
	if (phy_is_internal(priv->phydev))
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	else if (priv->ext_phy)
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;

	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear,
		INTRL2_CPU_MASK_CLEAR);

	/* Enable rx/tx engine.*/
	dev_dbg(kdev, "done init umac\n");

	return 0;
}
/* Initialize all house-keeping variables for a TX ring, along
 * with corresponding hardware registers
 */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int write_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;
	unsigned int first_bd;

	spin_lock_init(&ring->lock);
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + write_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = write_ptr;
	ring->cb_ptr = write_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
			TDMA_FLOW_PERIOD);
	/* Unclassified traffic goes to ring 16 */
	bcmgenet_tdma_ring_writel(priv, index,
			((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
			DMA_RING_BUF_SIZE);

	first_bd = write_ptr;

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
			DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
			TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, first_bd,
			TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
			DMA_END_ADDR);
}
/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size)
{
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->rx_bd_assign_ptr = priv->rx_bds;
	priv->rx_bd_assign_index = 0;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb),
				GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	ret = bcmgenet_alloc_rx_buffers(priv);
	if (ret) {
		kfree(priv->rx_cbs);
		return ret;
	}

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index,
		((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
		DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index,
		words_per_bd * size - 1, DMA_END_ADDR);
	bcmgenet_rdma_ring_writel(priv, index,
			(DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) |
			DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);

	return ret;
}
/* init multi xmit queues, only available for GENET2+
 * the queue is partitioned as follows:
 *
 * queue 0 - 3 is priority based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT
 * descriptors: 256 - (number of tx queues * bds per queues) = 128
 * descriptors.
 *
 * The transmit control block pool is then partitioned as following:
 * - tx_cbs[0...127] are for queue 16
 * - tx_ring_cbs[0] points to tx_cbs[128..159]
 * - tx_ring_cbs[1] points to tx_cbs[160..191]
 * - tx_ring_cbs[2] points to tx_cbs[192..223]
 * - tx_ring_cbs[3] points to tx_cbs[224..255]
 */
static void bcmgenet_init_multiq(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i, dma_enable;
	u32 reg, dma_ctrl, ring_cfg = 0, dma_priority = 0;

	if (!netif_is_multiqueue(dev)) {
		netdev_warn(dev, "called with non multi queue aware HW\n");
		return;
	}

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		/* first 64 tx_cbs are reserved for default tx queue
		 * (ring 16)
		 */
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
					i * priv->hw_params->bds_cnt,
					(i + 1) * priv->hw_params->bds_cnt);

		/* Configure ring as descriptor ring and setup priority */
		ring_cfg |= 1 << i;
		dma_priority |= ((GENET_Q0_PRIORITY + i) <<
				(GENET_MAX_MQ_CNT + 1) * i);
		dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);
	}

	/* Enable rings */
	reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
	reg |= ring_cfg;
	bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);

	/* Use configured rings priority and set ring #16 priority */
	reg = bcmgenet_tdma_readl(priv, DMA_RING_PRIORITY);
	reg |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 20);
	reg |= dma_priority;
	bcmgenet_tdma_writel(priv, reg, DMA_PRIORITY);

	/* Configure ring as descriptor ring and re-enable DMA if enabled */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	if (dma_enable)
		reg |= DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
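
/* Worked example for the priority packing above (illustrative): each ring
 * gets a (GENET_MAX_MQ_CNT + 1) = 5-bit wide priority field, so with
 * GENET_Q0_PRIORITY = 0 and four queues, dma_priority becomes
 * 0 << 0 | 1 << 5 | 2 << 10 | 3 << 15, and ring 16 is assigned
 * priority 4 at bit 20.
 */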
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	int i;

	/* disable DMA */
	bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
	bcmgenet_tdma_writel(priv, 0, DMA_CTRL);

	for (i = 0; i < priv->num_tx_bds; i++) {
		if (priv->tx_cbs[i].skb != NULL) {
			dev_kfree_skb(priv->tx_cbs[i].skb);
			priv->tx_cbs[i].skb = NULL;
		}
	}

	bcmgenet_free_rx_buffers(priv);
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
	int ret;

	netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");

	/* by default, enable ring 16 (descriptor based) */
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize RX ring\n");
		return ret;
	}

	/* init rDma */
	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Init tDma */
	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize common TX ring structures */
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb),
				GFP_KERNEL);
	if (!priv->tx_cbs) {
		bcmgenet_fini_dma(priv);
		return -ENOMEM;
	}

	/* initialize multi xmit queue */
	bcmgenet_init_multiq(priv->dev);

	/* initialize special ring 16 */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
			priv->hw_params->tx_queues * priv->hw_params->bds_cnt,
			TOTAL_DESC);

	return 0;
}
/* NAPI polling method*/
static int bcmgenet_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_priv *priv = container_of(napi,
			struct bcmgenet_priv, napi);
	unsigned int work_done;

	/* tx reclaim */
	bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);

	work_done = bcmgenet_desc_rx(priv, budget);

	/* Advancing our consumer index*/
	priv->rx_c_index += work_done;
	priv->rx_c_index &= DMA_C_INDEX_MASK;
	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				priv->rx_c_index, RDMA_CONS_INDEX);
	if (work_done < budget) {
		napi_complete(napi);
		bcmgenet_intrl2_0_writel(priv,
			UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR);
	}

	return work_done;
}
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
	struct bcmgenet_priv *priv = container_of(
			work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	/* Link UP/DOWN event */
	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
		(priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
		phy_mac_interrupt(priv->phydev,
			priv->irq0_stat & UMAC_IRQ_LINK_UP);
		priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
	}
}
/* bcmgenet_isr1: interrupt handler for ring buffer. */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	unsigned int index;

	/* Save irq status for bottom-half processing. */
	priv->irq1_stat =
		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
		~priv->int1_mask;
	/* clear interrupts*/
	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		"%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
	/* Check the MBDONE interrupts.
	 * packet is done, reclaim descriptors
	 */
	if (priv->irq1_stat & 0x0000ffff) {
		for (index = 0; index < 16; index++) {
			if (priv->irq1_stat & (1 << index))
				bcmgenet_tx_reclaim(priv->dev,
						&priv->tx_rings[index]);
		}
	}
	return IRQ_HANDLED;
}
/* bcmgenet_isr0: Handle various interrupts. */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	/* Save irq status for bottom-half processing. */
	priv->irq0_stat =
		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	/* clear interrupts*/
	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		"IRQ=0x%x\n", priv->irq0_stat);

	if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
		/* We use NAPI(software interrupt throttling, if
		 * Rx Descriptor throttling is not used.
		 * Disable interrupt, will be enabled in the poll method.
		 */
		if (likely(napi_schedule_prep(&priv->napi))) {
			bcmgenet_intrl2_0_writel(priv,
				UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET);
			__napi_schedule(&priv->napi);
		}
	}
	if (priv->irq0_stat &
			(UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
		/* Tx reclaim */
		bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
	}
	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
				UMAC_IRQ_PHY_DET_F |
				UMAC_IRQ_LINK_UP |
				UMAC_IRQ_LINK_DOWN |
				UMAC_IRQ_HFB_SM |
				UMAC_IRQ_HFB_MM |
				UMAC_IRQ_MPD_R)) {
		/* all other interested interrupts handled in bottom half */
		schedule_work(&priv->bcmgenet_irq_work);
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
		priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
		wake_up(&priv->wq);
	}

	return IRQ_HANDLED;
}
static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
	u32 reg;

	reg = bcmgenet_rbuf_ctrl_get(priv);
	reg |= BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);

	reg &= ~BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);
}
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
				  unsigned char *addr)
{
	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3], UMAC_MAC0);
	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}
static int bcmgenet_wol_resume(struct bcmgenet_priv *priv)
{
	int ret;

	/* From WOL-enabled suspend, switch to regular clock */
	clk_disable(priv->clk_wol);
	/* init umac registers to synchronize s/w with h/w */
	ret = init_umac(priv);
	if (ret)
		return ret;

	phy_init_hw(priv->phydev);
	/* Speed settings must be restored */
	bcmgenet_mii_config(priv->dev);

	return 0;
}
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
	u32 reg;
	u32 dma_ctrl;

	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

	return dma_ctrl;
}
static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
{
	u32 reg;

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
static int bcmgenet_open(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");

	/* Turn on the clock */
	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	/* take MAC out of reset */
	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto err_clk_disable;

	/* disable ethernet MAC while updating its registers */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	reg &= ~(CMD_TX_EN | CMD_RX_EN);
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (priv->wol_enabled) {
		ret = bcmgenet_wol_resume(priv);
		if (ret)
			return ret;
	}

	if (phy_is_internal(priv->phydev)) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto err_fini_dma;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
		goto err_fini_dma;
	}

	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
		goto err_irq0;
	}

	/* Start the network engine */
	napi_enable(&priv->napi);

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	reg |= (CMD_TX_EN | CMD_RX_EN);
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* Make sure we reflect the value of CRC_CMD_FWD */
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	device_set_wakeup_capable(&dev->dev, 1);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	netif_tx_start_all_queues(dev);

	phy_start(priv->phydev);

	return 0;

err_irq0:
	free_irq(priv->irq0, dev);
err_fini_dma:
	bcmgenet_fini_dma(priv);
err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
	return ret;
}
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
	int ret = 0;
	int timeout = 0;
	u32 reg;

	/* Disable TDMA to stop add more frames in TX DMA */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	/* Check TDMA status register to confirm TDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev,
			"Timed out while disabling TX DMA\n");
		ret = -ETIMEDOUT;
	}

	/* Wait 10ms for packet drain in both tx and rx dma */
	usleep_range(10000, 20000);

	/* Disable RDMA */
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	timeout = 0;
	/* Check RDMA status register to confirm RDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev,
			"Timed out while disabling RX DMA\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}
static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;
	u32 reg;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	phy_stop(priv->phydev);

	/* Disable MAC receive */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_RX_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	netif_tx_stop_all_queues(dev);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before this */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_TX_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	napi_disable(&priv->napi);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	/* Wait for pending work items to complete - we are stopping
	 * the clock now. Since interrupts are disabled, no new work
	 * will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	if (priv->wol_enabled)
		clk_enable(priv->clk_wol);

	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	return 0;
}
static void bcmgenet_timeout(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	dev->trans_start = jiffies;

	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}
#define MAX_MC_COUNT	16

static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
					 unsigned char *addr,
					 int *i,
					 int *mc)
{
	u32 reg;

	bcmgenet_umac_writel(priv,
			     addr[0] << 8 | addr[1],
			     UMAC_MDF_ADDR + (*i * 4));
	bcmgenet_umac_writel(priv,
			     addr[2] << 24 | addr[3] << 16 |
			     addr[4] << 8 | addr[5],
			     UMAC_MDF_ADDR + ((*i + 1) * 4));
	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
	reg |= (1 << (MAX_MC_COUNT - *mc));
	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
	*i += 2;
	(*mc)++;
}
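
/* With 16 slots total and two consumed up front (broadcast plus the
 * device's own address), at most 14 unicast/multicast entries fit. On
 * overflow, bcmgenet_set_rx_mode() below simply returns and leaves the
 * filter unchanged rather than falling back to promiscuous mode, which is
 * worth remembering when debugging missed multicast traffic.
 */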
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, mc;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Promiscuous mode */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* UniMac doesn't support ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		netdev_warn(dev, "ALLMULTI is not supported\n");
		return;
	}

	/* update MDF filter */
	i = 0;
	mc = 0;
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
	/* my own address */
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
	/* Unicast list */
	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
		return;

	if (!netdev_uc_empty(dev))
		netdev_for_each_uc_addr(ha, dev)
			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);

	/* Multicast */
	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
		return;

	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
}
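
/* Usage sketch (illustrative only; "eth0" and the address are invented):
 * the station address can only be changed while the interface is down,
 *
 *	ip link set dev eth0 down
 *	ip link set dev eth0 address 00:10:18:ab:cd:ef
 *	ip link set dev eth0 up
 *
 * because the handler below returns -EBUSY while netif_running() is true.
 */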
/* Set the hardware MAC address. */
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Setting the MAC address at the hardware level is not possible
	 * without disabling the UniMAC RX/TX enable bits.
	 */
	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open		= bcmgenet_open,
	.ndo_stop		= bcmgenet_close,
	.ndo_start_xmit		= bcmgenet_xmit,
	.ndo_tx_timeout		= bcmgenet_timeout,
	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
	.ndo_do_ioctl		= bcmgenet_ioctl,
	.ndo_set_features	= bcmgenet_set_features,
};
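
/* The table below is indexed directly by enum bcmgenet_version; since the
 * enum starts at 1 (see the comment in bcmgenet_set_hw_params()), slot 0 of
 * the array is intentionally left empty by the designated initializers.
 */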
/* Array of GENET hardware parameters/characteristics */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.tx_queues = 0,
		.rx_queues = 0,
		.bds_cnt = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
		.words_per_bd = 2,
	},
	[GENET_V2] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
	[GENET_V4] = {
		.tx_queues = 4,
		.rx_queues = 4,
		.bds_cnt = 32,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
};
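
/* Worked example, assuming TOTAL_DESC is 256 as the register-offset comment
 * near the top of this file suggests: on GENET_V4, words_per_bd = 3 gives
 * DMA_DESC_SIZE = 3 * sizeof(u32) = 12 bytes, so the TDMA register block
 * starts at tdma_offset + 256 * 12 = 0x4000 + 0xc00 = 0x4c00, which is what
 * GENET_TDMA_REG_OFF evaluates to.
 */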
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;

	if (GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V4;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V3;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V2;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V1;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
#endif

	pr_debug("Configuration for version: %d\n"
		 "TXq: %1d, RXq: %1d, BDs: %1d\n"
		 "BP << en: %2d, BP msk: 0x%05x\n"
		 "HFB count: %2d, QTAG msk: 0x%05x\n"
		 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		 "RDMA: 0x%05x, TDMA: 0x%05x\n"
		 "Words/BD: %d\n",
		 priv->version,
		 params->tx_queues, params->rx_queues, params->bds_cnt,
		 params->bp_in_en_shift, params->bp_in_mask,
		 params->hfb_filter_cnt, params->qtag_mask,
		 params->tbuf_offset, params->hfb_offset,
		 params->hfb_reg_offset,
		 params->rdma_offset, params->tdma_offset,
		 params->words_per_bd);
}
static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
	{ },
};
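
/* A minimal device-tree node sketch for this driver. Names and addresses
 * here are invented for illustration; the binding document is authoritative:
 *
 *	ethernet@f0b60000 {
 *		compatible = "brcm,genet-v4";
 *		reg = <0xf0b60000 0x10000>;
 *		interrupts = <0x0 0x10 0x0>, <0x0 0x11 0x0>;
 *		local-mac-address = [ 00 10 18 ab cd ef ];
 *	};
 *
 * bcmgenet_probe() below consumes exactly this set: two interrupts, one
 * MMIO resource, and a MAC address from the node.
 */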
static int bcmgenet_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id;
	struct bcmgenet_priv *priv;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	int err = -EIO;

	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	}

	of_id = of_match_node(bcmgenet_match, dn);
	if (!of_id) {
		err = -EINVAL;
		goto err;
	}

	priv = netdev_priv(dev);
	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	if (!priv->irq0 || !priv->irq1) {
		dev_err(&pdev->dev, "can't find IRQs\n");
		err = -EINVAL;
		goto err;
	}

	macaddr = of_get_mac_address(dn);
	if (!macaddr) {
		dev_err(&pdev->dev, "can't find MAC address\n");
		err = -EINVAL;
		goto err;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
		goto err;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	ether_addr_copy(dev->dev_addr, macaddr);
	dev->watchdog_timeo = 2 * HZ;
	SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops);
	dev->netdev_ops = &bcmgenet_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);

	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

	/* Set hardware features */
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;

	/* Set the needed headroom to account for any possible
	 * features enabling/disabling at runtime
	 */
	dev->needed_headroom += 64;

	netdev_boot_setup_check(dev);

	priv->dev = dev;
	priv->pdev = pdev;
	priv->version = (enum bcmgenet_version)of_id->data;

	bcmgenet_set_hw_params(priv);

	/* Mii wait queue */
	init_waitqueue_head(&priv->wq);
	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk))
		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");

	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol))
		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");

	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	err = reset_umac(priv);
	if (err)
		goto err_clk_disable;

	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_clk_disable;

	/* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
	 * queues, just the default ring 16 descriptor-based TX queue).
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

	err = register_netdev(dev);
	if (err)
		goto err_clk_disable;

	/* Turn off the main clock, WOL clock is handled separately */
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	return err;

err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
err:
	free_netdev(dev);
	return err;
}
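
/* Clock lifecycle: probe keeps the "enet" clock enabled only long enough to
 * reset the UMAC, bring up the MII bus and register the netdev, then gates
 * it again; open/close re-enable and disable it around normal operation,
 * while the "enet-wol" clock is handled separately for Wake-on-LAN.
 */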
static int bcmgenet_remove(struct platform_device *pdev)
{
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);

	return 0;
}
static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove	= bcmgenet_remove,
	.driver	= {
		.name	= "bcmgenet",
		.owner	= THIS_MODULE,
		.of_match_table = bcmgenet_match,
	},
};
module_platform_driver(bcmgenet_driver);
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");