// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014-2017 Broadcom
 */

#define pr_fmt(fmt)				"bcmgenet: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"
/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
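
/* For example, with the layout used by bcmgenet_init_tx_queues() below
 * (TOTAL_DESC = 256 and four priority Tx queues of 32 BDs each), the
 * default queue is left with GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128
 * descriptors.
 */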
static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
	/* MIPS chips strapped for BE will automagically configure the
	 * peripheral registers for CPU-native byte order.
	 */
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(value, offset);
	else
		writel_relaxed(value, offset);
}
static inline u32 bcmgenet_readl(void __iomem *offset)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return __raw_readl(offset);
	else
		return readl_relaxed(offset);
}
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return bcmgenet_readl(d + DMA_DESC_LENGTH_STATUS);
}
static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv, void __iomem *d,
			       dma_addr_t addr, u32 val)
{
	dmadesc_set_addr(priv, d, addr);
	dmadesc_set_length_status(priv, d, val);
}
static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)
static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}
static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}
/* These macros are defined to deal with register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_CTRL);
}
static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}
static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
	DMA_RING0_TIMEOUT,
	DMA_RING1_TIMEOUT,
	DMA_RING2_TIMEOUT,
	DMA_RING3_TIMEOUT,
	DMA_RING4_TIMEOUT,
	DMA_RING5_TIMEOUT,
	DMA_RING6_TIMEOUT,
	DMA_RING7_TIMEOUT,
	DMA_RING8_TIMEOUT,
	DMA_RING9_TIMEOUT,
	DMA_RING10_TIMEOUT,
	DMA_RING11_TIMEOUT,
	DMA_RING12_TIMEOUT,
	DMA_RING13_TIMEOUT,
	DMA_RING14_TIMEOUT,
	DMA_RING15_TIMEOUT,
	DMA_RING16_TIMEOUT,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};
static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};
static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};
/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}
static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};
/* GENET v4 supports 40-bits pointer addressing
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_READ_PTR_HI]		= 0x04,
	[TDMA_CONS_INDEX]		= 0x08,
	[TDMA_PROD_INDEX]		= 0x0C,
	[DMA_RING_BUF_SIZE]		= 0x10,
	[DMA_START_ADDR]		= 0x14,
	[DMA_START_ADDR_HI]		= 0x18,
	[DMA_END_ADDR]			= 0x1C,
	[DMA_END_ADDR_HI]		= 0x20,
	[DMA_MBUF_DONE_THRESH]		= 0x24,
	[TDMA_FLOW_PERIOD]		= 0x28,
	[TDMA_WRITE_PTR]		= 0x2C,
	[TDMA_WRITE_PTR_HI]		= 0x30,
};
static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_CONS_INDEX]		= 0x04,
	[TDMA_PROD_INDEX]		= 0x08,
	[DMA_RING_BUF_SIZE]		= 0x0C,
	[DMA_START_ADDR]		= 0x10,
	[DMA_END_ADDR]			= 0x14,
	[DMA_MBUF_DONE_THRESH]		= 0x18,
	[TDMA_FLOW_PERIOD]		= 0x1C,
	[TDMA_WRITE_PTR]		= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}
static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}
static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
static int bcmgenet_begin(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn on the clock */
	return clk_prepare_enable(priv->clk);
}
static void bcmgenet_complete(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn off the clock */
	clk_disable_unprepare(priv->clk);
}
static int bcmgenet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	phy_ethtool_ksettings_get(dev->phydev, cmd);

	return 0;
}
static int bcmgenet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(dev->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}
static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}
static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}
static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}
static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}
static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rx_ring *ring;
	unsigned int i;

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ring = &priv->rx_rings[i];
		ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
	}
	ring = &priv->rx_rings[DESC_INDEX];
	ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;

	return 0;
}
static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
				     u32 usecs, u32 pkts)
{
	struct bcmgenet_priv *priv = ring->priv;
	unsigned int i = ring->index;
	u32 reg;

	bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
}
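
/* Worked example for the conversion above: with the ~8.192us timeout tick
 * (the 125MHz reference clock divided by 1024, see the comment in
 * bcmgenet_set_coalesce() below), a request for usecs = 50 programs
 * DIV_ROUND_UP(50 * 1000, 8192) = 7 ticks, so the interrupt actually fires
 * after roughly 57us.
 */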
static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
					  struct ethtool_coalesce *ec)
{
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = ring->rx_coalesce_usecs;
	pkts = ring->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
}
static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;

	/* Base system clock is 125Mhz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192us, our maximum value
	 * has to fit in the DMA_TIMEOUT_MASK (16 bits)
	 */
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but will
	 * always generate an interrupt either after MBDONE packets have been
	 * transmitted, or when the ring is empty.
	 */
	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low ||
	    ec->use_adaptive_tx_coalesce)
		return -EOPNOTSUPP;

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++)
		bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
	bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);

	return 0;
}
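
/* These knobs map onto the standard ethtool coalescing parameters, so an
 * illustrative invocation from userspace (hypothetical interface name and
 * values) would be:
 *   ethtool -C eth0 rx-usecs 50 rx-frames 16 tx-frames 10 adaptive-rx on
 */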
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};
#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}
#define STAT_GENET_Q(num) \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
			tx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
			tx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
			rx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
			rx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
			rx_rings[num].errors), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
			rx_rings[num].dropped)
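
/* For reference, STAT_GENET_Q(1) expands to six soft MIB entries named
 * "txq1_packets", "txq1_bytes", "rxq1_bytes", "rxq1_packets",
 * "rxq1_errors" and "rxq1_dropped", backed by the corresponding
 * tx_rings[1]/rx_rings[1] counters.
 */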
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc
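
/* bcmgenet_update_mib_counters() below relies on switch fall-through to
 * accumulate this gap: a RUNT counter adds the offset twice (2 * 0xC), a
 * TX counter adds it once, and an RX counter reads straight from
 * UMAC_MIB_START.
 */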
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT_V1),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
			UMAC_RBUF_ERR_CNT_V1),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
	/* Per TX queues */
	STAT_GENET_Q(0),
	STAT_GENET_Q(1),
	STAT_GENET_Q(2),
	STAT_GENET_Q(3),
	STAT_GENET_Q(16),
};
#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)

static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
{
	u16 new_offset;
	u32 val;

	switch (offset) {
	case UMAC_RBUF_OVFL_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_OVFL_CNT_V2;
		else
			new_offset = RBUF_OVFL_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	case UMAC_RBUF_ERR_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_ERR_CNT_V2;
		else
			new_offset = RBUF_ERR_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	default:
		val = bcmgenet_umac_readl(priv, offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_umac_writel(priv, 0, offset);
		break;
	}

	return val;
}
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_RUNT:
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_TX:
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_RX:
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			offset = 0;	/* Reset Offset */
			break;
		case BCMGENET_STAT_MISC:
			if (GENET_IS_V1(priv)) {
				val = bcmgenet_umac_readl(priv, s->reg_offset);
				/* clear if overflowed */
				if (val == ~0)
					bcmgenet_umac_writel(priv, 0,
							     s->reg_offset);
			} else {
				val = bcmgenet_update_stat_misc(priv,
								s->reg_offset);
			}
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		if (sizeof(unsigned long) != sizeof(u32) &&
		    s->stat_sizeof == sizeof(unsigned long))
			data[i] = *(unsigned long *)p;
		else
			data[i] = *(u32 *)p;
	}
}
static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27Mhz clock automatically */
	reg = bcmgenet_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	bcmgenet_writel(reg, priv->base + off);

	/* Do the same for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(dev->phydev, e);
}
static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(dev->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(dev->phydev, e);
}
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
	.begin			= bcmgenet_begin,
	.complete		= bcmgenet_complete,
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
	.get_link_ksettings	= bcmgenet_get_link_ksettings,
	.set_link_ksettings	= bcmgenet_set_link_ksettings,
};
/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->dev->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			if (GENET_IS_V5(priv))
				reg |= EXT_PWR_DOWN_PHY_EN |
				       EXT_PWR_DOWN_PHY_RD |
				       EXT_PWR_DOWN_PHY_SD |
				       EXT_PWR_DOWN_PHY_RX |
				       EXT_PWR_DOWN_PHY_TX |
				       EXT_IDDQ_GLBL_PWR;
			else
				reg |= EXT_PWR_DOWN_PHY;

			reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	return ret;
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
		if (GENET_IS_V5(priv)) {
			reg &= ~(EXT_PWR_DOWN_PHY_EN |
				 EXT_PWR_DOWN_PHY_RD |
				 EXT_PWR_DOWN_PHY_SD |
				 EXT_PWR_DOWN_PHY_RX |
				 EXT_PWR_DOWN_PHY_TX |
				 EXT_IDDQ_GLBL_PWR);
			reg |= EXT_PHY_RESET;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
			mdelay(1);

			reg &= ~EXT_PHY_RESET;
		} else {
			reg &= ~EXT_PWR_DOWN_PHY;
			reg |= EXT_PWR_DN_EN_LD;
		}
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		bcmgenet_phy_power_set(priv->dev, true);
		break;

	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		if (!GENET_IS_V5(priv)) {
			reg |= EXT_PWR_DN_EN_LD;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}
}
/* ioctl handle special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	return phy_mii_ioctl(dev->phydev, rq, cmd);
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}
static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Rewinding local write pointer */
	if (ring->write_ptr == ring->cb_ptr)
		ring->write_ptr = ring->end_ptr;
	else
		ring->write_ptr--;

	return tx_cb_ptr;
}
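
/* bcmgenet_get_txcb() and bcmgenet_put_txcb() are deliberately symmetric:
 * the transmit path claims one control block per fragment with the former,
 * and on a DMA mapping failure bcmgenet_xmit() walks the same count back
 * with the latter to unwind the partially-mapped frame.
 */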
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}
/* Simple helper to free a transmit control block's resources
 * Returns an skb when the last transmit control block associated with the
 * skb is freed. The skb should be freed by the caller if necessary.
 */
static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;

	if (skb) {
		cb->skb = NULL;
		if (cb == GENET_CB(skb)->first_cb)
			dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
					 dma_unmap_len(cb, dma_len),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
				       dma_unmap_len(cb, dma_len),
				       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);

		if (cb == GENET_CB(skb)->last_cb)
			return skb;

	} else if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_page(dev,
			       dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len),
			       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return NULL;
}
/* Simple helper to free a receive control block's resources */
static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;
	cb->skb = NULL;

	if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return skb;
}
/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int txbds_processed = 0;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;
	unsigned int txbds_ready;
	unsigned int c_index;
	struct sk_buff *skb;

	/* Clear status before servicing to reduce spurious interrupts */
	if (ring->index == DESC_INDEX)
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
					 INTRL2_CPU_CLEAR);
	else
		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
					 INTRL2_CPU_CLEAR);

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
		& DMA_C_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
					  &priv->tx_cbs[ring->clean_ptr]);
		if (skb) {
			pkts_compl++;
			bytes_compl += GENET_CB(skb)->bytes_sent;
			dev_consume_skb_any(skb);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = c_index;

	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;

	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
				  pkts_compl, bytes_compl);

	return txbds_processed;
}
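
/* A note on the consumer-index arithmetic in __bcmgenet_tx_reclaim(): the
 * subtraction is wraparound-safe because both values are masked. Assuming
 * the 16-bit DMA_C_INDEX_MASK (0xffff) from bcmgenet.h, an old c_index of
 * 0xfffe and a new hardware value of 0x0001 yield
 * (0x0001 - 0xfffe) & 0xffff = 3 completed buffers.
 */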
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;

	spin_lock_bh(&ring->lock);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_bh(&ring->lock);

	return released;
}
static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	spin_lock(&ring->lock);
	work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
		netif_tx_wake_queue(txq);
	}
	spin_unlock(&ring->lock);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring);

		return 0;
	}

	return budget;
}
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	__be16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		switch (ip_ver) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			/* don't use UDP flag */
			ip_proto = 0;
			break;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP &&
			    ip_ver == htons(ETH_P_IP))
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcmgenet_tx_ring *ring = NULL;
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	int nr_frags, index;
	dma_addr_t mapping;
	unsigned int size;
	skb_frag_t *frag;
	u32 len_stat;
	netdev_tx_t ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet xmited through ring16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;
= &priv
->tx_rings
[index
];
1572 txq
= netdev_get_tx_queue(dev
, ring
->queue
);
1574 nr_frags
= skb_shinfo(skb
)->nr_frags
;
1576 spin_lock(&ring
->lock
);
1577 if (ring
->free_bds
<= (nr_frags
+ 1)) {
1578 if (!netif_tx_queue_stopped(txq
)) {
1579 netif_tx_stop_queue(txq
);
1581 "%s: tx ring %d full when queue %d awake\n",
1582 __func__
, index
, ring
->queue
);
1584 ret
= NETDEV_TX_BUSY
;
1588 if (skb_padto(skb
, ETH_ZLEN
)) {
1593 /* Retain how many bytes will be sent on the wire, without TSB inserted
1594 * by transmit checksum offload
1596 GENET_CB(skb
)->bytes_sent
= skb
->len
;
1598 /* set the SKB transmit checksum */
1599 if (priv
->desc_64b_en
) {
1600 skb
= bcmgenet_put_tx_csum(dev
, skb
);
1607 for (i
= 0; i
<= nr_frags
; i
++) {
1608 tx_cb_ptr
= bcmgenet_get_txcb(priv
, ring
);
1613 /* Transmit single SKB or head of fragment list */
1614 GENET_CB(skb
)->first_cb
= tx_cb_ptr
;
1615 size
= skb_headlen(skb
);
1616 mapping
= dma_map_single(kdev
, skb
->data
, size
,
1620 frag
= &skb_shinfo(skb
)->frags
[i
- 1];
1621 size
= skb_frag_size(frag
);
1622 mapping
= skb_frag_dma_map(kdev
, frag
, 0, size
,
1626 ret
= dma_mapping_error(kdev
, mapping
);
1628 priv
->mib
.tx_dma_failed
++;
1629 netif_err(priv
, tx_err
, dev
, "Tx DMA map failed\n");
1631 goto out_unmap_frags
;
1633 dma_unmap_addr_set(tx_cb_ptr
, dma_addr
, mapping
);
1634 dma_unmap_len_set(tx_cb_ptr
, dma_len
, size
);
1636 tx_cb_ptr
->skb
= skb
;
1638 len_stat
= (size
<< DMA_BUFLENGTH_SHIFT
) |
1639 (priv
->hw_params
->qtag_mask
<< DMA_TX_QTAG_SHIFT
);
1642 len_stat
|= DMA_TX_APPEND_CRC
| DMA_SOP
;
1643 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
1644 len_stat
|= DMA_TX_DO_CSUM
;
1647 len_stat
|= DMA_EOP
;
1649 dmadesc_set(priv
, tx_cb_ptr
->bd_addr
, mapping
, len_stat
);
1652 GENET_CB(skb
)->last_cb
= tx_cb_ptr
;
1653 skb_tx_timestamp(skb
);
1655 /* Decrement total BD count and advance our write pointer */
1656 ring
->free_bds
-= nr_frags
+ 1;
1657 ring
->prod_index
+= nr_frags
+ 1;
1658 ring
->prod_index
&= DMA_P_INDEX_MASK
;
1660 netdev_tx_sent_queue(txq
, GENET_CB(skb
)->bytes_sent
);
1662 if (ring
->free_bds
<= (MAX_SKB_FRAGS
+ 1))
1663 netif_tx_stop_queue(txq
);
1665 if (!netdev_xmit_more() || netif_xmit_stopped(txq
))
1666 /* Packets are ready, update producer index */
1667 bcmgenet_tdma_ring_writel(priv
, ring
->index
,
1668 ring
->prod_index
, TDMA_PROD_INDEX
);
1670 spin_unlock(&ring
->lock
);
1675 /* Back up for failed control block mapping */
1676 bcmgenet_put_txcb(priv
, ring
);
1678 /* Unmap successfully mapped control blocks */
1680 tx_cb_ptr
= bcmgenet_put_txcb(priv
, ring
);
1681 bcmgenet_free_tx_cb(kdev
, tx_cb_ptr
);
1688 static struct sk_buff
*bcmgenet_rx_refill(struct bcmgenet_priv
*priv
,
1691 struct device
*kdev
= &priv
->pdev
->dev
;
1692 struct sk_buff
*skb
;
1693 struct sk_buff
*rx_skb
;
1696 /* Allocate a new Rx skb */
1697 skb
= netdev_alloc_skb(priv
->dev
, priv
->rx_buf_len
+ SKB_ALIGNMENT
);
1699 priv
->mib
.alloc_rx_buff_failed
++;
1700 netif_err(priv
, rx_err
, priv
->dev
,
1701 "%s: Rx skb allocation failed\n", __func__
);
1705 /* DMA-map the new Rx skb */
1706 mapping
= dma_map_single(kdev
, skb
->data
, priv
->rx_buf_len
,
1708 if (dma_mapping_error(kdev
, mapping
)) {
1709 priv
->mib
.rx_dma_failed
++;
1710 dev_kfree_skb_any(skb
);
1711 netif_err(priv
, rx_err
, priv
->dev
,
1712 "%s: Rx skb DMA mapping failed\n", __func__
);
1716 /* Grab the current Rx skb from the ring and DMA-unmap it */
1717 rx_skb
= bcmgenet_free_rx_cb(kdev
, cb
);
1719 /* Put the new Rx skb on the ring */
1721 dma_unmap_addr_set(cb
, dma_addr
, mapping
);
1722 dma_unmap_len_set(cb
, dma_len
, priv
->rx_buf_len
);
1723 dmadesc_set_addr(priv
, cb
->bd_addr
, mapping
);
1725 /* Return the current Rx skb to caller */
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
				     unsigned int budget)
{
	struct bcmgenet_priv *priv = ring->priv;
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int bytes_processed = 0;
	unsigned int p_index, mask;
	unsigned int discards;
	unsigned int chksum_ok = 0;

	/* Clear status before servicing to reduce spurious interrupts */
	if (ring->index == DESC_INDEX) {
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
					 INTRL2_CPU_CLEAR);
	} else {
		mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
		bcmgenet_intrl2_1_writel(priv,
					 mask, INTRL2_CPU_CLEAR);
	}

	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
		   DMA_P_INDEX_DISCARD_CNT_MASK;
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		ring->errors += discards;
		ring->old_discards += discards;

		/* Clear HW register when we reach 75% of maximum 0xFFFF */
		if (ring->old_discards >= 0xC000) {
			ring->old_discards = 0;
			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
						  RDMA_PROD_INDEX);
		}
	}
&= DMA_P_INDEX_MASK
;
1777 rxpkttoprocess
= (p_index
- ring
->c_index
) & DMA_C_INDEX_MASK
;
1779 netif_dbg(priv
, rx_status
, dev
,
1780 "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess
);
1782 while ((rxpktprocessed
< rxpkttoprocess
) &&
1783 (rxpktprocessed
< budget
)) {
1784 cb
= &priv
->rx_cbs
[ring
->read_ptr
];
1785 skb
= bcmgenet_rx_refill(priv
, cb
);
1787 if (unlikely(!skb
)) {
1792 if (!priv
->desc_64b_en
) {
1794 dmadesc_get_length_status(priv
, cb
->bd_addr
);
1796 struct status_64
*status
;
1798 status
= (struct status_64
*)skb
->data
;
1799 dma_length_status
= status
->length_status
;
1802 /* DMA flags and length are still valid no matter how
1803 * we got the Receive Status Vector (64B RSB or register)
1805 dma_flag
= dma_length_status
& 0xffff;
1806 len
= dma_length_status
>> DMA_BUFLENGTH_SHIFT
;
1808 netif_dbg(priv
, rx_status
, dev
,
1809 "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
1810 __func__
, p_index
, ring
->c_index
,
1811 ring
->read_ptr
, dma_length_status
);
1813 if (unlikely(!(dma_flag
& DMA_EOP
) || !(dma_flag
& DMA_SOP
))) {
1814 netif_err(priv
, rx_status
, dev
,
1815 "dropping fragmented packet!\n");
1817 dev_kfree_skb_any(skb
);
1822 if (unlikely(dma_flag
& (DMA_RX_CRC_ERROR
|
1827 netif_err(priv
, rx_status
, dev
, "dma_flag=0x%x\n",
1828 (unsigned int)dma_flag
);
1829 if (dma_flag
& DMA_RX_CRC_ERROR
)
1830 dev
->stats
.rx_crc_errors
++;
1831 if (dma_flag
& DMA_RX_OV
)
1832 dev
->stats
.rx_over_errors
++;
1833 if (dma_flag
& DMA_RX_NO
)
1834 dev
->stats
.rx_frame_errors
++;
1835 if (dma_flag
& DMA_RX_LG
)
1836 dev
->stats
.rx_length_errors
++;
1837 dev
->stats
.rx_errors
++;
1838 dev_kfree_skb_any(skb
);
1840 } /* error packet */
1842 chksum_ok
= (dma_flag
& priv
->dma_rx_chk_bit
) &&
1843 priv
->desc_rxchk_en
;
1846 if (priv
->desc_64b_en
) {
1851 if (likely(chksum_ok
))
1852 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1854 /* remove hardware 2bytes added for IP alignment */
1858 if (priv
->crc_fwd_en
) {
1859 skb_trim(skb
, len
- ETH_FCS_LEN
);
1863 bytes_processed
+= len
;
1865 /*Finish setting up the received SKB and send it to the kernel*/
1866 skb
->protocol
= eth_type_trans(skb
, priv
->dev
);
1869 if (dma_flag
& DMA_RX_MULT
)
1870 dev
->stats
.multicast
++;
1873 napi_gro_receive(&ring
->napi
, skb
);
1874 netif_dbg(priv
, rx_status
, dev
, "pushed up to kernel\n");
1878 if (likely(ring
->read_ptr
< ring
->end_ptr
))
1881 ring
->read_ptr
= ring
->cb_ptr
;
1883 ring
->c_index
= (ring
->c_index
+ 1) & DMA_C_INDEX_MASK
;
1884 bcmgenet_rdma_ring_writel(priv
, ring
->index
, ring
->c_index
, RDMA_CONS_INDEX
);
1887 ring
->dim
.bytes
= bytes_processed
;
1888 ring
->dim
.packets
= rxpktprocessed
;
1890 return rxpktprocessed
;
/* Rx NAPI polling method */
static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_rx_ring *ring = container_of(napi,
			struct bcmgenet_rx_ring, napi);
	struct dim_sample dim_sample;
	unsigned int work_done;

	work_done = bcmgenet_desc_rx(ring, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ring->int_enable(ring);
	}

	if (ring->dim.use_dim) {
		dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
				  ring->dim.bytes, &dim_sample);
		net_dim(&ring->dim.dim, dim_sample);
	}

	return work_done;
}
static void bcmgenet_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct bcmgenet_net_dim *ndim =
			container_of(dim, struct bcmgenet_net_dim, dim);
	struct bcmgenet_rx_ring *ring =
			container_of(ndim, struct bcmgenet_rx_ring, dim);
	struct dim_cq_moder cur_profile =
			net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
	dim->state = DIM_START_MEASURE;
}
/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
				     struct bcmgenet_rx_ring *ring)
{
	struct enet_cb *cb;
	struct sk_buff *skb;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* loop here for each buffer needing assign */
	for (i = 0; i < ring->size; i++) {
		cb = ring->cbs + i;
		skb = bcmgenet_rx_refill(priv, cb);
		if (skb)
			dev_consume_skb_any(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}
static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct sk_buff *skb;
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
		if (skb)
			dev_consume_skb_any(skb);
	}
}
static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}
static void reset_umac(struct bcmgenet_priv *priv)
{
	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
	bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD);
	udelay(2);
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts.*/
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
}
static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
{
	u32 int0_enable = 0;

	/* Monitor cable plug/unplugged event for internal PHY, external PHY
	 * and MoCA PHY
	 */
	if (priv->internal_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
	} else if (priv->ext_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
			int0_enable |= UMAC_IRQ_LINK_EVENT;
	}
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
}
static void init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	u32 reg;
	u32 int0_enable = 0;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	reset_umac(priv);

	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	/* Configure backpressure vectors for MoCA */
	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);

	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);

	dev_dbg(kdev, "done init umac\n");
}
static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
			      void (*cb)(struct work_struct *work))
{
	struct bcmgenet_net_dim *dim = &ring->dim;

	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;
}
static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
{
	struct bcmgenet_net_dim *dim = &ring->dim;
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	usecs = ring->rx_coalesce_usecs;
	pkts = ring->rx_max_coalesced_frames;

	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
}
/* Initialize a Tx ring along with corresponding hardware registers */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;

	spin_lock_init(&ring->lock);
	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + start_ptr;
	ring->size = size;
	ring->clean_ptr = start_ptr;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	/* Initialize Tx NAPI */
	netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
		       NAPI_POLL_WEIGHT);
}
/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size,
				 unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->int_enable = bcmgenet_rx_ring16_int_enable;
		ring->int_disable = bcmgenet_rx_ring16_int_disable;
	} else {
		ring->int_enable = bcmgenet_rx_ring_int_enable;
		ring->int_disable = bcmgenet_rx_ring_int_disable;
	}
	ring->cbs = priv->rx_cbs + start_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->read_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;

	ret = bcmgenet_alloc_rx_buffers(priv, ring);
	if (ret)
		return ret;

	bcmgenet_init_dim(ring, bcmgenet_dim_work);
	bcmgenet_init_rx_coalesce(ring);

	/* Initialize Rx NAPI */
	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
		       NAPI_POLL_WEIGHT);

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);

	/* Set start and end address, read and write pointers */
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_READ_PTR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	return ret;
}

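/* RDMA_XON_XOFF_THRESH packs both flow control watermarks into a single
 * register: DMA_FC_THRESH_LO shifted into the upper field and
 * DMA_FC_THRESH_HI in the low bits. Once the ring fills past the high
 * threshold the MAC can assert pause (XOFF) towards the link partner,
 * releasing it (XON) when the fill level drops below the low threshold.
 */
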
static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_enable(&ring->napi);
		ring->int_enable(ring);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
	ring->int_enable(ring);
}

static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_disable(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
}

static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}

/* Initialize Tx queues
 *
 * Queues 0-3 are priority-based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * Queue 16 is the default Tx queue with
 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
 *
 * The transmit control block pool is then partitioned as follows:
 * - Tx queue 0 uses tx_cbs[0..31]
 * - Tx queue 1 uses tx_cbs[32..63]
 * - Tx queue 2 uses tx_cbs[64..95]
 * - Tx queue 3 uses tx_cbs[96..127]
 * - Tx queue 16 uses tx_cbs[128..255]
 */
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i, dma_enable;
	u32 dma_ctrl, ring_cfg;
	u32 dma_priority[3] = {0, 0, 0};

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	/* Initialize Tx priority queues */
	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
				      i * priv->hw_params->tx_bds_per_q,
				      (i + 1) * priv->hw_params->tx_bds_per_q);
		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Initialize Tx default queue 16 */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->tx_bds_per_q,
			      TOTAL_DESC);
	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));

	/* Set Tx queue priorities */
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);

	/* Enable Tx queues */
	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Enable Tx DMA */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
}

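/* Priority register packing: DMA_PRIO_REG_INDEX(q) selects which of the
 * three DMA_PRIORITY_n registers carries queue q's priority field, and
 * DMA_PRIO_REG_SHIFT(q) gives its bit offset within that register. With
 * the arbiter in strict priority mode and GENET_Q0_PRIORITY == 0, queue 0
 * ends up with the highest priority and the default queue 16, which is
 * assigned GENET_Q0_PRIORITY + tx_queues, with the lowest.
 */
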
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_enable(&ring->napi);
		ring->int_enable(ring);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
	ring->int_enable(ring);
}

static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_disable(&ring->napi);
		cancel_work_sync(&ring->dim.dim.work);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
	cancel_work_sync(&ring->dim.dim.work);
}

static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}

/* Initialize Rx queues
 *
 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
 * used to direct traffic to these queues.
 *
 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
 */
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i;
	u32 dma_enable;
	u32 dma_ctrl;
	u32 ring_cfg;
	int ret;

	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Initialize Rx priority queues */
	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ret = bcmgenet_init_rx_ring(priv, i,
					    priv->hw_params->rx_bds_per_q,
					    i * priv->hw_params->rx_bds_per_q,
					    (i + 1) *
					    priv->hw_params->rx_bds_per_q);
		if (ret)
			return ret;

		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	}

	/* Initialize Rx default queue 16 */
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
				    priv->hw_params->rx_queues *
				    priv->hw_params->rx_bds_per_q,
				    TOTAL_DESC);
	if (ret)
		return ret;

	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));

	/* Enable rings */
	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Configure ring as descriptor ring and re-enable DMA if enabled */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	return 0;
}

static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
	int ret = 0;
	int timeout = 0;
	u32 reg;
	u32 dma_ctrl;
	int i;

	/* Disable TDMA to stop add more frames in TX DMA */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	/* Check TDMA status register to confirm TDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
		ret = -ETIMEDOUT;
	}

	/* Wait 10ms for packet drain in both tx and rx dma */
	usleep_range(10000, 20000);

	/* Disable RDMA */
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	timeout = 0;
	/* Check RDMA status register to confirm RDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
		ret = -ETIMEDOUT;
	}

	dma_ctrl = 0;
	for (i = 0; i < priv->hw_params->rx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	dma_ctrl = 0;
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	return ret;
}

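/* The ordering above is deliberate: TDMA is halted first so no new frames
 * can be pushed towards the MAC, the 10ms sleep gives in-flight packets
 * time to drain in both directions, and only then are RDMA and the
 * per-queue ring buffer enable bits cleared.
 */
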
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	struct netdev_queue *txq;
	struct sk_buff *skb;
	struct enet_cb *cb;
	int i;

	bcmgenet_fini_rx_napi(priv);
	bcmgenet_fini_tx_napi(priv);

	for (i = 0; i < priv->num_tx_bds; i++) {
		cb = priv->tx_cbs + i;
		skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
		if (skb)
			dev_kfree_skb(skb);
	}

	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
		netdev_tx_reset_queue(txq);
	}

	txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
	netdev_tx_reset_queue(txq);

	bcmgenet_free_rx_buffers(priv);
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
}

/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
	int ret;
	unsigned int i;
	struct enet_cb *cb;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* Initialize common Rx ring structures */
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
	}

	/* Initialize common TX ring structures */
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->tx_cbs) {
		kfree(priv->rx_cbs);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_bds; i++) {
		cb = priv->tx_cbs + i;
		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
	}

	/* Init rDma */
	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize Rx queues */
	ret = bcmgenet_init_rx_queues(priv->dev);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize Rx queues\n");
		bcmgenet_free_rx_buffers(priv);
		kfree(priv->rx_cbs);
		kfree(priv->tx_cbs);
		return ret;
	}

	/* Init tDma */
	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize Tx queues */
	bcmgenet_init_tx_queues(priv->dev);

	return 0;
}

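/* Unwind note: once bcmgenet_init_rx_queues() has succeeded, Rx buffers
 * are attached to the control blocks, so a later failure must release
 * those buffers in addition to both control block arrays, which is what
 * the error path above does. Tx queue setup cannot fail, which is why it
 * runs last and returns no status.
 */
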
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
	unsigned int status;
	struct bcmgenet_priv *priv = container_of(
			work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	spin_lock_irq(&priv->lock);
	status = priv->irq0_stat;
	priv->irq0_stat = 0;
	spin_unlock_irq(&priv->lock);

	/* Link UP/DOWN event */
	if (status & UMAC_IRQ_LINK_EVENT) {
		priv->dev->phydev->link = !!(status & UMAC_IRQ_LINK_UP);
		phy_mac_interrupt(priv->dev->phydev);
	}
}

/* bcmgenet_isr1: handle Rx and Tx priority queues */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_rx_ring *rx_ring;
	struct bcmgenet_tx_ring *tx_ring;
	unsigned int index, status;

	/* Read irq status */
	status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);

	/* clear interrupts */
	bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "%s: IRQ=0x%x\n", __func__, status);

	/* Check Rx priority queue interrupts */
	for (index = 0; index < priv->hw_params->rx_queues; index++) {
		if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
			continue;

		rx_ring = &priv->rx_rings[index];
		rx_ring->dim.event_ctr++;

		if (likely(napi_schedule_prep(&rx_ring->napi))) {
			rx_ring->int_disable(rx_ring);
			__napi_schedule_irqoff(&rx_ring->napi);
		}
	}

	/* Check Tx priority queue interrupts */
	for (index = 0; index < priv->hw_params->tx_queues; index++) {
		if (!(status & BIT(index)))
			continue;

		tx_ring = &priv->tx_rings[index];

		if (likely(napi_schedule_prep(&tx_ring->napi))) {
			tx_ring->int_disable(tx_ring);
			__napi_schedule_irqoff(&tx_ring->napi);
		}
	}

	return IRQ_HANDLED;
}

/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_rx_ring *rx_ring;
	struct bcmgenet_tx_ring *tx_ring;
	unsigned int status;
	unsigned long flags;

	/* Read irq status */
	status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);

	/* clear interrupts */
	bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "IRQ=0x%x\n", status);

	if (status & UMAC_IRQ_RXDMA_DONE) {
		rx_ring = &priv->rx_rings[DESC_INDEX];
		rx_ring->dim.event_ctr++;

		if (likely(napi_schedule_prep(&rx_ring->napi))) {
			rx_ring->int_disable(rx_ring);
			__napi_schedule_irqoff(&rx_ring->napi);
		}
	}

	if (status & UMAC_IRQ_TXDMA_DONE) {
		tx_ring = &priv->tx_rings[DESC_INDEX];

		if (likely(napi_schedule_prep(&tx_ring->napi))) {
			tx_ring->int_disable(tx_ring);
			__napi_schedule_irqoff(&tx_ring->napi);
		}
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		wake_up(&priv->wq);
	}

	/* all other interested interrupts handled in bottom half */
	status &= UMAC_IRQ_LINK_EVENT;
	if (status) {
		/* Save irq status for bottom-half processing. */
		spin_lock_irqsave(&priv->lock, flags);
		priv->irq0_stat |= status;
		spin_unlock_irqrestore(&priv->lock, flags);

		schedule_work(&priv->bcmgenet_irq_work);
	}

	return IRQ_HANDLED;
}

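/* Interrupt handling is split across the two INTRL2 controllers:
 * bcmgenet_isr1() services the priority Tx/Rx rings, while
 * bcmgenet_isr0() covers default ring 16, MDIO completion and other
 * miscellaneous sources. Link events are only latched into irq0_stat
 * here and processed in process context by bcmgenet_irq_task().
 */
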
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcmgenet_poll_controller(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Invoke the main RX/TX interrupt handler */
	disable_irq(priv->irq0);
	bcmgenet_isr0(priv->irq0, priv);
	enable_irq(priv->irq0);

	/* And the interrupt handler for RX/TX priority queues */
	disable_irq(priv->irq1);
	bcmgenet_isr1(priv->irq1, priv);
	enable_irq(priv->irq1);
}
#endif

static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
	u32 reg;

	reg = bcmgenet_rbuf_ctrl_get(priv);
	reg |= BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);

	reg &= ~BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);
}

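/* The set/clear pair above pulses bit 1 of the RBUF control register with
 * a 10us settle on each edge, acting as the UniMAC software reset; the
 * block comes back in its power-on defaults, which is why init_umac()
 * must reprogram it afterwards.
 */
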
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
				 unsigned char *addr)
{
	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			     (addr[2] << 8) | addr[3], UMAC_MAC0);
	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}

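/* Example: for the address 00:10:18:aa:bb:cc this writes
 * UMAC_MAC0 = 0x001018aa (first four octets, most significant first) and
 * UMAC_MAC1 = 0x0000bbcc (remaining two octets).
 */
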
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
	u32 reg;
	u32 dma_ctrl;

	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

	return dma_ctrl;
}

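/* The return value contains exactly the bits this function cleared (the
 * ring 16 buffer enable plus DMA_EN), so callers such as bcmgenet_open()
 * and bcmgenet_resume() can stash it across ring reconfiguration and hand
 * it back to bcmgenet_enable_dma() to restore the previous enable state.
 */
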
static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
{
	u32 reg;

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}

/* bcmgenet_hfb_clear
 *
 * Clear Hardware Filter Block and disable all filtering.
 */
static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
	u32 i;

	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);

	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
		bcmgenet_rdma_writel(priv, 0x0, i);

	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
		bcmgenet_hfb_reg_writel(priv, 0x0,
					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));

	for (i = 0; i < priv->hw_params->hfb_filter_cnt *
			priv->hw_params->hfb_filter_size; i++)
		bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
}

static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	bcmgenet_hfb_clear(priv);
}

static void bcmgenet_netif_start(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Start the network engine */
	bcmgenet_enable_rx_napi(priv);

	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

	bcmgenet_enable_tx_napi(priv);

	/* Monitor link interrupts now */
	bcmgenet_link_intr_enable(priv);

	phy_start(dev->phydev);
}

static int bcmgenet_open(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");

	/* Turn on the clock */
	clk_prepare_enable(priv->clk);

	/* If this is an internal GPHY, power it back on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (priv->internal_phy)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	/* take MAC out of reset */
	bcmgenet_umac_reset(priv);

	init_umac(priv);

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (priv->internal_phy) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto err_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	/* HFB init */
	bcmgenet_hfb_init(priv);

	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
		goto err_fini_dma;
	}

	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
		goto err_irq0;
	}

	ret = bcmgenet_mii_probe(dev);
	if (ret) {
		netdev_err(dev, "failed to connect to PHY\n");
		goto err_irq1;
	}

	bcmgenet_netif_start(dev);

	netif_tx_start_all_queues(dev);

	return 0;

err_irq1:
	free_irq(priv->irq1, priv);
err_irq0:
	free_irq(priv->irq0, priv);
err_fini_dma:
	bcmgenet_dma_teardown(priv);
	bcmgenet_fini_dma(priv);
err_clk_disable:
	if (priv->internal_phy)
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void bcmgenet_netif_stop(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	bcmgenet_disable_tx_napi(priv);
	netif_tx_disable(dev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	bcmgenet_dma_teardown(priv);

	/* Disable MAC transmit. TX DMA disabled must be done before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	phy_stop(dev->phydev);
	bcmgenet_disable_rx_napi(priv);
	bcmgenet_intr_disable(priv);

	/* Wait for pending work items to complete. Since interrupts are
	 * disabled no new work will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);
}

static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret = 0;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	bcmgenet_netif_stop(dev);

	/* Really kill the PHY state machine and disconnect from it */
	phy_disconnect(dev->phydev);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	if (priv->internal_phy)
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	clk_disable_unprepare(priv->clk);

	return ret;
}

static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = ring->priv;
	u32 p_index, c_index, intsts, intmsk;
	struct netdev_queue *txq;
	unsigned int free_bds;
	bool txq_stopped;

	if (!netif_msg_tx_err(priv))
		return;

	txq = netdev_get_tx_queue(priv->dev, ring->queue);

	spin_lock(&ring->lock);
	if (ring->index == DESC_INDEX) {
		intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
		intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
	} else {
		intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
		intmsk = 1 << ring->index;
	}
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
	txq_stopped = netif_tx_queue_stopped(txq);
	free_bds = ring->free_bds;
	spin_unlock(&ring->lock);

	netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
		  "TX queue status: %s, interrupts: %s\n"
		  "(sw)free_bds: %d (sw)size: %d\n"
		  "(sw)p_index: %d (hw)p_index: %d\n"
		  "(sw)c_index: %d (hw)c_index: %d\n"
		  "(sw)clean_p: %d (sw)write_p: %d\n"
		  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
		  ring->index, ring->queue,
		  txq_stopped ? "stopped" : "active",
		  intsts & intmsk ? "enabled" : "disabled",
		  free_bds, ring->size,
		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
		  ring->c_index, c_index & DMA_C_INDEX_MASK,
		  ring->clean_ptr, ring->write_ptr,
		  ring->cb_ptr, ring->end_ptr);
}

static void bcmgenet_timeout(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 int0_enable = 0;
	u32 int1_enable = 0;
	unsigned int q;

	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	for (q = 0; q < priv->hw_params->tx_queues; q++)
		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);

	bcmgenet_tx_reclaim_all(dev);

	for (q = 0; q < priv->hw_params->tx_queues; q++)
		int1_enable |= (1 << q);

	int0_enable = UMAC_IRQ_TXDMA_DONE;

	/* Re-enable TX interrupts if disabled */
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);

	netif_trans_update(dev);

	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

#define MAX_MC_COUNT	16

static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
					 unsigned char *addr,
					 int *i,
					 int *mc)
{
	u32 reg;

	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
			     UMAC_MDF_ADDR + (*i * 4));
	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
			     addr[4] << 8 | addr[5],
			     UMAC_MDF_ADDR + ((*i + 1) * 4));
	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
	reg |= (1 << (MAX_MC_COUNT - *mc));
	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
	*i += 2;
	(*mc)++;
}

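/* Each filter entry occupies two consecutive UMAC_MDF_ADDR words (two
 * octets in the first, four in the second), which is why *i advances by
 * two per address while *mc counts whole entries; the matching enable
 * bits in UMAC_MDF_CTRL are allocated from the top down as
 * MAX_MC_COUNT - *mc.
 */
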
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, mc;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Promiscuous mode */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* UniMac doesn't support ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		netdev_warn(dev, "ALLMULTI is not supported\n");
		return;
	}

	/* update MDF filter */
	i = 0;
	mc = 0;
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
	/* my own address.*/
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
	/* Unicast */
	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
		return;

	if (!netdev_uc_empty(dev))
		netdev_for_each_uc_addr(ha, dev)
			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
	/* Multicast */
	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
		return;

	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
}

/* Set the hardware MAC address. */
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Setting the MAC address at the hardware level is not possible
	 * without disabling the UniMAC RX/TX enable bits.
	 */
	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long tx_bytes = 0, tx_packets = 0;
	unsigned long rx_bytes = 0, rx_packets = 0;
	unsigned long rx_errors = 0, rx_dropped = 0;
	struct bcmgenet_tx_ring *tx_ring;
	struct bcmgenet_rx_ring *rx_ring;
	unsigned int q;

	for (q = 0; q < priv->hw_params->tx_queues; q++) {
		tx_ring = &priv->tx_rings[q];
		tx_bytes += tx_ring->bytes;
		tx_packets += tx_ring->packets;
	}
	tx_ring = &priv->tx_rings[DESC_INDEX];
	tx_bytes += tx_ring->bytes;
	tx_packets += tx_ring->packets;

	for (q = 0; q < priv->hw_params->rx_queues; q++) {
		rx_ring = &priv->rx_rings[q];

		rx_bytes += rx_ring->bytes;
		rx_packets += rx_ring->packets;
		rx_errors += rx_ring->errors;
		rx_dropped += rx_ring->dropped;
	}
	rx_ring = &priv->rx_rings[DESC_INDEX];
	rx_bytes += rx_ring->bytes;
	rx_packets += rx_ring->packets;
	rx_errors += rx_ring->errors;
	rx_dropped += rx_ring->dropped;

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_errors = rx_errors;
	dev->stats.rx_missed_errors = rx_errors;
	dev->stats.rx_dropped = rx_dropped;

	return &dev->stats;
}

static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open		= bcmgenet_open,
	.ndo_stop		= bcmgenet_close,
	.ndo_start_xmit		= bcmgenet_xmit,
	.ndo_tx_timeout		= bcmgenet_timeout,
	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
	.ndo_do_ioctl		= bcmgenet_ioctl,
	.ndo_set_features	= bcmgenet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcmgenet_poll_controller,
#endif
	.ndo_get_stats		= bcmgenet_get_stats,
};

/* Array of GENET hardware parameters/characteristics */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.tx_queues = 0,
		.tx_bds_per_q = 0,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
		.words_per_bd = 2,
	},
	[GENET_V2] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
			 GENET_HAS_MOCA_LINK_DET,
	},
	[GENET_V4] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
	},
	[GENET_V5] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
	},
};

/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;
	u16 gphy_rev;

	if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	if (major == 6)
		major = 5;
	else if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

	/* Store the integrated PHY revision for the MDIO probing function
	 * to pass this information to the PHY driver. The PHY driver expects
	 * to find the PHY major revision in bits 15:8 while the GENET register
	 * stores that information in bits 7:0, account for that.
	 *
	 * On newer chips, starting with PHY revision G0, a new scheme is
	 * deployed similar to the Starfighter 2 switch with GPHY major
	 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
	 * is reserved as well as special value 0x01ff, we have a small
	 * heuristic to check for the new GPHY revision and re-arrange things
	 * so the GPHY driver is happy.
	 */
	gphy_rev = reg & 0xffff;

	if (GENET_IS_V5(priv)) {
		/* The EPHY revision should come from the MDIO registers of
		 * the PHY not from GENET.
		 */
		if (gphy_rev != 0) {
			pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
				gphy_rev);
		}
	/* This is reserved so should require special treatment */
	} else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
		return;
	/* This is the good old scheme, just GPHY major, no minor nor patch */
	} else if ((gphy_rev & 0xf0) != 0) {
		priv->gphy_rev = gphy_rev << 8;
	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
	} else if ((gphy_rev & 0xff00) != 0) {
		priv->gphy_rev = gphy_rev;
	}

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
#endif

	pr_debug("Configuration for version: %d\n"
		 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
		 "BP << en: %2d, BP msk: 0x%05x\n"
		 "HFB count: %2d, QTAQ msk: 0x%05x\n"
		 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		 "RDMA: 0x%05x, TDMA: 0x%05x\n"
		 "Words/BD: %d\n",
		 priv->version,
		 params->tx_queues, params->tx_bds_per_q,
		 params->rx_queues, params->rx_bds_per_q,
		 params->bp_in_en_shift, params->bp_in_mask,
		 params->hfb_filter_cnt, params->qtag_mask,
		 params->tbuf_offset, params->hfb_offset,
		 params->hfb_reg_offset,
		 params->rdma_offset, params->tdma_offset,
		 params->words_per_bd);
}

static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
	{ .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
	{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);

static int bcmgenet_probe(struct platform_device *pdev)
{
	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	struct bcmgenet_priv *priv;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	unsigned int i;
	int err = -EIO;
	const char *phy_mode_str;

	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
				 GENET_MAX_MQ_CNT + 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	}

	if (dn) {
		of_id = of_match_node(bcmgenet_match, dn);
		if (!of_id)
			return -EINVAL;
	}

	priv = netdev_priv(dev);
	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (!priv->irq0 || !priv->irq1) {
		dev_err(&pdev->dev, "can't find IRQs\n");
		err = -EINVAL;
		goto err;
	}

	if (dn) {
		macaddr = of_get_mac_address(dn);
		if (IS_ERR(macaddr)) {
			dev_err(&pdev->dev, "can't find MAC address\n");
			err = -EINVAL;
			goto err;
		}
	} else {
		macaddr = pd->mac_address;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
		goto err;
	}

	spin_lock_init(&priv->lock);

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	ether_addr_copy(dev->dev_addr, macaddr);
	dev->watchdog_timeo = 2 * HZ;
	dev->ethtool_ops = &bcmgenet_ethtool_ops;
	dev->netdev_ops = &bcmgenet_netdev_ops;

	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

	/* Set hardware features */
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = true;
	err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
			       dev->name, priv);
	if (!err)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom to account for any possible
	 * features enabling/disabling at runtime
	 */
	dev->needed_headroom += 64;

	netdev_boot_setup_check(dev);

	priv->dev = dev;
	priv->pdev = pdev;
	if (of_id)
		priv->version = (enum bcmgenet_version)of_id->data;
	else
		priv->version = pd->genet_version;

	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk)) {
		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
		priv->clk = NULL;
	}

	clk_prepare_enable(priv->clk);

	bcmgenet_set_hw_params(priv);

	/* Mii wait queue */
	init_waitqueue_head(&priv->wq);
	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol)) {
		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
		priv->clk_wol = NULL;
	}

	priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
	if (IS_ERR(priv->clk_eee)) {
		dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
		priv->clk_eee = NULL;
	}

	/* If this is an internal GPHY, power it on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
	    !strcasecmp(phy_mode_str, "internal"))
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	reset_umac(priv);

	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_clk_disable;

	/* setup number of real queues + 1 (GENET_V1 has 0 hardware queues
	 * just the ring 16 descriptor based TX
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

	/* Set default coalescing parameters */
	for (i = 0; i < priv->hw_params->rx_queues; i++)
		priv->rx_rings[i].rx_max_coalesced_frames = 1;
	priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;

	/* libphy will determine the link state */
	netif_carrier_off(dev);

	/* Turn off the main clock, WOL clock is handled separately */
	clk_disable_unprepare(priv->clk);

	err = register_netdev(dev);
	if (err)
		goto err;

	return err;

err_clk_disable:
	clk_disable_unprepare(priv->clk);
err:
	free_netdev(dev);
	return err;
}

static int bcmgenet_remove(struct platform_device *pdev)
{
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int bcmgenet_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	int ret;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	/* Turn on the clock */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* If this is an internal GPHY, power it back on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (priv->internal_phy)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	bcmgenet_umac_reset(priv);

	init_umac(priv);

	/* From WOL-enabled suspend, switch to regular clock */
	if (priv->wolopts)
		clk_disable_unprepare(priv->clk_wol);

	phy_init_hw(dev->phydev);

	/* Speed settings must be restored */
	bcmgenet_mii_config(priv->dev, false);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (priv->internal_phy) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	if (priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto out_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	if (!device_may_wakeup(d))
		phy_resume(dev->phydev);

	if (priv->eee.eee_enabled)
		bcmgenet_eee_enable_set(dev, true);

	bcmgenet_netif_start(dev);

	netif_device_attach(dev);

	return 0;

out_clk_disable:
	if (priv->internal_phy)
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int bcmgenet_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcmgenet_netif_stop(dev);

	if (!device_may_wakeup(d))
		phy_suspend(dev->phydev);

	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts) {
		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
		clk_prepare_enable(priv->clk_wol);
	} else if (priv->internal_phy) {
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	}

	/* Turn off the clocks */
	clk_disable_unprepare(priv->clk);

	if (ret)
		bcmgenet_resume(d);

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);

static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove	= bcmgenet_remove,
	.driver	= {
		.name	= "bcmgenet",
		.of_match_table = bcmgenet_match,
		.pm	= &bcmgenet_pm_ops,
	},
};
module_platform_driver(bcmgenet_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");