// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define MTK_ETHTOOL_STAT(x) { #x, \
		      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "esw", "gp0", "gp1", "gp2", "trgpll", "sgmii_tx250m",
	"sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll"
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

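/* MDIO transactions go through the PHY indirect access control (IAC)
 * register; the helpers below poll its busy bit before and after every
 * access so that only one transaction is in flight at a time.
 */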
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
			   u32 phy_register, u32 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

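/* GMAC0 can be wired up over TRGMII; when the PHY link speed changes the
 * interface mode, the trgpll clock rate and the RX/TX clock controls all
 * have to be reprogrammed to match, which the helper below takes care of.
 */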
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
{
	u32 val;
	int ret;

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

static void mtk_gmac_sgmii_hw_setup(struct mtk_eth *eth, int mac_id)
{
	u32 val;

	/* Setup the link timer and QPHY power up inside SGMIISYS */
	regmap_write(eth->sgmiisys, SGMSYS_PCS_LINK_TIMER,
		     SGMII_LINK_TIMER_DEFAULT);

	regmap_read(eth->sgmiisys, SGMSYS_SGMII_MODE, &val);
	val |= SGMII_REMOTE_FAULT_DIS;
	regmap_write(eth->sgmiisys, SGMSYS_SGMII_MODE, val);

	regmap_read(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, &val);
	val |= SGMII_AN_RESTART;
	regmap_write(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, val);

	regmap_read(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
	val &= ~SGMII_PHYA_PWD;
	regmap_write(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, val);

	/* Determine MUX for which GMAC uses the SGMII interface */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_DUAL_GMAC_SHARED_SGMII)) {
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_SGMII_MASK;
		val |= !mac_id ? SYSCFG0_SGMII_GMAC1 : SYSCFG0_SGMII_GMAC2;
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

		dev_info(eth->dev, "setup shared sgmii for gmac=%d\n",
			 mac_id);
	}

	/* Setup the GMAC1 going through SGMII path when SoC also support
	 * ESW on GMAC1
	 */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_ESW | MTK_GMAC1_SGMII) &&
	    !mac_id) {
		mtk_w32(eth, 0, MTK_MAC_MISC);
		dev_info(eth->dev, "setup gmac1 going through sgmii");
	}
}

static void mtk_phy_link_adjust(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
		  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
		  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
		  MAC_MCR_BACKPR_EN;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	switch (dev->phydev->speed) {
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
	    !mac->id && !mac->trgmii)
		mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);

	if (dev->phydev->link)
		mcr |= MAC_MCR_FORCE_LINK;

	if (dev->phydev->duplex) {
		mcr |= MAC_MCR_FORCE_DPX;

		if (dev->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (dev->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = linkmode_adv_to_lcl_adv_t(dev->phydev->advertising);
		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= MAC_MCR_FORCE_RX_FC;

		netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
			  flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
			  flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

	if (!of_phy_is_fixed_link(mac->of_node))
		phy_print_status(dev->phydev);
}

static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
				struct device_node *phy_node)
{
	struct phy_device *phydev;
	int phy_mode;

	phy_mode = of_get_phy_mode(phy_node);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
		return -EINVAL;
	}

	phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
				mtk_phy_link_adjust, 0, phy_mode);
	if (!phydev) {
		dev_err(eth->dev, "could not connect to PHY\n");
		return -ENODEV;
	}

	dev_info(eth->dev,
		 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
		 mac->id, phydev_name(phydev), phydev->phy_id,
		 phydev->drv->name);

	return 0;
}

static int mtk_phy_connect(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth;
	struct device_node *np;
	u32 val;

	eth = mac->hw;
	np = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!np && of_phy_is_fixed_link(mac->of_node))
		if (!of_phy_register_fixed_link(mac->of_node))
			np = of_node_get(mac->of_node);
	if (!np)
		return -ENODEV;

	mac->ge_mode = 0;
	switch (of_get_phy_mode(np)) {
	case PHY_INTERFACE_MODE_TRGMII:
		mac->trgmii = true;
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII:
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII))
			mtk_gmac_sgmii_hw_setup(eth, mac->id);
		break;
	case PHY_INTERFACE_MODE_MII:
		mac->ge_mode = 1;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		mac->ge_mode = 2;
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!mac->id)
			goto err_phy;
		mac->ge_mode = 3;
		break;
	default:
		goto err_phy;
	}

	/* put the gmac into the right mode */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
	val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	/* couple phydev to net_device */
	if (mtk_phy_connect_node(eth, mac, np))
		goto err_phy;

	of_node_put(np);

	return 0;

err_phy:
	if (of_phy_is_fixed_link(mac->of_node))
		of_phy_deregister_fixed_link(mac->of_node);
	of_node_put(np);
	dev_err(eth->dev, "%s: invalid phy\n", __func__);
	return -EINVAL;
}

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

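/* TX completions are signalled by the QDMA engine and RX completions by the
 * PDMA engine; each has its own interrupt mask register, protected by its
 * own spinlock in the helpers below.
 */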
static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
		MTK_GDMA_MAC_ADRH(mac->id));
	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
		(macaddr[4] << 8) | macaddr[5],
		MTK_GDMA_MAC_ADRL(mac->id));
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
					mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats = mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

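/* RX buffers are allocated as page fragments: mtk_max_frag_size() accounts
 * for NET_SKB_PAD and the skb_shared_info tail, while mtk_max_buf_size()
 * yields the usable payload length programmed into the RX descriptor.
 */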
static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
					       &eth->phy_scratch_ring,
					       GFP_ATOMIC);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}

static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}

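/* mtk_tx_unmap() undoes the streaming DMA mapping of a TX buffer; fragment
 * descriptors carry the MTK_DMA_DUMMY_DESC marker instead of a real skb and
 * are therefore unmapped but not freed here.
 */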
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
		dma_unmap_single(eth->dev,
				 dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
		dma_unmap_page(eth->dev,
			       dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
	}
	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}

652 static int mtk_tx_map(struct sk_buff
*skb
, struct net_device
*dev
,
653 int tx_num
, struct mtk_tx_ring
*ring
, bool gso
)
655 struct mtk_mac
*mac
= netdev_priv(dev
);
656 struct mtk_eth
*eth
= mac
->hw
;
657 struct mtk_tx_dma
*itxd
, *txd
;
658 struct mtk_tx_buf
*itx_buf
, *tx_buf
;
659 dma_addr_t mapped_addr
;
660 unsigned int nr_frags
;
664 itxd
= ring
->next_free
;
665 if (itxd
== ring
->last_free
)
668 /* set the forward port */
669 fport
= (mac
->id
+ 1) << TX_DMA_FPORT_SHIFT
;
672 itx_buf
= mtk_desc_to_tx_buf(ring
, itxd
);
673 memset(itx_buf
, 0, sizeof(*itx_buf
));
678 /* TX Checksum offload */
679 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
680 txd4
|= TX_DMA_CHKSUM
;
682 /* VLAN header offload */
683 if (skb_vlan_tag_present(skb
))
684 txd4
|= TX_DMA_INS_VLAN
| skb_vlan_tag_get(skb
);
686 mapped_addr
= dma_map_single(eth
->dev
, skb
->data
,
687 skb_headlen(skb
), DMA_TO_DEVICE
);
688 if (unlikely(dma_mapping_error(eth
->dev
, mapped_addr
)))
691 WRITE_ONCE(itxd
->txd1
, mapped_addr
);
692 itx_buf
->flags
|= MTK_TX_FLAGS_SINGLE0
;
693 itx_buf
->flags
|= (!mac
->id
) ? MTK_TX_FLAGS_FPORT0
:
695 dma_unmap_addr_set(itx_buf
, dma_addr0
, mapped_addr
);
696 dma_unmap_len_set(itx_buf
, dma_len0
, skb_headlen(skb
));
700 nr_frags
= skb_shinfo(skb
)->nr_frags
;
701 for (i
= 0; i
< nr_frags
; i
++) {
702 struct skb_frag_struct
*frag
= &skb_shinfo(skb
)->frags
[i
];
703 unsigned int offset
= 0;
704 int frag_size
= skb_frag_size(frag
);
707 bool last_frag
= false;
708 unsigned int frag_map_size
;
710 txd
= mtk_qdma_phys_to_virt(ring
, txd
->txd2
);
711 if (txd
== ring
->last_free
)
715 frag_map_size
= min(frag_size
, MTK_TX_DMA_BUF_LEN
);
716 mapped_addr
= skb_frag_dma_map(eth
->dev
, frag
, offset
,
719 if (unlikely(dma_mapping_error(eth
->dev
, mapped_addr
)))
722 if (i
== nr_frags
- 1 &&
723 (frag_size
- frag_map_size
) == 0)
726 WRITE_ONCE(txd
->txd1
, mapped_addr
);
727 WRITE_ONCE(txd
->txd3
, (TX_DMA_SWC
|
728 TX_DMA_PLEN0(frag_map_size
) |
729 last_frag
* TX_DMA_LS0
));
730 WRITE_ONCE(txd
->txd4
, fport
);
732 tx_buf
= mtk_desc_to_tx_buf(ring
, txd
);
733 memset(tx_buf
, 0, sizeof(*tx_buf
));
734 tx_buf
->skb
= (struct sk_buff
*)MTK_DMA_DUMMY_DESC
;
735 tx_buf
->flags
|= MTK_TX_FLAGS_PAGE0
;
736 tx_buf
->flags
|= (!mac
->id
) ? MTK_TX_FLAGS_FPORT0
:
739 dma_unmap_addr_set(tx_buf
, dma_addr0
, mapped_addr
);
740 dma_unmap_len_set(tx_buf
, dma_len0
, frag_map_size
);
741 frag_size
-= frag_map_size
;
742 offset
+= frag_map_size
;
746 /* store skb to cleanup */
749 WRITE_ONCE(itxd
->txd4
, txd4
);
750 WRITE_ONCE(itxd
->txd3
, (TX_DMA_SWC
| TX_DMA_PLEN0(skb_headlen(skb
)) |
751 (!nr_frags
* TX_DMA_LS0
)));
753 netdev_sent_queue(dev
, skb
->len
);
754 skb_tx_timestamp(skb
);
756 ring
->next_free
= mtk_qdma_phys_to_virt(ring
, txd
->txd2
);
757 atomic_sub(n_desc
, &ring
->free_count
);
759 /* make sure that all changes to the dma ring are flushed before we
764 if (netif_xmit_stopped(netdev_get_tx_queue(dev
, 0)) ||
766 mtk_w32(eth
, txd
->txd2
, MTK_QTX_CTX_PTR
);
772 tx_buf
= mtk_desc_to_tx_buf(ring
, itxd
);
775 mtk_tx_unmap(eth
, tx_buf
);
777 itxd
->txd3
= TX_DMA_LS0
| TX_DMA_OWNER_CPU
;
778 itxd
= mtk_qdma_phys_to_virt(ring
, itxd
->txd2
);
779 } while (itxd
!= txd
);
static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	struct skb_frag_struct *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(frag->size,
					       MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static void mtk_stop_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_stop_queue(eth->netdev[i]);
	}
}

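/* Both MACs transmit over the same QDMA ring, so mtk_start_xmit() serialises
 * on eth->page_lock and the per-netdev queues are stopped and woken together
 * based on the shared ring's free descriptor count.
 */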
838 static int mtk_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
840 struct mtk_mac
*mac
= netdev_priv(dev
);
841 struct mtk_eth
*eth
= mac
->hw
;
842 struct mtk_tx_ring
*ring
= ð
->tx_ring
;
843 struct net_device_stats
*stats
= &dev
->stats
;
847 /* normally we can rely on the stack not calling this more than once,
848 * however we have 2 queues running on the same ring so we need to lock
851 spin_lock(ð
->page_lock
);
853 if (unlikely(test_bit(MTK_RESETTING
, ð
->state
)))
856 tx_num
= mtk_cal_txd_req(skb
);
857 if (unlikely(atomic_read(&ring
->free_count
) <= tx_num
)) {
859 netif_err(eth
, tx_queued
, dev
,
860 "Tx Ring full when queue awake!\n");
861 spin_unlock(ð
->page_lock
);
862 return NETDEV_TX_BUSY
;
865 /* TSO: fill MSS info in tcp checksum field */
866 if (skb_is_gso(skb
)) {
867 if (skb_cow_head(skb
, 0)) {
868 netif_warn(eth
, tx_err
, dev
,
869 "GSO expand head fail.\n");
873 if (skb_shinfo(skb
)->gso_type
&
874 (SKB_GSO_TCPV4
| SKB_GSO_TCPV6
)) {
876 tcp_hdr(skb
)->check
= htons(skb_shinfo(skb
)->gso_size
);
880 if (mtk_tx_map(skb
, dev
, tx_num
, ring
, gso
) < 0)
883 if (unlikely(atomic_read(&ring
->free_count
) <= ring
->thresh
))
886 spin_unlock(ð
->page_lock
);
891 spin_unlock(ð
->page_lock
);
893 dev_kfree_skb_any(skb
);
897 static struct mtk_rx_ring
*mtk_get_rx_ring(struct mtk_eth
*eth
)
900 struct mtk_rx_ring
*ring
;
904 return ð
->rx_ring
[0];
906 for (i
= 0; i
< MTK_MAX_RX_RING_NUM
; i
++) {
907 ring
= ð
->rx_ring
[i
];
908 idx
= NEXT_RX_DESP_IDX(ring
->calc_idx
, ring
->dma_size
);
909 if (ring
->dma
[idx
].rxd2
& RX_DMA_DONE
) {
910 ring
->calc_idx_update
= true;
918 static void mtk_update_rx_cpu_idx(struct mtk_eth
*eth
)
920 struct mtk_rx_ring
*ring
;
924 ring
= ð
->rx_ring
[0];
925 mtk_w32(eth
, ring
->calc_idx
, ring
->crx_idx_reg
);
927 for (i
= 0; i
< MTK_MAX_RX_RING_NUM
; i
++) {
928 ring
= ð
->rx_ring
[i
];
929 if (ring
->calc_idx_update
) {
930 ring
->calc_idx_update
= false;
931 mtk_w32(eth
, ring
->calc_idx
, ring
->crx_idx_reg
);
937 static int mtk_poll_rx(struct napi_struct
*napi
, int budget
,
940 struct mtk_rx_ring
*ring
;
944 struct mtk_rx_dma
*rxd
, trxd
;
947 while (done
< budget
) {
948 struct net_device
*netdev
;
953 ring
= mtk_get_rx_ring(eth
);
957 idx
= NEXT_RX_DESP_IDX(ring
->calc_idx
, ring
->dma_size
);
958 rxd
= &ring
->dma
[idx
];
959 data
= ring
->data
[idx
];
961 mtk_rx_get_desc(&trxd
, rxd
);
962 if (!(trxd
.rxd2
& RX_DMA_DONE
))
965 /* find out which mac the packet come from. values start at 1 */
966 mac
= (trxd
.rxd4
>> RX_DMA_FPORT_SHIFT
) &
970 if (unlikely(mac
< 0 || mac
>= MTK_MAC_COUNT
||
974 netdev
= eth
->netdev
[mac
];
976 if (unlikely(test_bit(MTK_RESETTING
, ð
->state
)))
979 /* alloc new buffer */
980 new_data
= napi_alloc_frag(ring
->frag_size
);
981 if (unlikely(!new_data
)) {
982 netdev
->stats
.rx_dropped
++;
985 dma_addr
= dma_map_single(eth
->dev
,
986 new_data
+ NET_SKB_PAD
,
989 if (unlikely(dma_mapping_error(eth
->dev
, dma_addr
))) {
990 skb_free_frag(new_data
);
991 netdev
->stats
.rx_dropped
++;
996 skb
= build_skb(data
, ring
->frag_size
);
997 if (unlikely(!skb
)) {
998 skb_free_frag(new_data
);
999 netdev
->stats
.rx_dropped
++;
1002 skb_reserve(skb
, NET_SKB_PAD
+ NET_IP_ALIGN
);
1004 dma_unmap_single(eth
->dev
, trxd
.rxd1
,
1005 ring
->buf_size
, DMA_FROM_DEVICE
);
1006 pktlen
= RX_DMA_GET_PLEN0(trxd
.rxd2
);
1008 skb_put(skb
, pktlen
);
1009 if (trxd
.rxd4
& RX_DMA_L4_VALID
)
1010 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1012 skb_checksum_none_assert(skb
);
1013 skb
->protocol
= eth_type_trans(skb
, netdev
);
1015 if (netdev
->features
& NETIF_F_HW_VLAN_CTAG_RX
&&
1016 RX_DMA_VID(trxd
.rxd3
))
1017 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
),
1018 RX_DMA_VID(trxd
.rxd3
));
1019 skb_record_rx_queue(skb
, 0);
1020 napi_gro_receive(napi
, skb
);
1022 ring
->data
[idx
] = new_data
;
1023 rxd
->rxd1
= (unsigned int)dma_addr
;
1026 rxd
->rxd2
= RX_DMA_PLEN0(ring
->buf_size
);
1028 ring
->calc_idx
= idx
;
1035 /* make sure that all changes to the dma ring are flushed before
1039 mtk_update_rx_cpu_idx(eth
);
1045 static int mtk_poll_tx(struct mtk_eth
*eth
, int budget
)
1047 struct mtk_tx_ring
*ring
= ð
->tx_ring
;
1048 struct mtk_tx_dma
*desc
;
1049 struct sk_buff
*skb
;
1050 struct mtk_tx_buf
*tx_buf
;
1051 unsigned int done
[MTK_MAX_DEVS
];
1052 unsigned int bytes
[MTK_MAX_DEVS
];
1056 memset(done
, 0, sizeof(done
));
1057 memset(bytes
, 0, sizeof(bytes
));
1059 cpu
= mtk_r32(eth
, MTK_QTX_CRX_PTR
);
1060 dma
= mtk_r32(eth
, MTK_QTX_DRX_PTR
);
1062 desc
= mtk_qdma_phys_to_virt(ring
, cpu
);
1064 while ((cpu
!= dma
) && budget
) {
1065 u32 next_cpu
= desc
->txd2
;
1068 desc
= mtk_qdma_phys_to_virt(ring
, desc
->txd2
);
1069 if ((desc
->txd3
& TX_DMA_OWNER_CPU
) == 0)
1072 tx_buf
= mtk_desc_to_tx_buf(ring
, desc
);
1073 if (tx_buf
->flags
& MTK_TX_FLAGS_FPORT1
)
1080 if (skb
!= (struct sk_buff
*)MTK_DMA_DUMMY_DESC
) {
1081 bytes
[mac
] += skb
->len
;
1085 mtk_tx_unmap(eth
, tx_buf
);
1087 ring
->last_free
= desc
;
1088 atomic_inc(&ring
->free_count
);
1093 mtk_w32(eth
, cpu
, MTK_QTX_CRX_PTR
);
1095 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
1096 if (!eth
->netdev
[i
] || !done
[i
])
1098 netdev_completed_queue(eth
->netdev
[i
], done
[i
], bytes
[i
]);
1102 if (mtk_queue_stopped(eth
) &&
1103 (atomic_read(&ring
->free_count
) > ring
->thresh
))
1104 mtk_wake_queue(eth
);
1109 static void mtk_handle_status_irq(struct mtk_eth
*eth
)
1111 u32 status2
= mtk_r32(eth
, MTK_INT_STATUS2
);
1113 if (unlikely(status2
& (MTK_GDM1_AF
| MTK_GDM2_AF
))) {
1114 mtk_stats_update(eth
);
1115 mtk_w32(eth
, (MTK_GDM1_AF
| MTK_GDM2_AF
),
1120 static int mtk_napi_tx(struct napi_struct
*napi
, int budget
)
1122 struct mtk_eth
*eth
= container_of(napi
, struct mtk_eth
, tx_napi
);
1126 mtk_handle_status_irq(eth
);
1127 mtk_w32(eth
, MTK_TX_DONE_INT
, MTK_QMTK_INT_STATUS
);
1128 tx_done
= mtk_poll_tx(eth
, budget
);
1130 if (unlikely(netif_msg_intr(eth
))) {
1131 status
= mtk_r32(eth
, MTK_QMTK_INT_STATUS
);
1132 mask
= mtk_r32(eth
, MTK_QDMA_INT_MASK
);
1134 "done tx %d, intr 0x%08x/0x%x\n",
1135 tx_done
, status
, mask
);
1138 if (tx_done
== budget
)
1141 status
= mtk_r32(eth
, MTK_QMTK_INT_STATUS
);
1142 if (status
& MTK_TX_DONE_INT
)
1145 napi_complete(napi
);
1146 mtk_tx_irq_enable(eth
, MTK_TX_DONE_INT
);
1151 static int mtk_napi_rx(struct napi_struct
*napi
, int budget
)
1153 struct mtk_eth
*eth
= container_of(napi
, struct mtk_eth
, rx_napi
);
1156 int remain_budget
= budget
;
1158 mtk_handle_status_irq(eth
);
1161 mtk_w32(eth
, MTK_RX_DONE_INT
, MTK_PDMA_INT_STATUS
);
1162 rx_done
= mtk_poll_rx(napi
, remain_budget
, eth
);
1164 if (unlikely(netif_msg_intr(eth
))) {
1165 status
= mtk_r32(eth
, MTK_PDMA_INT_STATUS
);
1166 mask
= mtk_r32(eth
, MTK_PDMA_INT_MASK
);
1168 "done rx %d, intr 0x%08x/0x%x\n",
1169 rx_done
, status
, mask
);
1171 if (rx_done
== remain_budget
)
1174 status
= mtk_r32(eth
, MTK_PDMA_INT_STATUS
);
1175 if (status
& MTK_RX_DONE_INT
) {
1176 remain_budget
-= rx_done
;
1179 napi_complete(napi
);
1180 mtk_rx_irq_enable(eth
, MTK_RX_DONE_INT
);
1182 return rx_done
+ budget
- remain_budget
;
1185 static int mtk_tx_alloc(struct mtk_eth
*eth
)
1187 struct mtk_tx_ring
*ring
= ð
->tx_ring
;
1188 int i
, sz
= sizeof(*ring
->dma
);
1190 ring
->buf
= kcalloc(MTK_DMA_SIZE
, sizeof(*ring
->buf
),
1195 ring
->dma
= dma_alloc_coherent(eth
->dev
, MTK_DMA_SIZE
* sz
,
1196 &ring
->phys
, GFP_ATOMIC
);
1200 for (i
= 0; i
< MTK_DMA_SIZE
; i
++) {
1201 int next
= (i
+ 1) % MTK_DMA_SIZE
;
1202 u32 next_ptr
= ring
->phys
+ next
* sz
;
1204 ring
->dma
[i
].txd2
= next_ptr
;
1205 ring
->dma
[i
].txd3
= TX_DMA_LS0
| TX_DMA_OWNER_CPU
;
1208 atomic_set(&ring
->free_count
, MTK_DMA_SIZE
- 2);
1209 ring
->next_free
= &ring
->dma
[0];
1210 ring
->last_free
= &ring
->dma
[MTK_DMA_SIZE
- 1];
1211 ring
->thresh
= MAX_SKB_FRAGS
;
1213 /* make sure that all changes to the dma ring are flushed before we
1218 mtk_w32(eth
, ring
->phys
, MTK_QTX_CTX_PTR
);
1219 mtk_w32(eth
, ring
->phys
, MTK_QTX_DTX_PTR
);
1221 ring
->phys
+ ((MTK_DMA_SIZE
- 1) * sz
),
1224 ring
->phys
+ ((MTK_DMA_SIZE
- 1) * sz
),
1226 mtk_w32(eth
, (QDMA_RES_THRES
<< 8) | QDMA_RES_THRES
, MTK_QTX_CFG(0));
1234 static void mtk_tx_clean(struct mtk_eth
*eth
)
1236 struct mtk_tx_ring
*ring
= ð
->tx_ring
;
1240 for (i
= 0; i
< MTK_DMA_SIZE
; i
++)
1241 mtk_tx_unmap(eth
, &ring
->buf
[i
]);
1247 dma_free_coherent(eth
->dev
,
1248 MTK_DMA_SIZE
* sizeof(*ring
->dma
),
1255 static int mtk_rx_alloc(struct mtk_eth
*eth
, int ring_no
, int rx_flag
)
1257 struct mtk_rx_ring
*ring
;
1258 int rx_data_len
, rx_dma_size
;
1262 if (rx_flag
== MTK_RX_FLAGS_QDMA
) {
1265 ring
= ð
->rx_ring_qdma
;
1268 ring
= ð
->rx_ring
[ring_no
];
1271 if (rx_flag
== MTK_RX_FLAGS_HWLRO
) {
1272 rx_data_len
= MTK_MAX_LRO_RX_LENGTH
;
1273 rx_dma_size
= MTK_HW_LRO_DMA_SIZE
;
1275 rx_data_len
= ETH_DATA_LEN
;
1276 rx_dma_size
= MTK_DMA_SIZE
;
1279 ring
->frag_size
= mtk_max_frag_size(rx_data_len
);
1280 ring
->buf_size
= mtk_max_buf_size(ring
->frag_size
);
1281 ring
->data
= kcalloc(rx_dma_size
, sizeof(*ring
->data
),
1286 for (i
= 0; i
< rx_dma_size
; i
++) {
1287 ring
->data
[i
] = netdev_alloc_frag(ring
->frag_size
);
1292 ring
->dma
= dma_alloc_coherent(eth
->dev
,
1293 rx_dma_size
* sizeof(*ring
->dma
),
1294 &ring
->phys
, GFP_ATOMIC
);
1298 for (i
= 0; i
< rx_dma_size
; i
++) {
1299 dma_addr_t dma_addr
= dma_map_single(eth
->dev
,
1300 ring
->data
[i
] + NET_SKB_PAD
,
1303 if (unlikely(dma_mapping_error(eth
->dev
, dma_addr
)))
1305 ring
->dma
[i
].rxd1
= (unsigned int)dma_addr
;
1307 ring
->dma
[i
].rxd2
= RX_DMA_PLEN0(ring
->buf_size
);
1309 ring
->dma_size
= rx_dma_size
;
1310 ring
->calc_idx_update
= false;
1311 ring
->calc_idx
= rx_dma_size
- 1;
1312 ring
->crx_idx_reg
= MTK_PRX_CRX_IDX_CFG(ring_no
);
1313 /* make sure that all changes to the dma ring are flushed before we
1318 mtk_w32(eth
, ring
->phys
, MTK_PRX_BASE_PTR_CFG(ring_no
) + offset
);
1319 mtk_w32(eth
, rx_dma_size
, MTK_PRX_MAX_CNT_CFG(ring_no
) + offset
);
1320 mtk_w32(eth
, ring
->calc_idx
, ring
->crx_idx_reg
+ offset
);
1321 mtk_w32(eth
, MTK_PST_DRX_IDX_CFG(ring_no
), MTK_PDMA_RST_IDX
+ offset
);
1326 static void mtk_rx_clean(struct mtk_eth
*eth
, struct mtk_rx_ring
*ring
)
1330 if (ring
->data
&& ring
->dma
) {
1331 for (i
= 0; i
< ring
->dma_size
; i
++) {
1334 if (!ring
->dma
[i
].rxd1
)
1336 dma_unmap_single(eth
->dev
,
1340 skb_free_frag(ring
->data
[i
]);
1347 dma_free_coherent(eth
->dev
,
1348 ring
->dma_size
* sizeof(*ring
->dma
),
1355 static int mtk_hwlro_rx_init(struct mtk_eth
*eth
)
1358 u32 ring_ctrl_dw1
= 0, ring_ctrl_dw2
= 0, ring_ctrl_dw3
= 0;
1359 u32 lro_ctrl_dw0
= 0, lro_ctrl_dw3
= 0;
1361 /* set LRO rings to auto-learn modes */
1362 ring_ctrl_dw2
|= MTK_RING_AUTO_LERAN_MODE
;
1364 /* validate LRO ring */
1365 ring_ctrl_dw2
|= MTK_RING_VLD
;
1367 /* set AGE timer (unit: 20us) */
1368 ring_ctrl_dw2
|= MTK_RING_AGE_TIME_H
;
1369 ring_ctrl_dw1
|= MTK_RING_AGE_TIME_L
;
1371 /* set max AGG timer (unit: 20us) */
1372 ring_ctrl_dw2
|= MTK_RING_MAX_AGG_TIME
;
1374 /* set max LRO AGG count */
1375 ring_ctrl_dw2
|= MTK_RING_MAX_AGG_CNT_L
;
1376 ring_ctrl_dw3
|= MTK_RING_MAX_AGG_CNT_H
;
1378 for (i
= 1; i
< MTK_MAX_RX_RING_NUM
; i
++) {
1379 mtk_w32(eth
, ring_ctrl_dw1
, MTK_LRO_CTRL_DW1_CFG(i
));
1380 mtk_w32(eth
, ring_ctrl_dw2
, MTK_LRO_CTRL_DW2_CFG(i
));
1381 mtk_w32(eth
, ring_ctrl_dw3
, MTK_LRO_CTRL_DW3_CFG(i
));
1384 /* IPv4 checksum update enable */
1385 lro_ctrl_dw0
|= MTK_L3_CKS_UPD_EN
;
1387 /* switch priority comparison to packet count mode */
1388 lro_ctrl_dw0
|= MTK_LRO_ALT_PKT_CNT_MODE
;
1390 /* bandwidth threshold setting */
1391 mtk_w32(eth
, MTK_HW_LRO_BW_THRE
, MTK_PDMA_LRO_CTRL_DW2
);
1393 /* auto-learn score delta setting */
1394 mtk_w32(eth
, MTK_HW_LRO_REPLACE_DELTA
, MTK_PDMA_LRO_ALT_SCORE_DELTA
);
1396 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
1397 mtk_w32(eth
, (MTK_HW_LRO_TIMER_UNIT
<< 16) | MTK_HW_LRO_REFRESH_TIME
,
1398 MTK_PDMA_LRO_ALT_REFRESH_TIMER
);
1400 /* set HW LRO mode & the max aggregation count for rx packets */
1401 lro_ctrl_dw3
|= MTK_ADMA_MODE
| (MTK_HW_LRO_MAX_AGG_CNT
& 0xff);
1403 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
1404 lro_ctrl_dw3
|= MTK_LRO_MIN_RXD_SDL
;
1407 lro_ctrl_dw0
|= MTK_LRO_EN
;
1409 mtk_w32(eth
, lro_ctrl_dw3
, MTK_PDMA_LRO_CTRL_DW3
);
1410 mtk_w32(eth
, lro_ctrl_dw0
, MTK_PDMA_LRO_CTRL_DW0
);
1415 static void mtk_hwlro_rx_uninit(struct mtk_eth
*eth
)
1420 /* relinquish lro rings, flush aggregated packets */
1421 mtk_w32(eth
, MTK_LRO_RING_RELINQUISH_REQ
, MTK_PDMA_LRO_CTRL_DW0
);
1423 /* wait for relinquishments done */
1424 for (i
= 0; i
< 10; i
++) {
1425 val
= mtk_r32(eth
, MTK_PDMA_LRO_CTRL_DW0
);
1426 if (val
& MTK_LRO_RING_RELINQUISH_DONE
) {
1433 /* invalidate lro rings */
1434 for (i
= 1; i
< MTK_MAX_RX_RING_NUM
; i
++)
1435 mtk_w32(eth
, 0, MTK_LRO_CTRL_DW2_CFG(i
));
1437 /* disable HW LRO */
1438 mtk_w32(eth
, 0, MTK_PDMA_LRO_CTRL_DW0
);
1441 static void mtk_hwlro_val_ipaddr(struct mtk_eth
*eth
, int idx
, __be32 ip
)
1445 reg_val
= mtk_r32(eth
, MTK_LRO_CTRL_DW2_CFG(idx
));
1447 /* invalidate the IP setting */
1448 mtk_w32(eth
, (reg_val
& ~MTK_RING_MYIP_VLD
), MTK_LRO_CTRL_DW2_CFG(idx
));
1450 mtk_w32(eth
, ip
, MTK_LRO_DIP_DW0_CFG(idx
));
1452 /* validate the IP setting */
1453 mtk_w32(eth
, (reg_val
| MTK_RING_MYIP_VLD
), MTK_LRO_CTRL_DW2_CFG(idx
));
1456 static void mtk_hwlro_inval_ipaddr(struct mtk_eth
*eth
, int idx
)
1460 reg_val
= mtk_r32(eth
, MTK_LRO_CTRL_DW2_CFG(idx
));
1462 /* invalidate the IP setting */
1463 mtk_w32(eth
, (reg_val
& ~MTK_RING_MYIP_VLD
), MTK_LRO_CTRL_DW2_CFG(idx
));
1465 mtk_w32(eth
, 0, MTK_LRO_DIP_DW0_CFG(idx
));
1468 static int mtk_hwlro_get_ip_cnt(struct mtk_mac
*mac
)
1473 for (i
= 0; i
< MTK_MAX_LRO_IP_CNT
; i
++) {
1474 if (mac
->hwlro_ip
[i
])
1481 static int mtk_hwlro_add_ipaddr(struct net_device
*dev
,
1482 struct ethtool_rxnfc
*cmd
)
1484 struct ethtool_rx_flow_spec
*fsp
=
1485 (struct ethtool_rx_flow_spec
*)&cmd
->fs
;
1486 struct mtk_mac
*mac
= netdev_priv(dev
);
1487 struct mtk_eth
*eth
= mac
->hw
;
1490 if ((fsp
->flow_type
!= TCP_V4_FLOW
) ||
1491 (!fsp
->h_u
.tcp_ip4_spec
.ip4dst
) ||
1492 (fsp
->location
> 1))
1495 mac
->hwlro_ip
[fsp
->location
] = htonl(fsp
->h_u
.tcp_ip4_spec
.ip4dst
);
1496 hwlro_idx
= (mac
->id
* MTK_MAX_LRO_IP_CNT
) + fsp
->location
;
1498 mac
->hwlro_ip_cnt
= mtk_hwlro_get_ip_cnt(mac
);
1500 mtk_hwlro_val_ipaddr(eth
, hwlro_idx
, mac
->hwlro_ip
[fsp
->location
]);
1505 static int mtk_hwlro_del_ipaddr(struct net_device
*dev
,
1506 struct ethtool_rxnfc
*cmd
)
1508 struct ethtool_rx_flow_spec
*fsp
=
1509 (struct ethtool_rx_flow_spec
*)&cmd
->fs
;
1510 struct mtk_mac
*mac
= netdev_priv(dev
);
1511 struct mtk_eth
*eth
= mac
->hw
;
1514 if (fsp
->location
> 1)
1517 mac
->hwlro_ip
[fsp
->location
] = 0;
1518 hwlro_idx
= (mac
->id
* MTK_MAX_LRO_IP_CNT
) + fsp
->location
;
1520 mac
->hwlro_ip_cnt
= mtk_hwlro_get_ip_cnt(mac
);
1522 mtk_hwlro_inval_ipaddr(eth
, hwlro_idx
);
1527 static void mtk_hwlro_netdev_disable(struct net_device
*dev
)
1529 struct mtk_mac
*mac
= netdev_priv(dev
);
1530 struct mtk_eth
*eth
= mac
->hw
;
1533 for (i
= 0; i
< MTK_MAX_LRO_IP_CNT
; i
++) {
1534 mac
->hwlro_ip
[i
] = 0;
1535 hwlro_idx
= (mac
->id
* MTK_MAX_LRO_IP_CNT
) + i
;
1537 mtk_hwlro_inval_ipaddr(eth
, hwlro_idx
);
1540 mac
->hwlro_ip_cnt
= 0;
1543 static int mtk_hwlro_get_fdir_entry(struct net_device
*dev
,
1544 struct ethtool_rxnfc
*cmd
)
1546 struct mtk_mac
*mac
= netdev_priv(dev
);
1547 struct ethtool_rx_flow_spec
*fsp
=
1548 (struct ethtool_rx_flow_spec
*)&cmd
->fs
;
1550 /* only tcp dst ipv4 is meaningful, others are meaningless */
1551 fsp
->flow_type
= TCP_V4_FLOW
;
1552 fsp
->h_u
.tcp_ip4_spec
.ip4dst
= ntohl(mac
->hwlro_ip
[fsp
->location
]);
1553 fsp
->m_u
.tcp_ip4_spec
.ip4dst
= 0;
1555 fsp
->h_u
.tcp_ip4_spec
.ip4src
= 0;
1556 fsp
->m_u
.tcp_ip4_spec
.ip4src
= 0xffffffff;
1557 fsp
->h_u
.tcp_ip4_spec
.psrc
= 0;
1558 fsp
->m_u
.tcp_ip4_spec
.psrc
= 0xffff;
1559 fsp
->h_u
.tcp_ip4_spec
.pdst
= 0;
1560 fsp
->m_u
.tcp_ip4_spec
.pdst
= 0xffff;
1561 fsp
->h_u
.tcp_ip4_spec
.tos
= 0;
1562 fsp
->m_u
.tcp_ip4_spec
.tos
= 0xff;
1567 static int mtk_hwlro_get_fdir_all(struct net_device
*dev
,
1568 struct ethtool_rxnfc
*cmd
,
1571 struct mtk_mac
*mac
= netdev_priv(dev
);
1575 for (i
= 0; i
< MTK_MAX_LRO_IP_CNT
; i
++) {
1576 if (mac
->hwlro_ip
[i
]) {
1582 cmd
->rule_cnt
= cnt
;
1587 static netdev_features_t
mtk_fix_features(struct net_device
*dev
,
1588 netdev_features_t features
)
1590 if (!(features
& NETIF_F_LRO
)) {
1591 struct mtk_mac
*mac
= netdev_priv(dev
);
1592 int ip_cnt
= mtk_hwlro_get_ip_cnt(mac
);
1595 netdev_info(dev
, "RX flow is programmed, LRO should keep on\n");
1597 features
|= NETIF_F_LRO
;
1604 static int mtk_set_features(struct net_device
*dev
, netdev_features_t features
)
1608 if (!((dev
->features
^ features
) & NETIF_F_LRO
))
1611 if (!(features
& NETIF_F_LRO
))
1612 mtk_hwlro_netdev_disable(dev
);
/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
		      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
			return 0;
		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "DMA init timeout\n");
	return -1;
}

1634 static int mtk_dma_init(struct mtk_eth
*eth
)
1639 if (mtk_dma_busy_wait(eth
))
1642 /* QDMA needs scratch memory for internal reordering of the
1645 err
= mtk_init_fq_dma(eth
);
1649 err
= mtk_tx_alloc(eth
);
1653 err
= mtk_rx_alloc(eth
, 0, MTK_RX_FLAGS_QDMA
);
1657 err
= mtk_rx_alloc(eth
, 0, MTK_RX_FLAGS_NORMAL
);
1662 for (i
= 1; i
< MTK_MAX_RX_RING_NUM
; i
++) {
1663 err
= mtk_rx_alloc(eth
, i
, MTK_RX_FLAGS_HWLRO
);
1667 err
= mtk_hwlro_rx_init(eth
);
1672 /* Enable random early drop and set drop threshold automatically */
1673 mtk_w32(eth
, FC_THRES_DROP_MODE
| FC_THRES_DROP_EN
| FC_THRES_MIN
,
1675 mtk_w32(eth
, 0x0, MTK_QDMA_HRED2
);
1680 static void mtk_dma_free(struct mtk_eth
*eth
)
1684 for (i
= 0; i
< MTK_MAC_COUNT
; i
++)
1686 netdev_reset_queue(eth
->netdev
[i
]);
1687 if (eth
->scratch_ring
) {
1688 dma_free_coherent(eth
->dev
,
1689 MTK_DMA_SIZE
* sizeof(struct mtk_tx_dma
),
1691 eth
->phy_scratch_ring
);
1692 eth
->scratch_ring
= NULL
;
1693 eth
->phy_scratch_ring
= 0;
1696 mtk_rx_clean(eth
, ð
->rx_ring
[0]);
1697 mtk_rx_clean(eth
, ð
->rx_ring_qdma
);
1700 mtk_hwlro_rx_uninit(eth
);
1701 for (i
= 1; i
< MTK_MAX_RX_RING_NUM
; i
++)
1702 mtk_rx_clean(eth
, ð
->rx_ring
[i
]);
1705 kfree(eth
->scratch_head
);
static void mtk_tx_timeout(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	schedule_work(&eth->pending_work);
}

static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		__napi_schedule(&eth->tx_napi);
		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
		if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
			mtk_handle_irq_rx(irq, _eth);
	}
	if (mtk_r32(eth, MTK_QDMA_INT_MASK) & MTK_TX_DONE_INT) {
		if (mtk_r32(eth, MTK_QMTK_INT_STATUS) & MTK_TX_DONE_INT)
			mtk_handle_irq_tx(irq, _eth);
	}

	return IRQ_HANDLED;
}

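/* SoCs with the MTK_SHARED_INT capability deliver TX and RX completions on a
 * single interrupt line; mtk_handle_irq() above demultiplexes it into the
 * per-direction handlers.
 */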
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	mtk_handle_irq_rx(eth->irq[2], dev);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
}
#endif

1773 static int mtk_start_dma(struct mtk_eth
*eth
)
1775 u32 rx_2b_offset
= (NET_IP_ALIGN
== 2) ? MTK_RX_2B_OFFSET
: 0;
1778 err
= mtk_dma_init(eth
);
1785 MTK_TX_WB_DDONE
| MTK_TX_DMA_EN
|
1786 MTK_DMA_SIZE_16DWORDS
| MTK_NDP_CO_PRO
|
1787 MTK_RX_DMA_EN
| MTK_RX_2B_OFFSET
|
1792 MTK_RX_DMA_EN
| rx_2b_offset
|
1793 MTK_RX_BT_32DWORDS
| MTK_MULTI_EN
,
1799 static int mtk_open(struct net_device
*dev
)
1801 struct mtk_mac
*mac
= netdev_priv(dev
);
1802 struct mtk_eth
*eth
= mac
->hw
;
1804 /* we run 2 netdevs on the same dma ring so we only bring it up once */
1805 if (!refcount_read(ð
->dma_refcnt
)) {
1806 int err
= mtk_start_dma(eth
);
1811 napi_enable(ð
->tx_napi
);
1812 napi_enable(ð
->rx_napi
);
1813 mtk_tx_irq_enable(eth
, MTK_TX_DONE_INT
);
1814 mtk_rx_irq_enable(eth
, MTK_RX_DONE_INT
);
1815 refcount_set(ð
->dma_refcnt
, 1);
1818 refcount_inc(ð
->dma_refcnt
);
1820 phy_start(dev
->phydev
);
1821 netif_start_queue(dev
);
1826 static void mtk_stop_dma(struct mtk_eth
*eth
, u32 glo_cfg
)
1831 /* stop the dma engine */
1832 spin_lock_bh(ð
->page_lock
);
1833 val
= mtk_r32(eth
, glo_cfg
);
1834 mtk_w32(eth
, val
& ~(MTK_TX_WB_DDONE
| MTK_RX_DMA_EN
| MTK_TX_DMA_EN
),
1836 spin_unlock_bh(ð
->page_lock
);
1838 /* wait for dma stop */
1839 for (i
= 0; i
< 10; i
++) {
1840 val
= mtk_r32(eth
, glo_cfg
);
1841 if (val
& (MTK_TX_DMA_BUSY
| MTK_RX_DMA_BUSY
)) {
1849 static int mtk_stop(struct net_device
*dev
)
1851 struct mtk_mac
*mac
= netdev_priv(dev
);
1852 struct mtk_eth
*eth
= mac
->hw
;
1854 netif_tx_disable(dev
);
1855 phy_stop(dev
->phydev
);
1857 /* only shutdown DMA if this is the last user */
1858 if (!refcount_dec_and_test(ð
->dma_refcnt
))
1861 mtk_tx_irq_disable(eth
, MTK_TX_DONE_INT
);
1862 mtk_rx_irq_disable(eth
, MTK_RX_DONE_INT
);
1863 napi_disable(ð
->tx_napi
);
1864 napi_disable(ð
->rx_napi
);
1866 mtk_stop_dma(eth
, MTK_QDMA_GLO_CFG
);
1867 mtk_stop_dma(eth
, MTK_PDMA_GLO_CFG
);
static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   reset_bits);

	usleep_range(1000, 1100);
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   ~reset_bits);
	mdelay(10);
}

static void mtk_clk_disable(struct mtk_eth *eth)
{
	int clk;

	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
		clk_disable_unprepare(eth->clks[clk]);
}

static int mtk_clk_enable(struct mtk_eth *eth)
{
	int clk, ret;

	for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
		ret = clk_prepare_enable(eth->clks[clk]);
		if (ret)
			goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	while (--clk >= 0)
		clk_disable_unprepare(eth->clks[clk]);

	return ret;
}

1914 static int mtk_hw_init(struct mtk_eth
*eth
)
1918 if (test_and_set_bit(MTK_HW_INIT
, ð
->state
))
1921 pm_runtime_enable(eth
->dev
);
1922 pm_runtime_get_sync(eth
->dev
);
1924 ret
= mtk_clk_enable(eth
);
1926 goto err_disable_pm
;
1928 ethsys_reset(eth
, RSTCTRL_FE
);
1929 ethsys_reset(eth
, RSTCTRL_PPE
);
1931 regmap_read(eth
->ethsys
, ETHSYS_SYSCFG0
, &val
);
1932 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
1935 val
&= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK
, eth
->mac
[i
]->id
);
1936 val
|= SYSCFG0_GE_MODE(eth
->mac
[i
]->ge_mode
, eth
->mac
[i
]->id
);
1938 regmap_write(eth
->ethsys
, ETHSYS_SYSCFG0
, val
);
1941 /* Set GE2 driving and slew rate */
1942 regmap_write(eth
->pctl
, GPIO_DRV_SEL10
, 0xa00);
1945 regmap_write(eth
->pctl
, GPIO_OD33_CTRL8
, 0x5);
1948 regmap_write(eth
->pctl
, GPIO_BIAS_CTRL
, 0x0);
1951 /* Set linkdown as the default for each GMAC. Its own MCR would be set
1952 * up with the more appropriate value when mtk_phy_link_adjust call is
1955 for (i
= 0; i
< MTK_MAC_COUNT
; i
++)
1956 mtk_w32(eth
, 0, MTK_MAC_MCR(i
));
1958 /* Indicates CDM to parse the MTK special tag from CPU
1959 * which also is working out for untag packets.
1961 val
= mtk_r32(eth
, MTK_CDMQ_IG_CTRL
);
1962 mtk_w32(eth
, val
| MTK_CDMQ_STAG_EN
, MTK_CDMQ_IG_CTRL
);
1964 /* Enable RX VLan Offloading */
1965 mtk_w32(eth
, 1, MTK_CDMP_EG_CTRL
);
1967 /* enable interrupt delay for RX */
1968 mtk_w32(eth
, MTK_PDMA_DELAY_RX_DELAY
, MTK_PDMA_DELAY_INT
);
1970 /* disable delay and normal interrupt */
1971 mtk_w32(eth
, 0, MTK_QDMA_DELAY_INT
);
1972 mtk_tx_irq_disable(eth
, ~0);
1973 mtk_rx_irq_disable(eth
, ~0);
1974 mtk_w32(eth
, RST_GL_PSE
, MTK_RST_GL
);
1975 mtk_w32(eth
, 0, MTK_RST_GL
);
1977 /* FE int grouping */
1978 mtk_w32(eth
, MTK_TX_DONE_INT
, MTK_PDMA_INT_GRP1
);
1979 mtk_w32(eth
, MTK_RX_DONE_INT
, MTK_PDMA_INT_GRP2
);
1980 mtk_w32(eth
, MTK_TX_DONE_INT
, MTK_QDMA_INT_GRP1
);
1981 mtk_w32(eth
, MTK_RX_DONE_INT
, MTK_QDMA_INT_GRP2
);
1982 mtk_w32(eth
, 0x21021000, MTK_FE_INT_GRP
);
1984 for (i
= 0; i
< 2; i
++) {
1985 u32 val
= mtk_r32(eth
, MTK_GDMA_FWD_CFG(i
));
1987 /* setup the forward port to send frame to PDMA */
1990 /* Enable RX checksum */
1991 val
|= MTK_GDMA_ICS_EN
| MTK_GDMA_TCS_EN
| MTK_GDMA_UCS_EN
;
1993 /* setup the mac dma */
1994 mtk_w32(eth
, val
, MTK_GDMA_FWD_CFG(i
));
2000 pm_runtime_put_sync(eth
->dev
);
2001 pm_runtime_disable(eth
->dev
);
2006 static int mtk_hw_deinit(struct mtk_eth
*eth
)
2008 if (!test_and_clear_bit(MTK_HW_INIT
, ð
->state
))
2011 mtk_clk_disable(eth
);
2013 pm_runtime_put_sync(eth
->dev
);
2014 pm_runtime_disable(eth
->dev
);
static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (!IS_ERR(mac_addr))
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	return mtk_phy_connect(dev);
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phy_disconnect(dev->phydev);
	if (of_phy_is_fixed_link(mac->of_node))
		of_phy_deregister_fixed_link(mac->of_node);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
}

2051 static int mtk_do_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
2057 return phy_mii_ioctl(dev
->phydev
, ifr
, cmd
);
2065 static void mtk_pending_work(struct work_struct
*work
)
2067 struct mtk_eth
*eth
= container_of(work
, struct mtk_eth
, pending_work
);
2069 unsigned long restart
= 0;
2073 dev_dbg(eth
->dev
, "[%s][%d] reset\n", __func__
, __LINE__
);
2075 while (test_and_set_bit_lock(MTK_RESETTING
, ð
->state
))
2078 dev_dbg(eth
->dev
, "[%s][%d] mtk_stop starts\n", __func__
, __LINE__
);
2079 /* stop all devices to make sure that dma is properly shut down */
2080 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
2081 if (!eth
->netdev
[i
])
2083 mtk_stop(eth
->netdev
[i
]);
2084 __set_bit(i
, &restart
);
2086 dev_dbg(eth
->dev
, "[%s][%d] mtk_stop ends\n", __func__
, __LINE__
);
2088 /* restart underlying hardware such as power, clock, pin mux
2089 * and the connected phy
2094 pinctrl_select_state(eth
->dev
->pins
->p
,
2095 eth
->dev
->pins
->default_state
);
2098 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
2100 of_phy_is_fixed_link(eth
->mac
[i
]->of_node
))
2102 err
= phy_init_hw(eth
->netdev
[i
]->phydev
);
2104 dev_err(eth
->dev
, "%s: PHY init failed.\n",
2105 eth
->netdev
[i
]->name
);
2108 /* restart DMA and enable IRQs */
2109 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
2110 if (!test_bit(i
, &restart
))
2112 err
= mtk_open(eth
->netdev
[i
]);
2114 netif_alert(eth
, ifup
, eth
->netdev
[i
],
2115 "Driver up/down cycle failed, closing device.\n");
2116 dev_close(eth
->netdev
[i
]);
2120 dev_dbg(eth
->dev
, "[%s][%d] reset done\n", __func__
, __LINE__
);
2122 clear_bit_unlock(MTK_RESETTING
, ð
->state
);
2127 static int mtk_free_dev(struct mtk_eth
*eth
)
2131 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
2132 if (!eth
->netdev
[i
])
2134 free_netdev(eth
->netdev
[i
]);
2140 static int mtk_unreg_dev(struct mtk_eth
*eth
)
2144 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
2145 if (!eth
->netdev
[i
])
2147 unregister_netdev(eth
->netdev
[i
]);
2153 static int mtk_cleanup(struct mtk_eth
*eth
)
2157 cancel_work_sync(ð
->pending_work
);
2162 static int mtk_get_link_ksettings(struct net_device
*ndev
,
2163 struct ethtool_link_ksettings
*cmd
)
2165 struct mtk_mac
*mac
= netdev_priv(ndev
);
2167 if (unlikely(test_bit(MTK_RESETTING
, &mac
->hw
->state
)))
2170 phy_ethtool_ksettings_get(ndev
->phydev
, cmd
);
2175 static int mtk_set_link_ksettings(struct net_device
*ndev
,
2176 const struct ethtool_link_ksettings
*cmd
)
2178 struct mtk_mac
*mac
= netdev_priv(ndev
);
2180 if (unlikely(test_bit(MTK_RESETTING
, &mac
->hw
->state
)))
2183 return phy_ethtool_ksettings_set(ndev
->phydev
, cmd
);
2186 static void mtk_get_drvinfo(struct net_device
*dev
,
2187 struct ethtool_drvinfo
*info
)
2189 struct mtk_mac
*mac
= netdev_priv(dev
);
2191 strlcpy(info
->driver
, mac
->hw
->dev
->driver
->name
, sizeof(info
->driver
));
2192 strlcpy(info
->bus_info
, dev_name(mac
->hw
->dev
), sizeof(info
->bus_info
));
2193 info
->n_stats
= ARRAY_SIZE(mtk_ethtool_stats
);
2196 static u32
mtk_get_msglevel(struct net_device
*dev
)
2198 struct mtk_mac
*mac
= netdev_priv(dev
);
2200 return mac
->hw
->msg_enable
;
2203 static void mtk_set_msglevel(struct net_device
*dev
, u32 value
)
2205 struct mtk_mac
*mac
= netdev_priv(dev
);
2207 mac
->hw
->msg_enable
= value
;
2210 static int mtk_nway_reset(struct net_device
*dev
)
2212 struct mtk_mac
*mac
= netdev_priv(dev
);
2214 if (unlikely(test_bit(MTK_RESETTING
, &mac
->hw
->state
)))
2217 return genphy_restart_aneg(dev
->phydev
);
2220 static u32
mtk_get_link(struct net_device
*dev
)
2222 struct mtk_mac
*mac
= netdev_priv(dev
);
2225 if (unlikely(test_bit(MTK_RESETTING
, &mac
->hw
->state
)))
2228 err
= genphy_update_link(dev
->phydev
);
2230 return ethtool_op_get_link(dev
);
2232 return dev
->phydev
->link
;
2235 static void mtk_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
2239 switch (stringset
) {
2241 for (i
= 0; i
< ARRAY_SIZE(mtk_ethtool_stats
); i
++) {
2242 memcpy(data
, mtk_ethtool_stats
[i
].str
, ETH_GSTRING_LEN
);
2243 data
+= ETH_GSTRING_LEN
;
2249 static int mtk_get_sset_count(struct net_device
*dev
, int sset
)
2253 return ARRAY_SIZE(mtk_ethtool_stats
);
2259 static void mtk_get_ethtool_stats(struct net_device
*dev
,
2260 struct ethtool_stats
*stats
, u64
*data
)
2262 struct mtk_mac
*mac
= netdev_priv(dev
);
2263 struct mtk_hw_stats
*hwstats
= mac
->hw_stats
;
2264 u64
*data_src
, *data_dst
;
2268 if (unlikely(test_bit(MTK_RESETTING
, &mac
->hw
->state
)))
2271 if (netif_running(dev
) && netif_device_present(dev
)) {
2272 if (spin_trylock_bh(&hwstats
->stats_lock
)) {
2273 mtk_stats_update_mac(mac
);
2274 spin_unlock_bh(&hwstats
->stats_lock
);
2278 data_src
= (u64
*)hwstats
;
2282 start
= u64_stats_fetch_begin_irq(&hwstats
->syncp
);
2284 for (i
= 0; i
< ARRAY_SIZE(mtk_ethtool_stats
); i
++)
2285 *data_dst
++ = *(data_src
+ mtk_ethtool_stats
[i
].offset
);
2286 } while (u64_stats_fetch_retry_irq(&hwstats
->syncp
, start
));
2289 static int mtk_get_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
,
2292 int ret
= -EOPNOTSUPP
;
2295 case ETHTOOL_GRXRINGS
:
2296 if (dev
->hw_features
& NETIF_F_LRO
) {
2297 cmd
->data
= MTK_MAX_RX_RING_NUM
;
2301 case ETHTOOL_GRXCLSRLCNT
:
2302 if (dev
->hw_features
& NETIF_F_LRO
) {
2303 struct mtk_mac
*mac
= netdev_priv(dev
);
2305 cmd
->rule_cnt
= mac
->hwlro_ip_cnt
;
2309 case ETHTOOL_GRXCLSRULE
:
2310 if (dev
->hw_features
& NETIF_F_LRO
)
2311 ret
= mtk_hwlro_get_fdir_entry(dev
, cmd
);
2313 case ETHTOOL_GRXCLSRLALL
:
2314 if (dev
->hw_features
& NETIF_F_LRO
)
2315 ret
= mtk_hwlro_get_fdir_all(dev
, cmd
,
2325 static int mtk_set_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
)
2327 int ret
= -EOPNOTSUPP
;
2330 case ETHTOOL_SRXCLSRLINS
:
2331 if (dev
->hw_features
& NETIF_F_LRO
)
2332 ret
= mtk_hwlro_add_ipaddr(dev
, cmd
);
2334 case ETHTOOL_SRXCLSRLDEL
:
2335 if (dev
->hw_features
& NETIF_F_LRO
)
2336 ret
= mtk_hwlro_del_ipaddr(dev
, cmd
);
static const struct ethtool_ops mtk_ethtool_ops = {
	.get_link_ksettings	= mtk_get_link_ksettings,
	.set_link_ksettings	= mtk_set_link_ksettings,
	.get_drvinfo		= mtk_get_drvinfo,
	.get_msglevel		= mtk_get_msglevel,
	.set_msglevel		= mtk_set_msglevel,
	.nway_reset		= mtk_nway_reset,
	.get_link		= mtk_get_link,
	.get_strings		= mtk_get_strings,
	.get_sset_count		= mtk_get_sset_count,
	.get_ethtool_stats	= mtk_get_ethtool_stats,
	.get_rxnfc		= mtk_get_rxnfc,
	.set_rxnfc		= mtk_set_rxnfc,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init		= mtk_init,
	.ndo_uninit		= mtk_uninit,
	.ndo_open		= mtk_open,
	.ndo_stop		= mtk_stop,
	.ndo_start_xmit		= mtk_start_xmit,
	.ndo_set_mac_address	= mtk_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mtk_do_ioctl,
	.ndo_tx_timeout		= mtk_tx_timeout,
	.ndo_get_stats64	= mtk_get_stats64,
	.ndo_fix_features	= mtk_fix_features,
	.ndo_set_features	= mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mtk_poll_controller,
#endif
};

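/* mtk_add_mac() allocates one net_device per "mediatek,eth-mac" child node;
 * all of them share the DMA rings owned by the common mtk_eth instance.
 */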
2378 static int mtk_add_mac(struct mtk_eth
*eth
, struct device_node
*np
)
2380 struct mtk_mac
*mac
;
2381 const __be32
*_id
= of_get_property(np
, "reg", NULL
);
2385 dev_err(eth
->dev
, "missing mac id\n");
2389 id
= be32_to_cpup(_id
);
2390 if (id
>= MTK_MAC_COUNT
) {
2391 dev_err(eth
->dev
, "%d is not a valid mac id\n", id
);
2395 if (eth
->netdev
[id
]) {
2396 dev_err(eth
->dev
, "duplicate mac id found: %d\n", id
);
2400 eth
->netdev
[id
] = alloc_etherdev(sizeof(*mac
));
2401 if (!eth
->netdev
[id
]) {
2402 dev_err(eth
->dev
, "alloc_etherdev failed\n");
2405 mac
= netdev_priv(eth
->netdev
[id
]);
2411 memset(mac
->hwlro_ip
, 0, sizeof(mac
->hwlro_ip
));
2412 mac
->hwlro_ip_cnt
= 0;
2414 mac
->hw_stats
= devm_kzalloc(eth
->dev
,
2415 sizeof(*mac
->hw_stats
),
2417 if (!mac
->hw_stats
) {
2418 dev_err(eth
->dev
, "failed to allocate counter memory\n");
2422 spin_lock_init(&mac
->hw_stats
->stats_lock
);
2423 u64_stats_init(&mac
->hw_stats
->syncp
);
2424 mac
->hw_stats
->reg_offset
= id
* MTK_STAT_OFFSET
;
2426 SET_NETDEV_DEV(eth
->netdev
[id
], eth
->dev
);
2427 eth
->netdev
[id
]->watchdog_timeo
= 5 * HZ
;
2428 eth
->netdev
[id
]->netdev_ops
= &mtk_netdev_ops
;
2429 eth
->netdev
[id
]->base_addr
= (unsigned long)eth
->base
;
2431 eth
->netdev
[id
]->hw_features
= MTK_HW_FEATURES
;
2433 eth
->netdev
[id
]->hw_features
|= NETIF_F_LRO
;
2435 eth
->netdev
[id
]->vlan_features
= MTK_HW_FEATURES
&
2436 ~(NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
);
2437 eth
->netdev
[id
]->features
|= MTK_HW_FEATURES
;
2438 eth
->netdev
[id
]->ethtool_ops
= &mtk_ethtool_ops
;
2440 eth
->netdev
[id
]->irq
= eth
->irq
[0];
2441 eth
->netdev
[id
]->dev
.of_node
= np
;
2446 free_netdev(eth
->netdev
[id
]);
2450 static int mtk_probe(struct platform_device
*pdev
)
2452 struct resource
*res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2453 struct device_node
*mac_np
;
2454 struct mtk_eth
*eth
;
2458 eth
= devm_kzalloc(&pdev
->dev
, sizeof(*eth
), GFP_KERNEL
);
2462 eth
->soc
= of_device_get_match_data(&pdev
->dev
);
2464 eth
->dev
= &pdev
->dev
;
2465 eth
->base
= devm_ioremap_resource(&pdev
->dev
, res
);
2466 if (IS_ERR(eth
->base
))
2467 return PTR_ERR(eth
->base
);
2469 spin_lock_init(ð
->page_lock
);
2470 spin_lock_init(ð
->tx_irq_lock
);
2471 spin_lock_init(ð
->rx_irq_lock
);
2473 eth
->ethsys
= syscon_regmap_lookup_by_phandle(pdev
->dev
.of_node
,
2475 if (IS_ERR(eth
->ethsys
)) {
2476 dev_err(&pdev
->dev
, "no ethsys regmap found\n");
2477 return PTR_ERR(eth
->ethsys
);
2480 if (MTK_HAS_CAPS(eth
->soc
->caps
, MTK_SGMII
)) {
2482 syscon_regmap_lookup_by_phandle(pdev
->dev
.of_node
,
2483 "mediatek,sgmiisys");
2484 if (IS_ERR(eth
->sgmiisys
)) {
2485 dev_err(&pdev
->dev
, "no sgmiisys regmap found\n");
2486 return PTR_ERR(eth
->sgmiisys
);
2490 if (eth
->soc
->required_pctl
) {
2491 eth
->pctl
= syscon_regmap_lookup_by_phandle(pdev
->dev
.of_node
,
2493 if (IS_ERR(eth
->pctl
)) {
2494 dev_err(&pdev
->dev
, "no pctl regmap found\n");
2495 return PTR_ERR(eth
->pctl
);
2499 for (i
= 0; i
< 3; i
++) {
2500 if (MTK_HAS_CAPS(eth
->soc
->caps
, MTK_SHARED_INT
) && i
> 0)
2501 eth
->irq
[i
] = eth
->irq
[0];
2503 eth
->irq
[i
] = platform_get_irq(pdev
, i
);
2504 if (eth
->irq
[i
] < 0) {
2505 dev_err(&pdev
->dev
, "no IRQ%d resource found\n", i
);
2509 for (i
= 0; i
< ARRAY_SIZE(eth
->clks
); i
++) {
2510 eth
->clks
[i
] = devm_clk_get(eth
->dev
,
2511 mtk_clks_source_name
[i
]);
2512 if (IS_ERR(eth
->clks
[i
])) {
2513 if (PTR_ERR(eth
->clks
[i
]) == -EPROBE_DEFER
)
2514 return -EPROBE_DEFER
;
2515 if (eth
->soc
->required_clks
& BIT(i
)) {
2516 dev_err(&pdev
->dev
, "clock %s not found\n",
2517 mtk_clks_source_name
[i
]);
2520 eth
->clks
[i
] = NULL
;
2524 eth
->msg_enable
= netif_msg_init(mtk_msg_level
, MTK_DEFAULT_MSG_ENABLE
);
2525 INIT_WORK(ð
->pending_work
, mtk_pending_work
);
2527 err
= mtk_hw_init(eth
);
2531 eth
->hwlro
= MTK_HAS_CAPS(eth
->soc
->caps
, MTK_HWLRO
);
2533 for_each_child_of_node(pdev
->dev
.of_node
, mac_np
) {
2534 if (!of_device_is_compatible(mac_np
,
2535 "mediatek,eth-mac"))
2538 if (!of_device_is_available(mac_np
))
2541 err
= mtk_add_mac(eth
, mac_np
);
2546 if (MTK_HAS_CAPS(eth
->soc
->caps
, MTK_SHARED_INT
)) {
2547 err
= devm_request_irq(eth
->dev
, eth
->irq
[0],
2549 dev_name(eth
->dev
), eth
);
2551 err
= devm_request_irq(eth
->dev
, eth
->irq
[1],
2552 mtk_handle_irq_tx
, 0,
2553 dev_name(eth
->dev
), eth
);
2557 err
= devm_request_irq(eth
->dev
, eth
->irq
[2],
2558 mtk_handle_irq_rx
, 0,
2559 dev_name(eth
->dev
), eth
);
2564 err
= mtk_mdio_init(eth
);
2568 for (i
= 0; i
< MTK_MAX_DEVS
; i
++) {
2569 if (!eth
->netdev
[i
])
2572 err
= register_netdev(eth
->netdev
[i
]);
2574 dev_err(eth
->dev
, "error bringing up device\n");
2575 goto err_deinit_mdio
;
2577 netif_info(eth
, probe
, eth
->netdev
[i
],
2578 "mediatek frame engine at 0x%08lx, irq %d\n",
2579 eth
->netdev
[i
]->base_addr
, eth
->irq
[0]);
2582 /* we run 2 devices on the same DMA ring so we need a dummy device
2585 init_dummy_netdev(ð
->dummy_dev
);
2586 netif_napi_add(ð
->dummy_dev
, ð
->tx_napi
, mtk_napi_tx
,
2588 netif_napi_add(ð
->dummy_dev
, ð
->rx_napi
, mtk_napi_rx
,
2591 platform_set_drvdata(pdev
, eth
);
2596 mtk_mdio_cleanup(eth
);
2605 static int mtk_remove(struct platform_device
*pdev
)
2607 struct mtk_eth
*eth
= platform_get_drvdata(pdev
);
2610 /* stop all devices to make sure that dma is properly shut down */
2611 for (i
= 0; i
< MTK_MAC_COUNT
; i
++) {
2612 if (!eth
->netdev
[i
])
2614 mtk_stop(eth
->netdev
[i
]);
2619 netif_napi_del(ð
->tx_napi
);
2620 netif_napi_del(ð
->rx_napi
);
2622 mtk_mdio_cleanup(eth
);
static const struct mtk_soc_data mt2701_data = {
	.caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

static const struct mtk_soc_data mt7621_data = {
	.caps = MTK_SHARED_INT,
	.required_clks = MT7621_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data mt7622_data = {
	.caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO,
	.required_clks = MT7622_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data mt7623_data = {
	.caps = MTK_GMAC1_TRGMII | MTK_HWLRO,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");