// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/jhash.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
#define MTK_ETHTOOL_STAT(x) { #x, \
			     offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
	"sgmii_ck", "eth2pll",
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);

	return val;
}

static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}
static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
			   u32 write_data)
{
	int ret;

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	mtk_w32(eth, PHY_IAC_ACCESS |
		PHY_IAC_START_C22 |
		PHY_IAC_CMD_WRITE |
		PHY_IAC_REG(phy_reg) |
		PHY_IAC_ADDR(phy_addr) |
		PHY_IAC_DATA(write_data),
		MTK_PHY_IAC);

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	return 0;
}

static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
{
	int ret;

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	mtk_w32(eth, PHY_IAC_ACCESS |
		PHY_IAC_START_C22 |
		PHY_IAC_CMD_C22_READ |
		PHY_IAC_REG(phy_reg) |
		PHY_IAC_ADDR(phy_addr),
		MTK_PHY_IAC);

	ret = mtk_mdio_busy_wait(eth);
	if (ret < 0)
		return ret;

	return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}
static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 val;

	/* Check DDR memory type.
	 * Currently TRGMII mode with DDR2 memory is not supported.
	 */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
	if (interface == PHY_INTERFACE_MODE_TRGMII &&
	    val & SYSCFG_DRAM_TYPE_DDR2) {
		dev_err(eth->dev,
			"TRGMII mode with DDR2 memory is not supported!\n");
		return -EOPNOTSUPP;
	}

	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, val);

	return 0;
}

static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface, int speed)
{
	u32 val;
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		val = 500000000;
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}
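
/* phylink mac_config callback. Selects the SoC pin functions and ETHSYS
 * muxing for the requested PHY interface mode, sets up the TRGMII clocks
 * for GMAC0 where applicable, and programs the per-MAC control register.
 */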
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 mcr_cur, mcr_new, sid, i;
	int val, ge_mode, err = 0;

	/* MT76x8 has no hardware settings for the MAC */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
			if (mac->id)
				goto err_phy;
			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
					  MTK_GMAC1_TRGMII))
				goto err_phy;
			fallthrough;
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_REVMII:
		case PHY_INTERFACE_MODE_RMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_GMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				mtk_gmac0_rgmii_adjust(mac->hw,
						       state->interface,
						       state->speed);

				/* mt7623_pad_clk_setup */
				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
					mtk_w32(eth,
						TD_DM_DRVP(8) | TD_DM_DRVN(8),
						TRGMII_TD_ODT(i));

				/* Assert/release MT7623 RXC reset */
				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
					TRGMII_RCK_CTRL);
				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
			}
		}

		ge_mode = 0;
		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		case PHY_INTERFACE_MODE_REVMII:
			ge_mode = 2;
			break;
		case PHY_INTERFACE_MODE_RMII:
			if (mac->id)
				goto err_phy;
			ge_mode = 3;
			break;
		default:
			break;
		}

		/* put the gmac into the right mode */
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path GMAC to SGMII will be enabled once the SGMIISYS is
		 * being setup done.
		 */
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Decide how GMAC and SGMIISYS be mapped */
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mac->id;

		/* Setup SGMIISYS with the determined property */
		if (state->interface != PHY_INTERFACE_MODE_SGMII)
			err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
							 state);
		else if (phylink_autoneg_inband(mode))
			err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);

		if (err)
			goto init_err;

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, val);
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
	mcr_new = mcr_cur;
	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;

	/* Only update control register when needed! */
	if (mcr_new != mcr_cur)
		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}
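
/* phylink mac_pcs_get_state callback: the per-MAC status register (MSR)
 * reflects the link state the hardware is actually running with, including
 * speed, duplex and the negotiated pause modes.
 */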
static void mtk_mac_pcs_get_state(struct phylink_config *config,
				  struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));

	state->link = (pmsr & MAC_MSR_LINK);
	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;

	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
	case 0:
		state->speed = SPEED_10;
		break;
	case MAC_MSR_SPEED_100:
		state->speed = SPEED_100;
		break;
	case MAC_MSR_SPEED_1000:
		state->speed = SPEED_1000;
		break;
	default:
		state->speed = SPEED_UNKNOWN;
		break;
	}

	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
	if (pmsr & MAC_MSR_RX_FC)
		state->pause |= MLO_PAUSE_RX;
	if (pmsr & MAC_MSR_TX_FC)
		state->pause |= MLO_PAUSE_TX;
}

static void mtk_mac_an_restart(struct phylink_config *config)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	mtk_sgmii_restart_an(mac->hw, mac->id);
}

static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

static void mtk_mac_link_up(struct phylink_config *config,
			    struct phy_device *phy,
			    unsigned int mode, phy_interface_t interface,
			    int speed, int duplex, bool tx_pause, bool rx_pause)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
		 MAC_MCR_FORCE_RX_FC);

	/* Configure speed */
	switch (speed) {
	case SPEED_2500:
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	/* Configure duplex */
	if (duplex == DUPLEX_FULL)
		mcr |= MAC_MCR_FORCE_DPX;

	/* Configure pause modes - phylink will avoid these for half duplex */
	if (tx_pause)
		mcr |= MAC_MCR_FORCE_TX_FC;
	if (rx_pause)
		mcr |= MAC_MCR_FORCE_RX_FC;

	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
static void mtk_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
	      phy_interface_mode_is_rgmii(state->interface)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface)))) {
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseX_Full);
		break;
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseT_Half);
		fallthrough;
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_REVMII:
	case PHY_INTERFACE_MODE_NA:
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		break;
	}

	if (state->interface == PHY_INTERFACE_MODE_NA) {
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
			phylink_set(mask, 1000baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
		}
	}

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}
static const struct phylink_mac_ops mtk_phylink_ops = {
	.validate = mtk_validate,
	.mac_pcs_get_state = mtk_mac_pcs_get_state,
	.mac_an_restart = mtk_mac_an_restart,
	.mac_config = mtk_mac_config,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}
static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
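
/* The MAC address is split across two registers: the two high bytes go in
 * ADRH and the remaining four in ADRL. MT7628 uses its own SDM register
 * pair instead of the per-GDMA ones.
 */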
static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	struct mtk_eth *eth = mac->hw;

	u64_stats_update_begin(&hw_stats->syncp);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
		hw_stats->rx_checksum_errors +=
			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
	} else {
		unsigned int offs = hw_stats->reg_offset;
		u64 stats;

		hw_stats->rx_bytes += mtk_r32(mac->hw,
					      MTK_GDM1_RX_GBCNT_L + offs);
		stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
		if (stats)
			hw_stats->rx_bytes += (stats << 32);
		hw_stats->rx_packets +=
			mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
		hw_stats->rx_overflow +=
			mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
		hw_stats->rx_fcs_errors +=
			mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
		hw_stats->rx_short_errors +=
			mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
		hw_stats->rx_long_errors +=
			mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
		hw_stats->rx_checksum_errors +=
			mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
		hw_stats->rx_flow_control_packets +=
			mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
		hw_stats->tx_skip +=
			mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
		hw_stats->tx_collisions +=
			mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
		hw_stats->tx_bytes +=
			mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
		stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets +=
			mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
	}

	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}
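
/* ndo_get_stats64: counters are read under a u64_stats sequence retry loop
 * so that a concurrent writer on another CPU cannot hand us a torn 64-bit
 * value on 32-bit hosts.
 */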
static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}
static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
		mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);

	return buf_size;
}

static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	if (!(rxd->rxd2 & RX_DMA_DONE))
		return false;

	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);

	return true;
}
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
					       &eth->phy_scratch_ring,
					       GFP_ATOMIC);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}
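
/* QDMA descriptors link to each other by DMA address (txd2), so walking the
 * ring means translating between bus addresses and the CPU mapping; the
 * helpers below do that arithmetic for both the QDMA ring and the shadow
 * PDMA ring.
 */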
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}

static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
				       struct mtk_tx_dma *dma)
{
	return ring->dma_pdma - ring->dma + dma;
}

static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
{
	return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
}
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 bool napi)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
			dma_unmap_single(eth->dev,
					 dma_unmap_addr(tx_buf, dma_addr0),
					 dma_unmap_len(tx_buf, dma_len0),
					 DMA_TO_DEVICE);
		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}
	} else {
		if (dma_unmap_len(tx_buf, dma_len0)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}

		if (dma_unmap_len(tx_buf, dma_len1)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr1),
				       dma_unmap_len(tx_buf, dma_len1),
				       DMA_TO_DEVICE);
		}
	}

	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
		if (napi)
			napi_consume_skb(tx_buf->skb, napi);
		else
			dev_kfree_skb_any(tx_buf->skb);
	}
	tx_buf->skb = NULL;
}
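
/* On the PDMA ring one descriptor carries up to two buffers: an even call
 * fills txd1/PLEN0, an odd one fills txd3/PLEN1, which is why the unmap
 * path above tracks dma_addr0 and dma_addr1 separately.
 */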
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
			 size_t size, int idx)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	} else {
		if (idx & 1) {
			txd->txd3 = mapped_addr;
			txd->txd2 |= TX_DMA_PLEN1(size);
			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len1, size);
		} else {
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			txd->txd1 = mapped_addr;
			txd->txd2 = TX_DMA_PLEN0(size);
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, size);
		}
	}
}
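
/* mtk_tx_map() maps the skb head and every fragment into the TX ring. A
 * fragment may span several descriptors when it exceeds MTK_TX_DMA_BUF_LEN,
 * and on mapping failure the already-written descriptors are unwound in
 * the err_dma path.
 */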
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0, fport;
	int k = 0;

	itxd = ring->next_free;
	itxd_pdma = qdma_to_pdma(ring, itxd);
	if (itxd == ring->last_free)
		return -ENOMEM;

	/* set the forward port */
	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
	txd4 |= fport;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(itx_buf, 0, sizeof(*itx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(eth->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
			  MTK_TX_FLAGS_FPORT1;
	setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
		     k++);

	/* TX SG offload */
	txd = itxd;
	txd_pdma = qdma_to_pdma(ring, txd);
	nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;
			bool new_desc = true;

			if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
			    (i & 0x1)) {
				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
				txd_pdma = qdma_to_pdma(ring, txd);
				if (txd == ring->last_free)
					goto err_dma;

				n_desc++;
			} else {
				new_desc = false;
			}

			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0));
			WRITE_ONCE(txd->txd4, fport);

			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			if (new_desc)
				memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
					 MTK_TX_FLAGS_FPORT1;

			setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
				     frag_map_size, k++);

			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	itx_buf->skb = skb;

	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (k & 0x1)
			txd_pdma->txd2 |= TX_DMA_LS0;
		else
			txd_pdma->txd2 |= TX_DMA_LS1;
	}

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
		    !netdev_xmit_more())
			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
	} else {
		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
					     ring->dma_size);
		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
	}

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf, false);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;

		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
		itxd_pdma = qdma_to_pdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}
static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	skb_frag_t *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
					       MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}
static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		netif_stop_queue(dev);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		netif_stop_queue(dev);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
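
/* With HW LRO enabled the PDMA scatters packets across several RX rings, so
 * the poll path first picks a ring that actually has a completed
 * (RX_DMA_DONE) descriptor pending.
 */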
static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int idx;
	int i;

	if (!eth->hwlro)
		return &eth->rx_ring[0];

	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
		ring = &eth->rx_ring[i];
		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
			ring->calc_idx_update = true;
			return ring;
		}
	}

	return NULL;
}

static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int i;

	if (!eth->hwlro) {
		ring = &eth->rx_ring[0];
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	} else {
		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
			ring = &eth->rx_ring[i];
			if (ring->calc_idx_update) {
				ring->calc_idx_update = false;
				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
			}
		}
	}
}
static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct dim_sample dim_sample = {};
	struct mtk_rx_ring *ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0, bytes = 0;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		u32 hash;
		int mac;

		ring = mtk_get_rx_ring(eth);
		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		if (!mtk_rx_get_desc(&trxd, rxd))
			break;

		/* find out which mac the packet comes from. values start at 1 */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) ||
		    (trxd.rxd4 & RX_DMA_SPECIAL_TAG))
			mac = 0;
		else
			mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
			       RX_DMA_FPORT_MASK) - 1;

		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
			     !eth->netdev[mac]))
			goto release_desc;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dev,
					  new_data + NET_SKB_PAD +
					  eth->ip_align,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		dma_unmap_single(eth->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(data);
			netdev->stats.rx_dropped++;
			goto skip_rx;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & eth->rx_dma_l4_valid)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);
		bytes += pktlen;

		hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
		if (hash != MTK_RXD4_FOE_ENTRY) {
			hash = jhash_1word(hash, 0);
			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
		}

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
		    (trxd.rxd2 & RX_DMA_VTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       RX_DMA_VID(trxd.rxd3));
		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);

skip_rx:
		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth);
	}

	eth->rx_packets += done;
	eth->rx_bytes += bytes;
	dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
			  &dim_sample);
	net_dim(&eth->rx_dim, dim_sample);

	return done;
}
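
/* TX completion. QDMA reports progress through a release pointer register
 * (DRX_PTR) walked by descriptor DMA address, while the MT7628 PDMA variant
 * below walks plain ring indices.
 */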
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
			    unsigned int *done, unsigned int *bytes)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = ring->last_free_ptr;
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac = 0;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
			mac = 1;

		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf, true);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	ring->last_free_ptr = cpu;
	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	return budget;
}

static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
			    unsigned int *done, unsigned int *bytes)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = ring->cpu_idx;
	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);

	while ((cpu != dma) && budget) {
		tx_buf = &ring->buf[cpu];
		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[0] += skb->len;
			done[0]++;
			budget--;
		}

		mtk_tx_unmap(eth, tx_buf, true);

		desc = &ring->dma[cpu];
		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
	}

	ring->cpu_idx = cpu;

	return budget;
}
static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct dim_sample dim_sample = {};
	unsigned int done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	int total = 0, i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
	else
		budget = mtk_poll_tx_pdma(eth, budget, done, bytes);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || !done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
		eth->tx_packets += done[i];
		eth->tx_bytes += bytes[i];
	}

	dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
			  &dim_sample);
	net_dim(&eth->tx_dim, dim_sample);

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return total;
}

static void mtk_handle_status_irq(struct mtk_eth *eth)
{
	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);

	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
		mtk_stats_update(eth);
		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
			MTK_INT_STATUS2);
	}
}
static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	int tx_done = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_handle_status_irq(eth);
	mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
			 mtk_r32(eth, eth->tx_int_status_reg),
			 mtk_r32(eth, eth->tx_int_mask_reg));
	}

	if (tx_done == budget)
		return budget;

	if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
		return budget;

	if (napi_complete_done(napi, tx_done))
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);

	return tx_done;
}

static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	int rx_done_total = 0;

	mtk_handle_status_irq(eth);

	do {
		int rx_done;

		mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
		rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
		rx_done_total += rx_done;

		if (unlikely(netif_msg_intr(eth))) {
			dev_info(eth->dev,
				 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
				 mtk_r32(eth, MTK_PDMA_INT_STATUS),
				 mtk_r32(eth, MTK_PDMA_INT_MASK));
		}

		if (rx_done_total == budget)
			return budget;

	} while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT);

	if (napi_complete_done(napi, rx_done_total))
		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);

	return rx_done_total;
}
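
/* The TX ring is allocated as a circular list of QDMA descriptors (txd2
 * holds the next descriptor's DMA address). On PDMA-only SoCs a second,
 * parallel ring of real hardware descriptors is kept in ring->dma_pdma.
 */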
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
				       &ring->phys, GFP_ATOMIC);
	if (!ring->dma)
		goto no_tx_mem;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
	}

	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
	 * only as the framework. The real HW descriptors are the PDMA
	 * descriptors in ring->dma_pdma.
	 */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
						    &ring->phys_pdma,
						    GFP_ATOMIC);
		if (!ring->dma_pdma)
			goto no_tx_mem;

		for (i = 0; i < MTK_DMA_SIZE; i++) {
			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
			ring->dma_pdma[i].txd4 = 0;
		}
	}

	ring->dma_size = MTK_DMA_SIZE;
	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
	ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
		mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			MTK_QTX_CRX_PTR);
		mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
			MTK_QTX_CFG(0));
	} else {
		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
		mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
	}

	return 0;

no_tx_mem:
	return -ENOMEM;
}
static void mtk_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth, &ring->buf[i], false);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}

	if (ring->dma_pdma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
				  ring->dma_pdma,
				  ring->phys_pdma);
		ring->dma_pdma = NULL;
	}
}
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	struct mtk_rx_ring *ring;
	int rx_data_len, rx_dma_size;
	int i;
	u32 offset = 0;

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		if (ring_no)
			return -EINVAL;
		ring = &eth->rx_ring_qdma;
		offset = 0x1000;
	} else {
		ring = &eth->rx_ring[ring_no];
	}

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = MTK_DMA_SIZE;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		ring->data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->data[i])
			return -ENOMEM;
	}

	ring->dma = dma_alloc_coherent(eth->dev,
				       rx_dma_size * sizeof(*ring->dma),
				       &ring->phys, GFP_ATOMIC);
	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->data[i] + NET_SKB_PAD + eth->ip_align,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			return -ENOMEM;
		ring->dma[i].rxd1 = (unsigned int)dma_addr;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			ring->dma[i].rxd2 = RX_DMA_LSO;
		else
			ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
	}
	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	ring->calc_idx = rx_dma_size - 1;
	ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);

	return 0;
}
static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i;

	if (ring->data && ring->dma) {
		for (i = 0; i < ring->dma_size; i++) {
			if (!ring->data[i])
				continue;
			if (!ring->dma[i].rxd1)
				continue;
			dma_unmap_single(eth->dev,
					 ring->dma[i].rxd1,
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  ring->dma_size * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
	int i;
	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;

	/* set LRO rings to auto-learn modes */
	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;

	/* validate LRO ring */
	ring_ctrl_dw2 |= MTK_RING_VLD;

	/* set AGE timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;

	/* set max AGG timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;

	/* set max LRO AGG count */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;

	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
	}

	/* IPv4 checksum update enable */
	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;

	/* switch priority comparison to packet count mode */
	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;

	/* bandwidth threshold setting */
	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);

	/* auto-learn score delta setting */
	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);

	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
		MTK_PDMA_LRO_ALT_REFRESH_TIMER);

	/* set HW LRO mode & the max aggregation count for rx packets */
	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);

	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;

	/* enable HW LRO */
	lro_ctrl_dw0 |= MTK_LRO_EN;

	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);

	return 0;
}

static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
{
	int i;
	u32 val;

	/* relinquish lro rings, flush aggregated packets */
	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);

	/* wait for relinquishments done */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
			msleep(20);
			continue;
		}
		break;
	}

	/* invalidate lro rings */
	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));

	/* disable HW LRO */
	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
}
static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));

	/* validate the IP setting */
	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
}

static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
}

static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
{
	int i;
	int cnt = 0;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i])
			cnt++;
	}

	return cnt;
}
static int mtk_hwlro_add_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if ((fsp->flow_type != TCP_V4_FLOW) ||
	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
	    (fsp->location > 1))
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);

	return 0;
}

static int mtk_hwlro_del_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if (fsp->location > 1)
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = 0;
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);

	return 0;
}

static void mtk_hwlro_netdev_disable(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i, hwlro_idx;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		mac->hwlro_ip[i] = 0;
		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;

		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
	}

	mac->hwlro_ip_cnt = 0;
}
static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
				    struct ethtool_rxnfc *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;

	/* only tcp dst ipv4 is meaningful, others are meaningless */
	fsp->flow_type = TCP_V4_FLOW;
	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
	fsp->m_u.tcp_ip4_spec.ip4dst = 0;

	fsp->h_u.tcp_ip4_spec.ip4src = 0;
	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
	fsp->h_u.tcp_ip4_spec.psrc = 0;
	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
	fsp->h_u.tcp_ip4_spec.pdst = 0;
	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
	fsp->h_u.tcp_ip4_spec.tos = 0;
	fsp->m_u.tcp_ip4_spec.tos = 0xff;

	return 0;
}

static int mtk_hwlro_get_fdir_all(struct net_device *dev,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i]) {
			rule_locs[cnt] = i;
			cnt++;
		}
	}

	cmd->rule_cnt = cnt;

	return 0;
}
static netdev_features_t mtk_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	if (!(features & NETIF_F_LRO)) {
		struct mtk_mac *mac = netdev_priv(dev);
		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);

		if (ip_cnt) {
			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");

			features |= NETIF_F_LRO;
		}
	}

	return features;
}

static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
	int err = 0;

	if (!((dev->features ^ features) & NETIF_F_LRO))
		return 0;

	if (!(features & NETIF_F_LRO))
		mtk_hwlro_netdev_disable(dev);

	return err;
}
/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned int reg;
	int ret;
	u32 val;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		reg = MTK_QDMA_GLO_CFG;
	else
		reg = MTK_PDMA_GLO_CFG;

	ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
					!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
					5, MTK_DMA_BUSY_TIMEOUT_US);
	if (ret)
		dev_err(eth->dev, "DMA init timeout\n");

	return ret;
}
static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;
	u32 i;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* QDMA needs scratch memory for internal reordering of the
		 * descriptors
		 */
		err = mtk_init_fq_dma(eth);
		if (err)
			return err;
	}

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
		if (err)
			return err;
	}

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
	if (err)
		return err;

	if (eth->hwlro) {
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
			if (err)
				return err;
		}
		err = mtk_hwlro_rx_init(eth);
		if (err)
			return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* Enable random early drop and set drop threshold
		 * automatically
		 */
		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
			FC_THRES_MIN, MTK_QDMA_FC_THRES);
		mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
	}

	return 0;
}
static void mtk_dma_free(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	if (eth->scratch_ring) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
				  eth->scratch_ring,
				  eth->phy_scratch_ring);
		eth->scratch_ring = NULL;
		eth->phy_scratch_ring = 0;
	}
	mtk_tx_clean(eth);
	mtk_rx_clean(eth, &eth->rx_ring[0]);
	mtk_rx_clean(eth, &eth->rx_ring_qdma);

	if (eth->hwlro) {
		mtk_hwlro_rx_uninit(eth);
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
			mtk_rx_clean(eth, &eth->rx_ring[i]);
	}

	kfree(eth->scratch_head);
}
static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	schedule_work(&eth->pending_work);
}
static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	eth->rx_events++;
	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	eth->tx_events++;
	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		__napi_schedule(&eth->tx_napi);
		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
		if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
			mtk_handle_irq_rx(irq, _eth);
	}
	if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
		if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
			mtk_handle_irq_tx(irq, _eth);
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	mtk_handle_irq_rx(eth->irq[2], dev);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
}
#endif
static int mtk_start_dma(struct mtk_eth *eth)
{
	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth,
			MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
			MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
			MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
			MTK_RX_BT_32DWORDS,
			MTK_QDMA_GLO_CFG);

		mtk_w32(eth,
			MTK_RX_DMA_EN | rx_2b_offset |
			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
			MTK_PDMA_GLO_CFG);
	} else {
		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
			MTK_PDMA_GLO_CFG);
	}

	return 0;
}
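
/* GDM forwarding setup: each GDMA port is pointed at the PSE destination in
 * @config (the PDMA or, with flow offload, the PPE), with RX checksum
 * offload enabled and the MTK special tag turned on when DSA is in use.
 */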
static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
{
	int i;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		return;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

		/* default setup the forward port to send frame to PDMA */
		val &= ~0xffff;

		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

		val |= config;

		if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
			val |= MTK_GDMA_SPECIAL_TAG;

		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
	}
	/* Reset and enable PSE */
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);
}
static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int err;

	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
	if (err) {
		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
			   err);
		return err;
	}

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!refcount_read(&eth->dma_refcnt)) {
		u32 gdm_config = MTK_GDMA_TO_PDMA;

		err = mtk_start_dma(eth);
		if (err)
			return err;

		if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
			gdm_config = MTK_GDMA_TO_PPE;

		mtk_gdm_config(eth, gdm_config);

		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi);
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
		refcount_set(&eth->dma_refcnt, 1);
	} else {
		refcount_inc(&eth->dma_refcnt);
	}

	phylink_start(mac->phylink);
	netif_start_queue(dev);

	return 0;
}
static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_bh(&eth->page_lock);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}

static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phylink_stop(mac->phylink);

	netif_tx_disable(dev);

	phylink_disconnect_phy(mac->phylink);

	/* only shutdown DMA if this is the last user */
	if (!refcount_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi);

	cancel_work_sync(&eth->rx_dim.work);
	cancel_work_sync(&eth->tx_dim.work);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);

	mtk_dma_free(eth);

	if (eth->soc->offload_version)
		mtk_ppe_stop(&eth->ppe);

	return 0;
}
2392 static void ethsys_reset(struct mtk_eth
*eth
, u32 reset_bits
)
2394 regmap_update_bits(eth
->ethsys
, ETHSYS_RSTCTRL
,
2398 usleep_range(1000, 1100);
2399 regmap_update_bits(eth
->ethsys
, ETHSYS_RSTCTRL
,

static void mtk_clk_disable(struct mtk_eth *eth)
{
	int clk;

	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
		clk_disable_unprepare(eth->clks[clk]);
}

static int mtk_clk_enable(struct mtk_eth *eth)
{
	int clk, ret;

	for (clk = 0; clk < MTK_CLK_MAX; clk++) {
		ret = clk_prepare_enable(eth->clks[clk]);
		if (ret)
			goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	while (--clk >= 0)
		clk_disable_unprepare(eth->clks[clk]);

	return ret;
}
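
/* Net DIM RX worker: fold the sampled interrupt-moderation profile into
 * the PDMA (and, where present, QDMA) delay-interrupt register. The
 * hardware time field appears to count in 20 us units, hence the
 * DIV_ROUND_UP(usec, 20); both fields are clamped to their mask widths.
 * The TX variant below mirrors this.
 */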
static void mtk_dim_rx(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
	struct dim_cq_moder cur_profile;
	u32 val, cur;

	cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
						dim->profile_ix);
	spin_lock_bh(&eth->dim_lock);

	val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
	val &= MTK_PDMA_DELAY_TX_MASK;
	val |= MTK_PDMA_DELAY_RX_EN;

	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20),
		    MTK_PDMA_DELAY_PTIME_MASK);
	val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;

	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
	val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;

	mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_w32(eth, val, MTK_QDMA_DELAY_INT);

	spin_unlock_bh(&eth->dim_lock);

	dim->state = DIM_START_MEASURE;
}

static void mtk_dim_tx(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
	struct dim_cq_moder cur_profile;
	u32 val, cur;

	cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
						dim->profile_ix);
	spin_lock_bh(&eth->dim_lock);

	val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
	val &= MTK_PDMA_DELAY_RX_MASK;
	val |= MTK_PDMA_DELAY_TX_EN;

	cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20),
		    MTK_PDMA_DELAY_PTIME_MASK);
	val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;

	cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
	val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;

	mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_w32(eth, val, MTK_QDMA_DELAY_INT);

	spin_unlock_bh(&eth->dim_lock);

	dim->state = DIM_START_MEASURE;
}
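
/* One-time hardware bring-up: enable runtime PM and clocks, reset the
 * frame engine, set pad driving (where a pctl regmap exists), force all
 * MACs link-down until phylink reconfigures them, and program the
 * default interrupt grouping. MT7628/88 only needs a device reset plus
 * interrupt setup, so it returns early.
 */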
static int mtk_hw_init(struct mtk_eth *eth)
{
	int i, val, ret;

	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
		return 0;

	pm_runtime_enable(eth->dev);
	pm_runtime_get_sync(eth->dev);

	ret = mtk_clk_enable(eth);
	if (ret)
		goto err_disable_pm;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		ret = device_reset(eth->dev);
		if (ret) {
			dev_err(eth->dev, "MAC reset failed!\n");
			goto err_disable_pm;
		}

		/* set interrupt delays based on current Net DIM sample */
		mtk_dim_rx(&eth->rx_dim.work);
		mtk_dim_tx(&eth->tx_dim.work);

		/* disable delay and normal interrupt */
		mtk_tx_irq_disable(eth, ~0);
		mtk_rx_irq_disable(eth, ~0);

		return 0;
	}

	/* Non-MT7628 handling... */
	ethsys_reset(eth, RSTCTRL_FE);
	ethsys_reset(eth, RSTCTRL_PPE);

	if (eth->pctl) {
		/* Set GE2 driving and slew rate */
		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

		/* set GE2 TDSEL */
		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

		/* set GE2 TUNE */
		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
	}

	/* Set link-down as the default for each GMAC. Each GMAC's own MCR is
	 * set up with a more appropriate value when mtk_mac_config() is
	 * invoked.
	 */
	for (i = 0; i < MTK_MAC_COUNT; i++)
		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));

	/* Tell the CDM to parse the MTK special tag from CPU-originated
	 * frames; this also works for untagged packets.
	 */
	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);

	/* Enable RX VLAN offloading */
	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

	/* set interrupt delays based on current Net DIM sample */
	mtk_dim_rx(&eth->rx_dim.work);
	mtk_dim_tx(&eth->tx_dim.work);

	/* disable delay and normal interrupt */
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);

	/* FE int grouping */
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

	return 0;

err_disable_pm:
	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return ret;
}

static int mtk_hw_deinit(struct mtk_eth *eth)
{
	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
		return 0;

	mtk_clk_disable(eth);

	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return 0;
}
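
/* ndo_init: take the MAC address from the device tree; if none is
 * usable, fall back to a random address and log it.
 */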
static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int ret;

	ret = of_get_mac_address(mac->of_node, dev->dev_addr);
	if (ret) {
		/* If the mac address is invalid, use a random mac address */
		eth_hw_addr_random(dev);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	return 0;
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phylink_disconnect_phy(mac->phylink);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
}
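
/* The MAC bounds RX frame length in coarse steps rather than per byte,
 * so pick the smallest MAC_MCR_MAX_RX_* step that still fits the new MTU
 * plus Ethernet header overhead. MT7628/88 has no such MCR field.
 */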
static int mtk_change_mtu(struct net_device *dev, int new_mtu)
{
	int length = new_mtu + MTK_RX_ETH_HLEN;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 mcr_cur, mcr_new;

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;

		if (length <= 1518)
			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
		else if (length <= 1536)
			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
		else if (length <= 1552)
			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
		else
			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);

		if (mcr_new != mcr_cur)
			mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
	}

	dev->mtu = new_mtu;

	return 0;
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}
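
/* Deferred reset worker: while holding RTNL, stop every active netdev,
 * tear down and re-initialise the hardware (clocks, resets, pinctrl),
 * then reopen the netdevs that were running, closing any that fail.
 */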
static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	unsigned long restart = 0;
	int err, i;

	rtnl_lock();

	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);

	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
		cpu_relax();

	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}
	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);

	/* restart underlying hardware such as power, clock, pin mux
	 * and the connected phy
	 */
	mtk_hw_deinit(eth);

	if (eth->dev->pins)
		pinctrl_select_state(eth->dev->pins->p,
				     eth->dev->pins->default_state);
	mtk_hw_init(eth);

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart))
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed, closing device.\n");
			dev_close(eth->netdev[i]);
		}
	}

	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);

	clear_bit_unlock(MTK_RESETTING, &eth->state);

	rtnl_unlock();
}

static int mtk_free_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		free_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_unreg_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		unregister_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	mtk_unreg_dev(eth);
	mtk_free_dev(eth);
	cancel_work_sync(&eth->pending_work);

	return 0;
}

static int mtk_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
}

static int mtk_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	if (!mac->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_nway_reset(mac->phylink);
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}
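
/* Snapshot the hardware counters for ethtool. The trylock keeps this
 * path from blocking on a concurrent stats refresh, and the u64_stats
 * begin/retry loop re-copies the block if a writer raced with us.
 */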
static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hwstats->stats_lock);
		}
	}

	data_src = (u64 *)hwstats;

	do {
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		if (dev->hw_features & NETIF_F_LRO) {
			cmd->data = MTK_MAX_RX_RING_NUM;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (dev->hw_features & NETIF_F_LRO) {
			struct mtk_mac *mac = netdev_priv(dev);

			cmd->rule_cnt = mac->hwlro_ip_cnt;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_all(dev, cmd,
						     rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_add_ipaddr(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_del_ipaddr(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops mtk_ethtool_ops = {
	.get_link_ksettings	= mtk_get_link_ksettings,
	.set_link_ksettings	= mtk_set_link_ksettings,
	.get_drvinfo		= mtk_get_drvinfo,
	.get_msglevel		= mtk_get_msglevel,
	.set_msglevel		= mtk_set_msglevel,
	.nway_reset		= mtk_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mtk_get_strings,
	.get_sset_count		= mtk_get_sset_count,
	.get_ethtool_stats	= mtk_get_ethtool_stats,
	.get_rxnfc		= mtk_get_rxnfc,
	.set_rxnfc		= mtk_set_rxnfc,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init		= mtk_init,
	.ndo_uninit		= mtk_uninit,
	.ndo_open		= mtk_open,
	.ndo_stop		= mtk_stop,
	.ndo_start_xmit		= mtk_start_xmit,
	.ndo_set_mac_address	= mtk_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= mtk_do_ioctl,
	.ndo_change_mtu		= mtk_change_mtu,
	.ndo_tx_timeout		= mtk_tx_timeout,
	.ndo_get_stats64	= mtk_get_stats64,
	.ndo_fix_features	= mtk_fix_features,
	.ndo_set_features	= mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mtk_poll_controller,
#endif
	.ndo_setup_tc		= mtk_eth_setup_tc,
};
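
/* Instantiate one GMAC from a "mediatek,eth-mac" DT node: allocate the
 * netdev and per-MAC hardware stats, create a phylink instance from the
 * node's phy-mode, and hook up the netdev/ethtool ops declared above.
 */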
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	const __be32 *_id = of_get_property(np, "reg", NULL);
	phy_interface_t phy_mode;
	struct phylink *phylink;
	struct mtk_mac *mac;
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
	mac->hwlro_ip_cnt = 0;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	/* phylink create */
	err = of_get_phy_mode(np, &phy_mode);
	if (err) {
		dev_err(eth->dev, "incorrect phy-mode\n");
		goto free_netdev;
	}

	/* mac config is not set */
	mac->interface = PHY_INTERFACE_MODE_NA;
	mac->mode = MLO_AN_PHY;
	mac->speed = SPEED_UNKNOWN;

	mac->phylink_config.dev = &eth->netdev[id]->dev;
	mac->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&mac->phylink_config,
				 of_fwnode_handle(mac->of_node),
				 phy_mode, &mtk_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto free_netdev;
	}

	mac->phylink = phylink;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	eth->netdev[id]->hw_features = eth->soc->hw_features;
	if (eth->hwlro)
		eth->netdev[id]->hw_features |= NETIF_F_LRO;

	eth->netdev[id]->vlan_features = eth->soc->hw_features &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= eth->soc->hw_features;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[0];
	eth->netdev[id]->dev.of_node = np;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
	else
		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}
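
/* Probe: map the frame-engine MMIO, fetch the syscon regmaps, IRQs and
 * clocks described by the per-SoC match data, initialise the hardware,
 * then create and register one netdev per child MAC node. A dummy netdev
 * hosts the TX/RX NAPI contexts shared by all MACs.
 */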
static int mtk_probe(struct platform_device *pdev)
{
	struct device_node *mac_np;
	struct mtk_eth *eth;
	int err, i;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->soc = of_device_get_match_data(&pdev->dev);

	eth->dev = &pdev->dev;
	eth->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
		eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
	} else {
		eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
		eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
		eth->ip_align = NET_IP_ALIGN;
	} else {
		eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
	}

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->tx_irq_lock);
	spin_lock_init(&eth->rx_irq_lock);
	spin_lock_init(&eth->dim_lock);

	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);

	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							      "mediatek,ethsys");
		if (IS_ERR(eth->ethsys)) {
			dev_err(&pdev->dev, "no ethsys regmap found\n");
			return PTR_ERR(eth->ethsys);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							     "mediatek,infracfg");
		if (IS_ERR(eth->infra)) {
			dev_err(&pdev->dev, "no infracfg regmap found\n");
			return PTR_ERR(eth->infra);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
					  GFP_KERNEL);
		if (!eth->sgmii)
			return -ENOMEM;

		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
				     eth->soc->ana_rgc3);
		if (err)
			return err;
	}

	if (eth->soc->required_pctl) {
		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "mediatek,pctl");
		if (IS_ERR(eth->pctl)) {
			dev_err(&pdev->dev, "no pctl regmap found\n");
			return PTR_ERR(eth->pctl);
		}
	}

	for (i = 0; i < 3; i++) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
			eth->irq[i] = eth->irq[0];
		else
			eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}
	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			if (eth->soc->required_clks & BIT(i)) {
				dev_err(&pdev->dev, "clock %s not found\n",
					mtk_clks_source_name[i]);
				return -EINVAL;
			}
			eth->clks[i] = NULL;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err) {
			of_node_put(mac_np);
			goto err_deinit_hw;
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
		err = devm_request_irq(eth->dev, eth->irq[0],
				       mtk_handle_irq, 0,
				       dev_name(eth->dev), eth);
	} else {
		err = devm_request_irq(eth->dev, eth->irq[1],
				       mtk_handle_irq_tx, 0,
				       dev_name(eth->dev), eth);
		if (err)
			goto err_free_dev;

		err = devm_request_irq(eth->dev, eth->irq[2],
				       mtk_handle_irq_rx, 0,
				       dev_name(eth->dev), eth);
	}
	if (err)
		goto err_free_dev;

	/* No MT7628/88 support yet */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		err = mtk_mdio_init(eth);
		if (err)
			goto err_free_dev;
	}

	if (eth->soc->offload_version) {
		err = mtk_ppe_init(&eth->ppe, eth->dev,
				   eth->base + MTK_ETH_PPE_BASE, 2);
		if (err)
			goto err_free_dev;

		err = mtk_eth_offload_init(eth);
		if (err)
			goto err_free_dev;
	}

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		}
		netif_info(eth, probe, eth->netdev[i],
			   "mediatek frame engine at 0x%08lx, irq %d\n",
			   eth->netdev[i]->base_addr, eth->irq[0]);
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}

static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	struct mtk_mac *mac;
	int i;

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		mac = netdev_priv(eth->netdev[i]);
		phylink_disconnect_phy(mac->phylink);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	mtk_mdio_cleanup(eth);

	return 0;
}
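
/* Per-SoC capability and clock tables referenced by the OF match data
 * at the bottom of the file.
 */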
static const struct mtk_soc_data mt2701_data = {
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

static const struct mtk_soc_data mt7621_data = {
	.caps = MT7621_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7621_CLKS_BITMAP,
	.required_pctl = false,
	.offload_version = 2,
};

static const struct mtk_soc_data mt7622_data = {
	.ana_rgc3 = 0x2028,
	.caps = MT7622_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7622_CLKS_BITMAP,
	.required_pctl = false,
	.offload_version = 2,
};

static const struct mtk_soc_data mt7623_data = {
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
	.offload_version = 2,
};

static const struct mtk_soc_data mt7629_data = {
	.ana_rgc3 = 0x128,
	.caps = MT7629_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7629_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data rt5350_data = {
	.caps = MT7628_CAPS,
	.hw_features = MTK_HW_FEATURES_MT7628,
	.required_clks = MT7628_CLKS_BITMAP,
	.required_pctl = false,
};

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);
3358 MODULE_LICENSE("GPL");
3359 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
3360 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");