/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define MTK_ETHTOOL_STAT(x) { #x, \
			     offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "esw", "gp1", "gp2", "trgpll"
};

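/* Raw accessors for the frame-engine register window. These use the
 * __raw variants, which skip the readl()/writel() barriers; the DMA
 * paths issue explicit wmb() calls where ordering matters.
 */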
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

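/* PHY registers are reached indirectly through the PHY Indirect Access
 * Control (IAC) register: wait for the ACCESS bit to clear, issue the
 * read/write command, then wait again before trusting the data field.
 */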
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
			   u32 phy_register, u32 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

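/* GMAC0 drives its (T)RGMII interface at a speed-dependent clock, so
 * on every link-speed change the MAC interface mode and the TRGMII
 * PLL rate both have to be reprogrammed.
 */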
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
{
	u32 val;
	int ret;

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

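/* PHY library callback, run on every link change: rebuild the MAC
 * control word (speed, duplex and the resolved pause configuration)
 * from scratch and mirror the PHY link state into the carrier state.
 */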
static void mtk_phy_link_adjust(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
		  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
		  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
		  MAC_MCR_BACKPR_EN;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	switch (dev->phydev->speed) {
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	if (mac->id == 0 && !mac->trgmii)
		mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);

	if (dev->phydev->link)
		mcr |= MAC_MCR_FORCE_LINK;

	if (dev->phydev->duplex) {
		mcr |= MAC_MCR_FORCE_DPX;

		if (dev->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (dev->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (dev->phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (dev->phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= MAC_MCR_FORCE_RX_FC;

		netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
			  flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
			  flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

	if (dev->phydev->link)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}

static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
				struct device_node *phy_node)
{
	struct phy_device *phydev;
	int phy_mode;

	phy_mode = of_get_phy_mode(phy_node);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
		return -EINVAL;
	}

	phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
				mtk_phy_link_adjust, 0, phy_mode);
	if (!phydev) {
		dev_err(eth->dev, "could not connect to PHY\n");
		return -ENODEV;
	}

	dev_info(eth->dev,
		 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
		 mac->id, phydev_name(phydev), phydev->phy_id,
		 phydev->drv->name);

	return 0;
}

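/* ge_mode encodes the PHY interface for the ETHSYS syscon:
 * 0 = (T)RGMII, 1 = MII, 2 = reverse MII, 3 = RMII (not available on
 * GMAC0). TRGMII also falls through to the RGMII handling below.
 */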
static int mtk_phy_connect(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth;
	struct device_node *np;
	u32 val;

	eth = mac->hw;
	np = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!np && of_phy_is_fixed_link(mac->of_node))
		if (!of_phy_register_fixed_link(mac->of_node))
			np = of_node_get(mac->of_node);
	if (!np)
		return -ENODEV;

	switch (of_get_phy_mode(np)) {
	case PHY_INTERFACE_MODE_TRGMII:
		mac->trgmii = true;
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII:
		mac->ge_mode = 0;
		break;
	case PHY_INTERFACE_MODE_MII:
		mac->ge_mode = 1;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		mac->ge_mode = 2;
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!mac->id)
			goto err_phy;
		mac->ge_mode = 3;
		break;
	default:
		goto err_phy;
	}

	/* put the gmac into the right mode */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
	val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	/* couple phydev to net_device */
	if (mtk_phy_connect_node(eth, mac, np))
		goto err_phy;

	dev->phydev->autoneg = AUTONEG_ENABLE;
	dev->phydev->speed = 0;
	dev->phydev->duplex = 0;

	if (of_phy_is_fixed_link(mac->of_node))
		dev->phydev->supported |=
		SUPPORTED_Pause | SUPPORTED_Asym_Pause;

	dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
				  SUPPORTED_Asym_Pause;
	dev->phydev->advertising = dev->phydev->supported |
				   ADVERTISED_Autoneg;
	phy_start_aneg(dev->phydev);

	of_node_put(np);

	return 0;

err_phy:
	if (of_phy_is_fixed_link(mac->of_node))
		of_phy_deregister_fixed_link(mac->of_node);
	of_node_put(np);
	dev_err(eth->dev, "%s: invalid phy\n", __func__);
	return -EINVAL;
}

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

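/* The PDMA/QDMA interrupt mask registers are shared between the RX
 * and TX paths, so the read-modify-write below is serialized with
 * irq_lock.
 */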
static inline void mtk_irq_disable(struct mtk_eth *eth,
				   unsigned reg, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->irq_lock, flags);
	val = mtk_r32(eth, reg);
	mtk_w32(eth, val & ~mask, reg);
	spin_unlock_irqrestore(&eth->irq_lock, flags);
}

static inline void mtk_irq_enable(struct mtk_eth *eth,
				  unsigned reg, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->irq_lock, flags);
	val = mtk_r32(eth, reg);
	mtk_w32(eth, val | mask, reg);
	spin_unlock_irqrestore(&eth->irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
		MTK_GDMA_MAC_ADRH(mac->id));
	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
		(macaddr[4] << 8) | macaddr[5],
		MTK_GDMA_MAC_ADRL(mac->id));
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

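/* The GDMA MIB counters are 32 bits wide (the byte counters have an
 * extra high word); fold each readout into the 64-bit software copies
 * under the u64_stats write seqcount.
 */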
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
		mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats = mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;

	return storage;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

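/* Every free-queue descriptor points at one page of the scratch area
 * and is chained to its successor by physical address, giving the
 * QDMA engine a buffer pool it can draw from on its own.
 */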
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
					       &eth->phy_scratch_ring,
					       GFP_ATOMIC | __GFP_ZERO);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
	phy_ring_tail = eth->phy_scratch_ring +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}

static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}

static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
		dma_unmap_single(eth->dev,
				 dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
		dma_unmap_page(eth->dev,
			       dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
	}
	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}

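/* Map an skb onto the TX ring: one descriptor for the linear part,
 * plus one per fragment chunk of at most MTK_TX_DMA_BUF_LEN bytes.
 * The last descriptor is flagged with TX_DMA_LS0 and its tx_buf keeps
 * the skb pointer for completion accounting.
 */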
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_buf *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0, fport;

	itxd = ring->next_free;
	if (itxd == ring->last_free)
		return -ENOMEM;

	/* set the forward port */
	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
	txd4 |= fport;

	tx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(tx_buf, 0, sizeof(*tx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(eth->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	txd = itxd;
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;

			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
			if (txd == ring->last_free)
				goto err_dma;

			n_desc++;
			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0));
			WRITE_ONCE(txd->txd4, fport);

			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			memset(tx_buf, 0, sizeof(*tx_buf));

			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	tx_buf->skb = skb;

	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
	} while (itxd != txd);

	return -ENOMEM;
}

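/* Worst-case descriptor count for an skb: for GSO each fragment may
 * need to be split into MTK_TX_DMA_BUF_LEN sized chunks.
 */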
static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	struct skb_frag_struct *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static void mtk_stop_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_stop_queue(eth->netdev[i]);
	}
}

static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		mtk_stop_queue(eth);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		mtk_stop_queue(eth);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

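/* With hardware LRO active, RX traffic is spread over several rings:
 * return the first ring with a completed descriptor pending and mark
 * it so that its CPU index gets written back later.
 */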
static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
{
	int i;
	struct mtk_rx_ring *ring;
	int idx;

	if (!eth->hwlro)
		return &eth->rx_ring[0];

	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
		ring = &eth->rx_ring[i];
		idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
			ring->calc_idx_update = true;
			return ring;
		}
	}

	return NULL;
}

static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int i;

	if (!eth->hwlro) {
		ring = &eth->rx_ring[0];
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	} else {
		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
			ring = &eth->rx_ring[i];
			if (ring->calc_idx_update) {
				ring->calc_idx_update = false;
				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
			}
		}
	}
}

static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac = 0;

		ring = mtk_get_rx_ring(eth);
		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		mtk_rx_get_desc(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* find out which mac the packet come from. values start at 1 */
		mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
		      RX_DMA_FPORT_MASK;
		mac--;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dev,
					  new_data + NET_SKB_PAD,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(eth->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & RX_DMA_L4_VALID)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
		    RX_DMA_VID(trxd.rxd3))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       RX_DMA_VID(trxd.rxd3));
		napi_gro_receive(napi, skb);

		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth);
	}

	return done;
}

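/* Reclaim completed TX descriptors between the CPU and DMA pointers
 * of the QDMA ring, unmap their buffers and credit the bytes/packets
 * that left the wire back to the per-device BQL queue.
 */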
static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	unsigned int done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	u32 cpu, dma;
	static int condition;
	int total = 0, i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
		      TX_DMA_FPORT_MASK;
		mac--;

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		skb = tx_buf->skb;
		if (!skb) {
			condition = 1;
			break;
		}

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || !done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
	}

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return total;
}

static void mtk_handle_status_irq(struct mtk_eth *eth)
{
	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);

	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
		mtk_stats_update(eth);
		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
			MTK_INT_STATUS2);
	}
}

static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	u32 status, mask;
	int tx_done = 0;

	mtk_handle_status_irq(eth);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	if (tx_done == budget)
		return budget;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	if (status & MTK_TX_DONE_INT)
		return budget;

	napi_complete(napi);
	mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);

	return tx_done;
}

static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, mask;
	int rx_done = 0;
	int remain_budget = budget;

	mtk_handle_status_irq(eth);

poll_again:
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
	rx_done = mtk_poll_rx(napi, remain_budget, eth);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
		dev_info(eth->dev,
			 "done rx %d, intr 0x%08x/0x%x\n",
			 rx_done, status, mask);
	}
	if (rx_done == remain_budget)
		return budget;

	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
	if (status & MTK_RX_DONE_INT) {
		remain_budget -= rx_done;
		goto poll_again;
	}
	napi_complete(napi);
	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);

	return rx_done + budget - remain_budget;
}

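/* The TX ring is a fixed array of QDMA descriptors chained into a
 * circular list by physical address. Two descriptors are held back
 * (free_count starts at MTK_DMA_SIZE - 2) so next_free can never run
 * into last_free.
 */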
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	ring->dma = dma_alloc_coherent(eth->dev,
				       MTK_DMA_SIZE * sz,
				       &ring->phys,
				       GFP_ATOMIC | __GFP_ZERO);
	if (!ring->dma)
		goto no_tx_mem;

	memset(ring->dma, 0, MTK_DMA_SIZE * sz);
	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
	}

	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
	mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_CRX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_DRX_PTR);
	mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static void mtk_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth, &ring->buf[i]);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

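/* Ring 0 is the normal PDMA RX ring. The remaining rings are only
 * filled when hardware LRO is in use and get buffers sized for
 * aggregated frames (MTK_MAX_LRO_RX_LENGTH) instead of ETH_DATA_LEN.
 */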
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
	int rx_data_len, rx_dma_size;
	int i;

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = MTK_DMA_SIZE;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		ring->data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->data[i])
			return -ENOMEM;
	}

	ring->dma = dma_alloc_coherent(eth->dev,
				       rx_dma_size * sizeof(*ring->dma),
				       &ring->phys,
				       GFP_ATOMIC | __GFP_ZERO);
	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->data[i] + NET_SKB_PAD,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			return -ENOMEM;
		ring->dma[i].rxd1 = (unsigned int)dma_addr;

		ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
	}
	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	ring->calc_idx = rx_dma_size - 1;
	ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);

	return 0;
}

static void mtk_rx_clean(struct mtk_eth *eth, int ring_no)
{
	struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
	int i;

	if (ring->data && ring->dma) {
		for (i = 0; i < ring->dma_size; i++) {
			if (!ring->data[i])
				continue;
			if (!ring->dma[i].rxd1)
				continue;
			dma_unmap_single(eth->dev,
					 ring->dma[i].rxd1,
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  ring->dma_size * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

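/* Rings 1..MTK_MAX_RX_RING_NUM-1 are handed to the LRO engine in
 * auto-learn mode, i.e. the hardware itself picks the TCP flows to
 * aggregate within the age/aggregation limits programmed below.
 */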
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
	int i;
	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;

	/* set LRO rings to auto-learn modes */
	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;

	/* validate LRO ring */
	ring_ctrl_dw2 |= MTK_RING_VLD;

	/* set AGE timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;

	/* set max AGG timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;

	/* set max LRO AGG count */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;

	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
	}

	/* IPv4 checksum update enable */
	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;

	/* switch priority comparison to packet count mode */
	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;

	/* bandwidth threshold setting */
	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);

	/* auto-learn score delta setting */
	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);

	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
		MTK_PDMA_LRO_ALT_REFRESH_TIMER);

	/* set HW LRO mode & the max aggregation count for rx packets */
	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);

	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;

	/* enable HW LRO */
	lro_ctrl_dw0 |= MTK_LRO_EN;

	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);

	return 0;
}

static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
{
	int i;
	u32 val;

	/* relinquish lro rings, flush aggregated packets */
	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);

	/* wait for relinquishments done */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
			msleep(20);
			continue;
		}
		break;
	}

	/* invalidate lro rings */
	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));

	/* disable HW LRO */
	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
}

static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));

	/* validate the IP setting */
	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
}

static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
}

static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
{
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i])
			cnt++;
	}

	return cnt;
}

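/* LRO flows are steered by destination IPv4 address. Userspace
 * programs them through the ethtool flow-director API; the rule
 * location doubles as the per-MAC LRO IP slot (at most two per MAC
 * are accepted here).
 */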
static int mtk_hwlro_add_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if ((fsp->flow_type != TCP_V4_FLOW) ||
	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
	    (fsp->location > 1))
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);

	return 0;
}

static int mtk_hwlro_del_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if (fsp->location > 1)
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = 0;
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);

	return 0;
}

static void mtk_hwlro_netdev_disable(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i, hwlro_idx;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		mac->hwlro_ip[i] = 0;
		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;

		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
	}

	mac->hwlro_ip_cnt = 0;
}

static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
				    struct ethtool_rxnfc *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;

	/* only tcp dst ipv4 is meaningful, others are meaningless */
	fsp->flow_type = TCP_V4_FLOW;
	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
	fsp->m_u.tcp_ip4_spec.ip4dst = 0;

	fsp->h_u.tcp_ip4_spec.ip4src = 0;
	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
	fsp->h_u.tcp_ip4_spec.psrc = 0;
	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
	fsp->h_u.tcp_ip4_spec.pdst = 0;
	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
	fsp->h_u.tcp_ip4_spec.tos = 0;
	fsp->m_u.tcp_ip4_spec.tos = 0xff;

	return 0;
}

static int mtk_hwlro_get_fdir_all(struct net_device *dev,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i]) {
			rule_locs[cnt] = i;
			cnt++;
		}
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static netdev_features_t mtk_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	if (!(features & NETIF_F_LRO)) {
		struct mtk_mac *mac = netdev_priv(dev);
		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);

		if (ip_cnt) {
			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");

			features |= NETIF_F_LRO;
		}
	}

	return features;
}

static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
	int err = 0;

	if (!((dev->features ^ features) & NETIF_F_LRO))
		return 0;

	if (!(features & NETIF_F_LRO))
		mtk_hwlro_netdev_disable(dev);

	return err;
}

/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
		      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
			return 0;
		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "DMA init timeout\n");
	return -1;
}

static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;
	u32 i;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	/* QDMA needs scratch memory for internal reordering of the
	 * descriptors
	 */
	err = mtk_init_fq_dma(eth);
	if (err)
		return err;

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
	if (err)
		return err;

	if (eth->hwlro) {
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
			if (err)
				return err;
		}
		err = mtk_hwlro_rx_init(eth);
		if (err)
			return err;
	}

	/* Enable random early drop and set drop threshold automatically */
	mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
		MTK_QDMA_FC_THRES);
	mtk_w32(eth, 0x0, MTK_QDMA_HRED2);

	return 0;
}

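/* Undo everything mtk_dma_init() set up. Every step checks for
 * NULL/unallocated state first, so this is also safe to call after a
 * partially failed init.
 */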
static void mtk_dma_free(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	if (eth->scratch_ring) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
				  eth->scratch_ring,
				  eth->phy_scratch_ring);
		eth->scratch_ring = NULL;
		eth->phy_scratch_ring = 0;
	}
	mtk_tx_clean(eth);
	mtk_rx_clean(eth, 0);

	if (eth->hwlro) {
		mtk_hwlro_rx_uninit(eth);
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
			mtk_rx_clean(eth, i);
	}

	kfree(eth->scratch_head);
}

1652 | static void mtk_tx_timeout(struct net_device *dev) | |
1653 | { | |
1654 | struct mtk_mac *mac = netdev_priv(dev); | |
1655 | struct mtk_eth *eth = mac->hw; | |
1656 | ||
1657 | eth->netdev[mac->id]->stats.tx_errors++; | |
1658 | netif_err(eth, tx_err, dev, | |
1659 | "transmit timed out\n"); | |
7c78b4ad | 1660 | schedule_work(ð->pending_work); |
656e7052 JC |
1661 | } |
1662 | ||
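/* Hard IRQ handlers follow the usual NAPI pattern: schedule the matching
 * NAPI context and mask the corresponding DONE interrupt; the poll
 * routine is expected to unmask it again once the ring has been drained.
 */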
80673029 | 1663 | static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth) |
656e7052 JC |
1664 | { |
1665 | struct mtk_eth *eth = _eth; | |
656e7052 | 1666 | |
80673029 JC |
1667 | if (likely(napi_schedule_prep(ð->rx_napi))) { |
1668 | __napi_schedule(ð->rx_napi); | |
bacfd110 | 1669 | mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); |
80673029 | 1670 | } |
656e7052 | 1671 | |
80673029 JC |
1672 | return IRQ_HANDLED; |
1673 | } | |
1674 | ||
1675 | static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) | |
1676 | { | |
1677 | struct mtk_eth *eth = _eth; | |
1678 | ||
1679 | if (likely(napi_schedule_prep(ð->tx_napi))) { | |
1680 | __napi_schedule(ð->tx_napi); | |
bacfd110 | 1681 | mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); |
656e7052 | 1682 | } |
656e7052 JC |
1683 | |
1684 | return IRQ_HANDLED; | |
1685 | } | |
1686 | ||
1687 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
1688 | static void mtk_poll_controller(struct net_device *dev) | |
1689 | { | |
1690 | struct mtk_mac *mac = netdev_priv(dev); | |
1691 | struct mtk_eth *eth = mac->hw; | |
656e7052 | 1692 | |
bacfd110 NC |
1693 | mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); |
1694 | mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); | |
8186f6e3 | 1695 | mtk_handle_irq_rx(eth->irq[2], dev); |
bacfd110 NC |
1696 | mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); |
1697 | mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); | |
656e7052 JC |
1698 | } |
1699 | #endif | |
1700 | ||
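/* Bring up both DMA engines. TX is handled by the QDMA block and RX by
 * the PDMA block, which is why two separate GLO_CFG registers are
 * programmed here.
 */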
1701 | static int mtk_start_dma(struct mtk_eth *eth) | |
1702 | { | |
1703 | int err; | |
1704 | ||
1705 | err = mtk_dma_init(eth); | |
1706 | if (err) { | |
1707 | mtk_dma_free(eth); | |
1708 | return err; | |
1709 | } | |
1710 | ||
1711 | mtk_w32(eth, | |
bacfd110 NC |
1712 | MTK_TX_WB_DDONE | MTK_TX_DMA_EN | |
1713 | MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO, | |
656e7052 JC |
1714 | MTK_QDMA_GLO_CFG); |
1715 | ||
bacfd110 NC |
1716 | mtk_w32(eth, |
1717 | MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | | |
1718 | MTK_RX_BT_32DWORDS | MTK_MULTI_EN, | |
1719 | MTK_PDMA_GLO_CFG); | |
1720 | ||
656e7052 JC |
1721 | return 0; |
1722 | } | |
1723 | ||
1724 | static int mtk_open(struct net_device *dev) | |
1725 | { | |
1726 | struct mtk_mac *mac = netdev_priv(dev); | |
1727 | struct mtk_eth *eth = mac->hw; | |
1728 | ||
1729 | /* we run 2 netdevs on the same dma ring so we only bring it up once */ | |
1730 | if (!atomic_read(ð->dma_refcnt)) { | |
1731 | int err = mtk_start_dma(eth); | |
1732 | ||
1733 | if (err) | |
1734 | return err; | |
1735 | ||
80673029 | 1736 | napi_enable(ð->tx_napi); |
656e7052 | 1737 | napi_enable(ð->rx_napi); |
bacfd110 NC |
1738 | mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); |
1739 | mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); | |
656e7052 JC |
1740 | } |
1741 | atomic_inc(ð->dma_refcnt); | |
1742 | ||
2364c5c5 | 1743 | phy_start(dev->phydev); |
656e7052 JC |
1744 | netif_start_queue(dev); |
1745 | ||
1746 | return 0; | |
1747 | } | |
1748 | ||
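/* Gracefully stop one DMA engine: clear its enable bits under page_lock,
 * then poll the busy flags for up to ~200ms (10 x 20ms) so the hardware
 * can finish the descriptors it already owns.
 */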
1749 | static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) | |
1750 | { | |
656e7052 JC |
1751 | u32 val; |
1752 | int i; | |
1753 | ||
1754 | /* stop the DMA engine */ | |
e3e9652a | 1755 | spin_lock_bh(ð->page_lock); |
656e7052 JC |
1756 | val = mtk_r32(eth, glo_cfg); |
1757 | mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), | |
1758 | glo_cfg); | |
e3e9652a | 1759 | spin_unlock_bh(ð->page_lock); |
656e7052 JC |
1760 | |
1761 | /* wait for the DMA to stop */ | |
1762 | for (i = 0; i < 10; i++) { | |
1763 | val = mtk_r32(eth, glo_cfg); | |
1764 | if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) { | |
1765 | msleep(20); | |
1766 | continue; | |
1767 | } | |
1768 | break; | |
1769 | } | |
1770 | } | |
1771 | ||
1772 | static int mtk_stop(struct net_device *dev) | |
1773 | { | |
1774 | struct mtk_mac *mac = netdev_priv(dev); | |
1775 | struct mtk_eth *eth = mac->hw; | |
1776 | ||
1777 | netif_tx_disable(dev); | |
2364c5c5 | 1778 | phy_stop(dev->phydev); |
656e7052 JC |
1779 | |
1780 | /* only shutdown DMA if this is the last user */ | |
1781 | if (!atomic_dec_and_test(ð->dma_refcnt)) | |
1782 | return 0; | |
1783 | ||
bacfd110 NC |
1784 | mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); |
1785 | mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); | |
80673029 | 1786 | napi_disable(ð->tx_napi); |
656e7052 JC |
1787 | napi_disable(ð->rx_napi); |
1788 | ||
1789 | mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); | |
6bf563d5 | 1790 | mtk_stop_dma(eth, MTK_PDMA_GLO_CFG); |
656e7052 JC |
1791 | |
1792 | mtk_dma_free(eth); | |
1793 | ||
1794 | return 0; | |
1795 | } | |
1796 | ||
2a8307aa SW |
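/* Pulse the given bits in the ethsys reset controller: assert, give the
 * reset ~1ms to take effect, deassert, then wait 10ms for the blocks to
 * come back up.
 */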
1797 | static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) |
1798 | { | |
1799 | regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, | |
1800 | reset_bits, | |
1801 | reset_bits); | |
1802 | ||
1803 | usleep_range(1000, 1100); | |
1804 | regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, | |
1805 | reset_bits, | |
1806 | ~reset_bits); | |
1807 | mdelay(10); | |
1808 | } | |
1809 | ||
9ea4d311 | 1810 | static int mtk_hw_init(struct mtk_eth *eth) |
656e7052 | 1811 | { |
9ea4d311 SW |
1812 | int i, val; |
1813 | ||
1814 | if (test_and_set_bit(MTK_HW_INIT, ð->state)) | |
1815 | return 0; | |
85574dbf | 1816 | |
26a2ad8a SW |
1817 | pm_runtime_enable(eth->dev); |
1818 | pm_runtime_get_sync(eth->dev); | |
1819 | ||
85574dbf SW |
1820 | clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]); |
1821 | clk_prepare_enable(eth->clks[MTK_CLK_ESW]); | |
1822 | clk_prepare_enable(eth->clks[MTK_CLK_GP1]); | |
1823 | clk_prepare_enable(eth->clks[MTK_CLK_GP2]); | |
2a8307aa SW |
1824 | ethsys_reset(eth, RSTCTRL_FE); |
1825 | ethsys_reset(eth, RSTCTRL_PPE); | |
656e7052 | 1826 | |
9ea4d311 SW |
1827 | regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); |
1828 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
1829 | if (!eth->mac[i]) | |
1830 | continue; | |
1831 | val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id); | |
1832 | val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id); | |
1833 | } | |
1834 | regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); | |
1835 | ||
656e7052 JC |
1836 | /* Set GE2 drive strength and slew rate */ | |
1837 | regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); | |
1838 | ||
1839 | /* set GE2 TDSEL */ | |
1840 | regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); | |
1841 | ||
1842 | /* set GE2 TUNE */ | |
1843 | regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); | |
1844 | ||
1845 | /* GE1, Force 1000M/FD, FC ON */ | |
1846 | mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0)); | |
1847 | ||
1848 | /* GE2, Force 1000M/FD, FC ON */ | |
1849 | mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1)); | |
1850 | ||
1851 | /* Enable RX VLAN offloading */ | |
1852 | mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); | |
1853 | ||
656e7052 JC |
1854 | /* disable delayed and normal interrupts */ | |
1855 | mtk_w32(eth, 0, MTK_QDMA_DELAY_INT); | |
bacfd110 NC |
1856 | mtk_w32(eth, 0, MTK_PDMA_DELAY_INT); |
1857 | mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0); | |
1858 | mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0); | |
656e7052 JC |
1859 | mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); |
1860 | mtk_w32(eth, 0, MTK_RST_GL); | |
1861 | ||
1862 | /* FE int grouping */ | |
80673029 JC |
1863 | mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1); |
1864 | mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2); | |
1865 | mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1); | |
1866 | mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2); | |
1867 | mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); | |
656e7052 JC |
1868 | |
1869 | for (i = 0; i < 2; i++) { | |
1870 | u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); | |
1871 | ||
9c08435e | 1872 | /* set up the forward port to send frames to PDMA */
656e7052 | 1873 | val &= ~0xffff; |
656e7052 JC |
1874 | |
1875 | /* Enable RX checksum */ | |
1876 | val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; | |
1877 | ||
1878 | /* setup the mac dma */ | |
1879 | mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); | |
1880 | } | |
1881 | ||
1882 | return 0; | |
1883 | } | |
1884 | ||
bf253fb7 SW |
1885 | static int mtk_hw_deinit(struct mtk_eth *eth) |
1886 | { | |
9ea4d311 SW |
1887 | if (!test_and_clear_bit(MTK_HW_INIT, ð->state)) |
1888 | return 0; | |
1889 | ||
bf253fb7 SW |
1890 | clk_disable_unprepare(eth->clks[MTK_CLK_GP2]); |
1891 | clk_disable_unprepare(eth->clks[MTK_CLK_GP1]); | |
1892 | clk_disable_unprepare(eth->clks[MTK_CLK_ESW]); | |
1893 | clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]); | |
1894 | ||
26a2ad8a SW |
1895 | pm_runtime_put_sync(eth->dev); |
1896 | pm_runtime_disable(eth->dev); | |
1897 | ||
bf253fb7 SW |
1898 | return 0; |
1899 | } | |
1900 | ||
656e7052 JC |
1901 | static int __init mtk_init(struct net_device *dev) |
1902 | { | |
1903 | struct mtk_mac *mac = netdev_priv(dev); | |
1904 | struct mtk_eth *eth = mac->hw; | |
1905 | const char *mac_addr; | |
1906 | ||
1907 | mac_addr = of_get_mac_address(mac->of_node); | |
1908 | if (mac_addr) | |
1909 | ether_addr_copy(dev->dev_addr, mac_addr); | |
1910 | ||
1911 | /* If the MAC address is invalid, use a random one */ | |
1912 | if (!is_valid_ether_addr(dev->dev_addr)) { | |
1913 | random_ether_addr(dev->dev_addr); | |
1914 | dev_err(eth->dev, "generated random MAC address %pM\n", | |
1915 | dev->dev_addr); | |
1916 | dev->addr_assign_type = NET_ADDR_RANDOM; | |
1917 | } | |
1918 | ||
2364c5c5 | 1919 | return mtk_phy_connect(dev); |
656e7052 JC |
1920 | } |
1921 | ||
1922 | static void mtk_uninit(struct net_device *dev) | |
1923 | { | |
1924 | struct mtk_mac *mac = netdev_priv(dev); | |
1925 | struct mtk_eth *eth = mac->hw; | |
1926 | ||
2364c5c5 | 1927 | phy_disconnect(dev->phydev); |
16a67eb3 JH |
1928 | if (of_phy_is_fixed_link(mac->of_node)) |
1929 | of_phy_deregister_fixed_link(mac->of_node); | |
bacfd110 NC |
1930 | mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0); |
1931 | mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0); | |
656e7052 JC |
1932 | } |
1933 | ||
1934 | static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |
1935 | { | |
656e7052 JC |
1936 | switch (cmd) { |
1937 | case SIOCGMIIPHY: | |
1938 | case SIOCGMIIREG: | |
1939 | case SIOCSMIIREG: | |
2364c5c5 | 1940 | return phy_mii_ioctl(dev->phydev, ifr, cmd); |
656e7052 JC |
1941 | default: |
1942 | break; | |
1943 | } | |
1944 | ||
1945 | return -EOPNOTSUPP; | |
1946 | } | |
1947 | ||
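/* Deferred reset handler scheduled from mtk_tx_timeout(). Under rtnl it
 * stops every active netdev, power-cycles the frame engine through
 * mtk_hw_deinit()/mtk_hw_init(), re-initializes the PHYs and finally
 * reopens the devices it stopped.
 */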
1948 | static void mtk_pending_work(struct work_struct *work) | |
1949 | { | |
7c78b4ad | 1950 | struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); |
e7d425dc JC |
1951 | int err, i; |
1952 | unsigned long restart = 0; | |
656e7052 JC |
1953 | |
1954 | rtnl_lock(); | |
656e7052 | 1955 | |
dce6fa42 SW |
1956 | dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__); |
1957 | ||
1958 | while (test_and_set_bit_lock(MTK_RESETTING, ð->state)) | |
1959 | cpu_relax(); | |
1960 | ||
1961 | dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__); | |
e7d425dc JC |
1962 | /* stop all devices to make sure that dma is properly shut down */ |
1963 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
7c78b4ad | 1964 | if (!eth->netdev[i]) |
e7d425dc JC |
1965 | continue; |
1966 | mtk_stop(eth->netdev[i]); | |
1967 | __set_bit(i, &restart); | |
1968 | } | |
dce6fa42 | 1969 | dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__); |
e7d425dc | 1970 | |
9ea4d311 SW |
1971 | /* restart underlying hardware such as power, clock, pin mux |
1972 | * and the connected phy | |
1973 | */ | |
1974 | mtk_hw_deinit(eth); | |
1975 | ||
1976 | if (eth->dev->pins) | |
1977 | pinctrl_select_state(eth->dev->pins->p, | |
1978 | eth->dev->pins->default_state); | |
1979 | mtk_hw_init(eth); | |
1980 | ||
1981 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
1982 | if (!eth->mac[i] || | |
1983 | of_phy_is_fixed_link(eth->mac[i]->of_node)) | |
1984 | continue; | |
2364c5c5 | 1985 | err = phy_init_hw(eth->netdev[i]->phydev); |
9ea4d311 SW |
1986 | if (err) |
1987 | dev_err(eth->dev, "%s: PHY init failed.\n", | |
1988 | eth->netdev[i]->name); | |
1989 | } | |
1990 | ||
e7d425dc JC |
1991 | /* restart DMA and enable IRQs */ |
1992 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
1993 | if (!test_bit(i, &restart)) | |
1994 | continue; | |
1995 | err = mtk_open(eth->netdev[i]); | |
1996 | if (err) { | |
1997 | netif_alert(eth, ifup, eth->netdev[i], | |
1998 | "Driver up/down cycle failed, closing device.\n"); | |
1999 | dev_close(eth->netdev[i]); | |
2000 | } | |
656e7052 | 2001 | } |
dce6fa42 SW |
2002 | |
2003 | dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__); | |
2004 | ||
2005 | clear_bit_unlock(MTK_RESETTING, ð->state); | |
2006 | ||
656e7052 JC |
2007 | rtnl_unlock(); |
2008 | } | |
2009 | ||
8a8a9e89 | 2010 | static int mtk_free_dev(struct mtk_eth *eth) |
656e7052 JC |
2011 | { |
2012 | int i; | |
2013 | ||
2014 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
656e7052 JC |
2015 | if (!eth->netdev[i]) |
2016 | continue; | |
8a8a9e89 SW |
2017 | free_netdev(eth->netdev[i]); |
2018 | } | |
2019 | ||
2020 | return 0; | |
2021 | } | |
656e7052 | 2022 | |
8a8a9e89 SW |
2023 | static int mtk_unreg_dev(struct mtk_eth *eth) |
2024 | { | |
2025 | int i; | |
2026 | ||
2027 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
2028 | if (!eth->netdev[i]) | |
2029 | continue; | |
656e7052 | 2030 | unregister_netdev(eth->netdev[i]); |
656e7052 | 2031 | } |
8a8a9e89 SW |
2032 | |
2033 | return 0; | |
2034 | } | |
2035 | ||
2036 | static int mtk_cleanup(struct mtk_eth *eth) | |
2037 | { | |
2038 | mtk_unreg_dev(eth); | |
2039 | mtk_free_dev(eth); | |
7c78b4ad | 2040 | cancel_work_sync(ð->pending_work); |
656e7052 JC |
2041 | |
2042 | return 0; | |
2043 | } | |
2044 | ||
3a82e78c BX |
2045 | static int mtk_get_link_ksettings(struct net_device *ndev, |
2046 | struct ethtool_link_ksettings *cmd) | |
656e7052 | 2047 | { |
3e60b748 | 2048 | struct mtk_mac *mac = netdev_priv(ndev); |
656e7052 | 2049 | |
dce6fa42 SW |
2050 | if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) |
2051 | return -EBUSY; | |
2052 | ||
3e60b748 | 2053 | return phy_ethtool_ksettings_get(ndev->phydev, cmd); |
656e7052 JC |
2054 | } |
2055 | ||
3a82e78c BX |
2056 | static int mtk_set_link_ksettings(struct net_device *ndev, |
2057 | const struct ethtool_link_ksettings *cmd) | |
656e7052 | 2058 | { |
3e60b748 | 2059 | struct mtk_mac *mac = netdev_priv(ndev); |
656e7052 | 2060 | |
3e60b748 SW |
2061 | if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) |
2062 | return -EBUSY; | |
656e7052 | 2063 | |
3e60b748 | 2064 | return phy_ethtool_ksettings_set(ndev->phydev, cmd); |
656e7052 JC |
2065 | } |
2066 | ||
2067 | static void mtk_get_drvinfo(struct net_device *dev, | |
2068 | struct ethtool_drvinfo *info) | |
2069 | { | |
2070 | struct mtk_mac *mac = netdev_priv(dev); | |
2071 | ||
2072 | strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); | |
2073 | strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); | |
2074 | info->n_stats = ARRAY_SIZE(mtk_ethtool_stats); | |
2075 | } | |
2076 | ||
2077 | static u32 mtk_get_msglevel(struct net_device *dev) | |
2078 | { | |
2079 | struct mtk_mac *mac = netdev_priv(dev); | |
2080 | ||
2081 | return mac->hw->msg_enable; | |
2082 | } | |
2083 | ||
2084 | static void mtk_set_msglevel(struct net_device *dev, u32 value) | |
2085 | { | |
2086 | struct mtk_mac *mac = netdev_priv(dev); | |
2087 | ||
2088 | mac->hw->msg_enable = value; | |
2089 | } | |
2090 | ||
2091 | static int mtk_nway_reset(struct net_device *dev) | |
2092 | { | |
2093 | struct mtk_mac *mac = netdev_priv(dev); | |
2094 | ||
dce6fa42 SW |
2095 | if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) |
2096 | return -EBUSY; | |
2097 | ||
2364c5c5 | 2098 | return genphy_restart_aneg(dev->phydev); |
656e7052 JC |
2099 | } |
2100 | ||
2101 | static u32 mtk_get_link(struct net_device *dev) | |
2102 | { | |
2103 | struct mtk_mac *mac = netdev_priv(dev); | |
2104 | int err; | |
2105 | ||
dce6fa42 SW |
2106 | if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) |
2107 | return -EBUSY; | |
2108 | ||
2364c5c5 | 2109 | err = genphy_update_link(dev->phydev); |
656e7052 JC |
2110 | if (err) |
2111 | return ethtool_op_get_link(dev); | |
2112 | ||
2364c5c5 | 2113 | return dev->phydev->link; |
656e7052 JC |
2114 | } |
2115 | ||
2116 | static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data) | |
2117 | { | |
2118 | int i; | |
2119 | ||
2120 | switch (stringset) { | |
2121 | case ETH_SS_STATS: | |
2122 | for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) { | |
2123 | memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN); | |
2124 | data += ETH_GSTRING_LEN; | |
2125 | } | |
2126 | break; | |
2127 | } | |
2128 | } | |
2129 | ||
2130 | static int mtk_get_sset_count(struct net_device *dev, int sset) | |
2131 | { | |
2132 | switch (sset) { | |
2133 | case ETH_SS_STATS: | |
2134 | return ARRAY_SIZE(mtk_ethtool_stats); | |
2135 | default: | |
2136 | return -EOPNOTSUPP; | |
2137 | } | |
2138 | } | |
2139 | ||
2140 | static void mtk_get_ethtool_stats(struct net_device *dev, | |
2141 | struct ethtool_stats *stats, u64 *data) | |
2142 | { | |
2143 | struct mtk_mac *mac = netdev_priv(dev); | |
2144 | struct mtk_hw_stats *hwstats = mac->hw_stats; | |
2145 | u64 *data_src, *data_dst; | |
2146 | unsigned int start; | |
2147 | int i; | |
2148 | ||
dce6fa42 SW |
2149 | if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) |
2150 | return; | |
2151 | ||
656e7052 JC |
2152 | if (netif_running(dev) && netif_device_present(dev)) { |
2153 | if (spin_trylock(&hwstats->stats_lock)) { | |
2154 | mtk_stats_update_mac(mac); | |
2155 | spin_unlock(&hwstats->stats_lock); | |
2156 | } | |
2157 | } | |
2158 | ||
94d308d0 SW |
2159 | data_src = (u64 *)hwstats; |
2160 | ||
656e7052 | 2161 | do { |
656e7052 JC |
2162 | data_dst = data; |
2163 | start = u64_stats_fetch_begin_irq(&hwstats->syncp); | |
2164 | ||
2165 | for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) | |
2166 | *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset); | |
2167 | } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); | |
2168 | } | |
2169 | ||
7aab747e NC |
2170 | static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, |
2171 | u32 *rule_locs) | |
2172 | { | |
2173 | int ret = -EOPNOTSUPP; | |
2174 | ||
2175 | switch (cmd->cmd) { | |
2176 | case ETHTOOL_GRXRINGS: | |
2177 | if (dev->features & NETIF_F_LRO) { | |
2178 | cmd->data = MTK_MAX_RX_RING_NUM; | |
2179 | ret = 0; | |
2180 | } | |
2181 | break; | |
2182 | case ETHTOOL_GRXCLSRLCNT: | |
2183 | if (dev->features & NETIF_F_LRO) { | |
2184 | struct mtk_mac *mac = netdev_priv(dev); | |
2185 | ||
2186 | cmd->rule_cnt = mac->hwlro_ip_cnt; | |
2187 | ret = 0; | |
2188 | } | |
2189 | break; | |
2190 | case ETHTOOL_GRXCLSRULE: | |
2191 | if (dev->features & NETIF_F_LRO) | |
2192 | ret = mtk_hwlro_get_fdir_entry(dev, cmd); | |
2193 | break; | |
2194 | case ETHTOOL_GRXCLSRLALL: | |
2195 | if (dev->features & NETIF_F_LRO) | |
2196 | ret = mtk_hwlro_get_fdir_all(dev, cmd, | |
2197 | rule_locs); | |
2198 | break; | |
2199 | default: | |
2200 | break; | |
2201 | } | |
2202 | ||
2203 | return ret; | |
2204 | } | |
2205 | ||
2206 | static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) | |
2207 | { | |
2208 | int ret = -EOPNOTSUPP; | |
2209 | ||
2210 | switch (cmd->cmd) { | |
2211 | case ETHTOOL_SRXCLSRLINS: | |
2212 | if (dev->features & NETIF_F_LRO) | |
2213 | ret = mtk_hwlro_add_ipaddr(dev, cmd); | |
2214 | break; | |
2215 | case ETHTOOL_SRXCLSRLDEL: | |
2216 | if (dev->features & NETIF_F_LRO) | |
2217 | ret = mtk_hwlro_del_ipaddr(dev, cmd); | |
2218 | break; | |
2219 | default: | |
2220 | break; | |
2221 | } | |
2222 | ||
2223 | return ret; | |
2224 | } | |
2225 | ||
6a38cb15 | 2226 | static const struct ethtool_ops mtk_ethtool_ops = { |
3e60b748 SW |
2227 | .get_link_ksettings = mtk_get_link_ksettings, |
2228 | .set_link_ksettings = mtk_set_link_ksettings, | |
656e7052 JC |
2229 | .get_drvinfo = mtk_get_drvinfo, |
2230 | .get_msglevel = mtk_get_msglevel, | |
2231 | .set_msglevel = mtk_set_msglevel, | |
2232 | .nway_reset = mtk_nway_reset, | |
2233 | .get_link = mtk_get_link, | |
2234 | .get_strings = mtk_get_strings, | |
2235 | .get_sset_count = mtk_get_sset_count, | |
2236 | .get_ethtool_stats = mtk_get_ethtool_stats, | |
7aab747e NC |
2237 | .get_rxnfc = mtk_get_rxnfc, |
2238 | .set_rxnfc = mtk_set_rxnfc, | |
656e7052 JC |
2239 | }; |
2240 | ||
2241 | static const struct net_device_ops mtk_netdev_ops = { | |
2242 | .ndo_init = mtk_init, | |
2243 | .ndo_uninit = mtk_uninit, | |
2244 | .ndo_open = mtk_open, | |
2245 | .ndo_stop = mtk_stop, | |
2246 | .ndo_start_xmit = mtk_start_xmit, | |
2247 | .ndo_set_mac_address = mtk_set_mac_address, | |
2248 | .ndo_validate_addr = eth_validate_addr, | |
2249 | .ndo_do_ioctl = mtk_do_ioctl, | |
2250 | .ndo_change_mtu = eth_change_mtu, | |
2251 | .ndo_tx_timeout = mtk_tx_timeout, | |
2252 | .ndo_get_stats64 = mtk_get_stats64, | |
7aab747e NC |
2253 | .ndo_fix_features = mtk_fix_features, |
2254 | .ndo_set_features = mtk_set_features, | |
656e7052 JC |
2255 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2256 | .ndo_poll_controller = mtk_poll_controller, | |
2257 | #endif | |
2258 | }; | |
2259 | ||
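/* Instantiate one netdev per "mediatek,eth-mac" child node; the "reg"
 * property selects the GMAC id. Both MACs share the single DMA engine
 * owned by struct mtk_eth.
 */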
2260 | static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) | |
2261 | { | |
2262 | struct mtk_mac *mac; | |
2263 | const __be32 *_id = of_get_property(np, "reg", NULL); | |
2264 | int id, err; | |
2265 | ||
2266 | if (!_id) { | |
2267 | dev_err(eth->dev, "missing mac id\n"); | |
2268 | return -EINVAL; | |
2269 | } | |
2270 | ||
2271 | id = be32_to_cpup(_id); | |
2272 | if (id >= MTK_MAC_COUNT) { | |
2273 | dev_err(eth->dev, "%d is not a valid mac id\n", id); | |
2274 | return -EINVAL; | |
2275 | } | |
2276 | ||
2277 | if (eth->netdev[id]) { | |
2278 | dev_err(eth->dev, "duplicate mac id found: %d\n", id); | |
2279 | return -EINVAL; | |
2280 | } | |
2281 | ||
2282 | eth->netdev[id] = alloc_etherdev(sizeof(*mac)); | |
2283 | if (!eth->netdev[id]) { | |
2284 | dev_err(eth->dev, "alloc_etherdev failed\n"); | |
2285 | return -ENOMEM; | |
2286 | } | |
2287 | mac = netdev_priv(eth->netdev[id]); | |
2288 | eth->mac[id] = mac; | |
2289 | mac->id = id; | |
2290 | mac->hw = eth; | |
2291 | mac->of_node = np; | |
656e7052 | 2292 | |
ee406810 NC |
2293 | memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip)); |
2294 | mac->hwlro_ip_cnt = 0; | |
2295 | ||
656e7052 JC |
2296 | mac->hw_stats = devm_kzalloc(eth->dev, |
2297 | sizeof(*mac->hw_stats), | |
2298 | GFP_KERNEL); | |
2299 | if (!mac->hw_stats) { | |
2300 | dev_err(eth->dev, "failed to allocate counter memory\n"); | |
2301 | err = -ENOMEM; | |
2302 | goto free_netdev; | |
2303 | } | |
2304 | spin_lock_init(&mac->hw_stats->stats_lock); | |
d7005652 | 2305 | u64_stats_init(&mac->hw_stats->syncp); |
656e7052 JC |
2306 | mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; |
2307 | ||
2308 | SET_NETDEV_DEV(eth->netdev[id], eth->dev); | |
eaadf9fd | 2309 | eth->netdev[id]->watchdog_timeo = 5 * HZ; |
656e7052 JC |
2310 | eth->netdev[id]->netdev_ops = &mtk_netdev_ops; |
2311 | eth->netdev[id]->base_addr = (unsigned long)eth->base; | |
ee406810 NC |
2312 | |
2313 | eth->netdev[id]->hw_features = MTK_HW_FEATURES; | |
2314 | if (eth->hwlro) | |
2315 | eth->netdev[id]->hw_features |= NETIF_F_LRO; | |
2316 | ||
656e7052 JC |
2317 | eth->netdev[id]->vlan_features = MTK_HW_FEATURES & |
2318 | ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); | |
2319 | eth->netdev[id]->features |= MTK_HW_FEATURES; | |
2320 | eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; | |
2321 | ||
80673029 | 2322 | eth->netdev[id]->irq = eth->irq[0]; |
656e7052 JC |
2323 | return 0; |
2324 | ||
2325 | free_netdev: | |
2326 | free_netdev(eth->netdev[id]); | |
2327 | return err; | |
2328 | } | |
2329 | ||
b95b6d99 NC |
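/* The chip id is stored as four ASCII digits spread across two CHIPID
 * registers; decode them into a plain decimal number, e.g. the bytes
 * '7' '6' '2' '3' yield 7623 (MT7623_ETH).
 */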
2330 | static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id) |
2331 | { | |
2332 | u32 val[2], id[4]; | |
2333 | ||
2334 | regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]); | |
2335 | regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]); | |
2336 | ||
2337 | id[3] = ((val[0] >> 16) & 0xff) - '0'; | |
2338 | id[2] = ((val[0] >> 24) & 0xff) - '0'; | |
2339 | id[1] = (val[1] & 0xff) - '0'; | |
2340 | id[0] = ((val[1] >> 8) & 0xff) - '0'; | |
2341 | ||
2342 | *chip_id = (id[3] * 1000) + (id[2] * 100) + | |
2343 | (id[1] * 10) + id[0]; | |
2344 | ||
2345 | if (!(*chip_id)) { | |
2346 | dev_err(eth->dev, "failed to get chip id\n"); | |
2347 | return -ENODEV; | |
2348 | } | |
2349 | ||
2350 | dev_info(eth->dev, "chip id = %d\n", *chip_id); | |
2351 | ||
2352 | return 0; | |
2353 | } | |
2354 | ||
983e1a6c NC |
2355 | static bool mtk_is_hwlro_supported(struct mtk_eth *eth) |
2356 | { | |
2357 | switch (eth->chip_id) { | |
2358 | case MT7623_ETH: | |
2359 | return true; | |
2360 | } | |
2361 | ||
2362 | return false; | |
2363 | } | |
2364 | ||
656e7052 JC |
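/* Probe order matters: the ethsys/pctl regmaps, IRQs and clocks must be
 * acquired before mtk_hw_init() powers the block up; only then can the
 * chip id be read and the MACs, MDIO bus and NAPI contexts be registered.
 */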
2365 | static int mtk_probe(struct platform_device *pdev) |
2366 | { | |
2367 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
2368 | struct device_node *mac_np; | |
2369 | const struct of_device_id *match; | |
2370 | struct mtk_soc_data *soc; | |
2371 | struct mtk_eth *eth; | |
2372 | int err; | |
80673029 | 2373 | int i; |
656e7052 | 2374 | |
656e7052 JC |
2375 | match = of_match_device(of_mtk_match, &pdev->dev); |
2376 | soc = (struct mtk_soc_data *)match->data; | |
2377 | ||
2378 | eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); | |
2379 | if (!eth) | |
2380 | return -ENOMEM; | |
2381 | ||
549e5495 | 2382 | eth->dev = &pdev->dev; |
656e7052 | 2383 | eth->base = devm_ioremap_resource(&pdev->dev, res); |
621e49f6 VZ |
2384 | if (IS_ERR(eth->base)) |
2385 | return PTR_ERR(eth->base); | |
656e7052 JC |
2386 | |
2387 | spin_lock_init(ð->page_lock); | |
7bc9ccec | 2388 | spin_lock_init(ð->irq_lock); |
656e7052 JC |
2389 | |
2390 | eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, | |
2391 | "mediatek,ethsys"); | |
2392 | if (IS_ERR(eth->ethsys)) { | |
2393 | dev_err(&pdev->dev, "no ethsys regmap found\n"); | |
2394 | return PTR_ERR(eth->ethsys); | |
2395 | } | |
2396 | ||
2397 | eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, | |
2398 | "mediatek,pctl"); | |
2399 | if (IS_ERR(eth->pctl)) { | |
2400 | dev_err(&pdev->dev, "no pctl regmap found\n"); | |
2401 | return PTR_ERR(eth->pctl); | |
2402 | } | |
2403 | ||
80673029 JC |
2404 | for (i = 0; i < 3; i++) { |
2405 | eth->irq[i] = platform_get_irq(pdev, i); | |
2406 | if (eth->irq[i] < 0) { | |
2407 | dev_err(&pdev->dev, "no IRQ%d resource found\n", i); | |
2408 | return -ENXIO; | |
2409 | } | |
656e7052 | 2410 | } |
549e5495 SW |
2411 | for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { |
2412 | eth->clks[i] = devm_clk_get(eth->dev, | |
2413 | mtk_clks_source_name[i]); | |
2414 | if (IS_ERR(eth->clks[i])) { | |
2415 | if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) | |
2416 | return -EPROBE_DEFER; | |
2417 | return -ENODEV; | |
2418 | } | |
2419 | } | |
656e7052 | 2420 | |
656e7052 | 2421 | eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); |
7c78b4ad | 2422 | INIT_WORK(ð->pending_work, mtk_pending_work); |
656e7052 JC |
2423 | |
2424 | err = mtk_hw_init(eth); | |
2425 | if (err) | |
2426 | return err; | |
2427 | ||
b95b6d99 NC |
2428 | err = mtk_get_chip_id(eth, ð->chip_id); |
2429 | if (err) | |
2430 | return err; | |
2431 | ||
983e1a6c NC |
2432 | eth->hwlro = mtk_is_hwlro_supported(eth); |
2433 | ||
656e7052 JC |
2434 | for_each_child_of_node(pdev->dev.of_node, mac_np) { |
2435 | if (!of_device_is_compatible(mac_np, | |
2436 | "mediatek,eth-mac")) | |
2437 | continue; | |
2438 | ||
2439 | if (!of_device_is_available(mac_np)) | |
2440 | continue; | |
2441 | ||
2442 | err = mtk_add_mac(eth, mac_np); | |
2443 | if (err) | |
8a8a9e89 | 2444 | goto err_deinit_hw; |
656e7052 JC |
2445 | } |
2446 | ||
85574dbf SW |
2447 | err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0, |
2448 | dev_name(eth->dev), eth); | |
2449 | if (err) | |
2450 | goto err_free_dev; | |
2451 | ||
2452 | err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0, | |
2453 | dev_name(eth->dev), eth); | |
2454 | if (err) | |
2455 | goto err_free_dev; | |
2456 | ||
2457 | err = mtk_mdio_init(eth); | |
2458 | if (err) | |
2459 | goto err_free_dev; | |
2460 | ||
2461 | for (i = 0; i < MTK_MAX_DEVS; i++) { | |
2462 | if (!eth->netdev[i]) | |
2463 | continue; | |
2464 | ||
2465 | err = register_netdev(eth->netdev[i]); | |
2466 | if (err) { | |
2467 | dev_err(eth->dev, "error bringing up device\n"); | |
8a8a9e89 | 2468 | goto err_deinit_mdio; |
85574dbf SW |
2469 | } else |
2470 | netif_info(eth, probe, eth->netdev[i], | |
2471 | "mediatek frame engine at 0x%08lx, irq %d\n", | |
2472 | eth->netdev[i]->base_addr, eth->irq[0]); | |
2473 | } | |
2474 | ||
656e7052 JC |
2475 | /* we run 2 devices on the same DMA ring so we need a dummy device |
2476 | * for NAPI to work | |
2477 | */ | |
2478 | init_dummy_netdev(ð->dummy_dev); | |
80673029 JC |
2479 | netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx, |
2480 | MTK_NAPI_WEIGHT); | |
2481 | netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx, | |
656e7052 JC |
2482 | MTK_NAPI_WEIGHT); |
2483 | ||
2484 | platform_set_drvdata(pdev, eth); | |
2485 | ||
2486 | return 0; | |
2487 | ||
8a8a9e89 SW |
2488 | err_deinit_mdio: |
2489 | mtk_mdio_cleanup(eth); | |
656e7052 | 2490 | err_free_dev: |
8a8a9e89 SW |
2491 | mtk_free_dev(eth); |
2492 | err_deinit_hw: | |
2493 | mtk_hw_deinit(eth); | |
2494 | ||
656e7052 JC |
2495 | return err; |
2496 | } | |
2497 | ||
2498 | static int mtk_remove(struct platform_device *pdev) | |
2499 | { | |
2500 | struct mtk_eth *eth = platform_get_drvdata(pdev); | |
79e9a414 SW |
2501 | int i; |
2502 | ||
2503 | /* stop all devices to make sure that dma is properly shut down */ | |
2504 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
2505 | if (!eth->netdev[i]) | |
2506 | continue; | |
2507 | mtk_stop(eth->netdev[i]); | |
2508 | } | |
656e7052 | 2509 | |
bf253fb7 | 2510 | mtk_hw_deinit(eth); |
656e7052 | 2511 | |
80673029 | 2512 | netif_napi_del(ð->tx_napi); |
656e7052 JC |
2513 | netif_napi_del(ð->rx_napi); |
2514 | mtk_cleanup(eth); | |
e82f7148 | 2515 | mtk_mdio_cleanup(eth); |
656e7052 JC |
2516 | |
2517 | return 0; | |
2518 | } | |
2519 | ||
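/* A minimal device-tree sketch this driver would bind to (node names,
 * labels and the unit address are illustrative only; the compatibles,
 * phandles and the per-MAC "reg" property are what mtk_probe() and
 * mtk_add_mac() actually parse):
 *
 *	ethernet@1b100000 {
 *		compatible = "mediatek,mt7623-eth";
 *		mediatek,ethsys = <&ethsys>;
 *		mediatek,pctl = <&syscfg_pctl_a>;
 *
 *		gmac0: mac@0 {
 *			compatible = "mediatek,eth-mac";
 *			reg = <0>;
 *		};
 *	};
 */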
2520 | const struct of_device_id of_mtk_match[] = { | |
2521 | { .compatible = "mediatek,mt7623-eth" }, | |
2522 | {}, | |
2523 | }; | |
7077dc41 | 2524 | MODULE_DEVICE_TABLE(of, of_mtk_match); |
656e7052 JC |
2525 | |
2526 | static struct platform_driver mtk_driver = { | |
2527 | .probe = mtk_probe, | |
2528 | .remove = mtk_remove, | |
2529 | .driver = { | |
2530 | .name = "mtk_soc_eth", | |
656e7052 JC |
2531 | .of_match_table = of_mtk_match, |
2532 | }, | |
2533 | }; | |
2534 | ||
2535 | module_platform_driver(mtk_driver); | |
2536 | ||
2537 | MODULE_LICENSE("GPL"); | |
2538 | MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); | |
2539 | MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC"); |