]>
Commit | Line | Data |
---|---|---|
656e7052 JC |
1 | /* This program is free software; you can redistribute it and/or modify |
2 | * it under the terms of the GNU General Public License as published by | |
3 | * the Free Software Foundation; version 2 of the License | |
4 | * | |
5 | * This program is distributed in the hope that it will be useful, | |
6 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
7 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
8 | * GNU General Public License for more details. | |
9 | * | |
10 | * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> | |
11 | * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> | |
12 | * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> | |
13 | */ | |
14 | ||
15 | #include <linux/of_device.h> | |
16 | #include <linux/of_mdio.h> | |
17 | #include <linux/of_net.h> | |
18 | #include <linux/mfd/syscon.h> | |
19 | #include <linux/regmap.h> | |
20 | #include <linux/clk.h> | |
26a2ad8a | 21 | #include <linux/pm_runtime.h> |
656e7052 JC |
22 | #include <linux/if_vlan.h> |
23 | #include <linux/reset.h> | |
24 | #include <linux/tcp.h> | |
25 | ||
26 | #include "mtk_eth_soc.h" | |
27 | ||
28 | static int mtk_msg_level = -1; | |
29 | module_param_named(msg_level, mtk_msg_level, int, 0); | |
30 | MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)"); | |
31 | ||
32 | #define MTK_ETHTOOL_STAT(x) { #x, \ | |
33 | offsetof(struct mtk_hw_stats, x) / sizeof(u64) } | |
34 | ||
35 | /* strings used by ethtool */ | |
36 | static const struct mtk_ethtool_stats { | |
37 | char str[ETH_GSTRING_LEN]; | |
38 | u32 offset; | |
39 | } mtk_ethtool_stats[] = { | |
40 | MTK_ETHTOOL_STAT(tx_bytes), | |
41 | MTK_ETHTOOL_STAT(tx_packets), | |
42 | MTK_ETHTOOL_STAT(tx_skip), | |
43 | MTK_ETHTOOL_STAT(tx_collisions), | |
44 | MTK_ETHTOOL_STAT(rx_bytes), | |
45 | MTK_ETHTOOL_STAT(rx_packets), | |
46 | MTK_ETHTOOL_STAT(rx_overflow), | |
47 | MTK_ETHTOOL_STAT(rx_fcs_errors), | |
48 | MTK_ETHTOOL_STAT(rx_short_errors), | |
49 | MTK_ETHTOOL_STAT(rx_long_errors), | |
50 | MTK_ETHTOOL_STAT(rx_checksum_errors), | |
51 | MTK_ETHTOOL_STAT(rx_flow_control_packets), | |
52 | }; | |
53 | ||
549e5495 | 54 | static const char * const mtk_clks_source_name[] = { |
f430dea7 | 55 | "ethif", "esw", "gp1", "gp2", "trgpll" |
549e5495 SW |
56 | }; |
57 | ||
656e7052 JC |
58 | void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) |
59 | { | |
60 | __raw_writel(val, eth->base + reg); | |
61 | } | |
62 | ||
63 | u32 mtk_r32(struct mtk_eth *eth, unsigned reg) | |
64 | { | |
65 | return __raw_readl(eth->base + reg); | |
66 | } | |
67 | ||
68 | static int mtk_mdio_busy_wait(struct mtk_eth *eth) | |
69 | { | |
70 | unsigned long t_start = jiffies; | |
71 | ||
72 | while (1) { | |
73 | if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS)) | |
74 | return 0; | |
75 | if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT)) | |
76 | break; | |
77 | usleep_range(10, 20); | |
78 | } | |
79 | ||
80 | dev_err(eth->dev, "mdio: MDIO timeout\n"); | |
81 | return -1; | |
82 | } | |
83 | ||
379672de WY |
84 | static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, |
85 | u32 phy_register, u32 write_data) | |
656e7052 JC |
86 | { |
87 | if (mtk_mdio_busy_wait(eth)) | |
88 | return -1; | |
89 | ||
90 | write_data &= 0xffff; | |
91 | ||
92 | mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE | | |
93 | (phy_register << PHY_IAC_REG_SHIFT) | | |
94 | (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data, | |
95 | MTK_PHY_IAC); | |
96 | ||
97 | if (mtk_mdio_busy_wait(eth)) | |
98 | return -1; | |
99 | ||
100 | return 0; | |
101 | } | |
102 | ||
379672de | 103 | static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg) |
656e7052 JC |
104 | { |
105 | u32 d; | |
106 | ||
107 | if (mtk_mdio_busy_wait(eth)) | |
108 | return 0xffff; | |
109 | ||
110 | mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ | | |
111 | (phy_reg << PHY_IAC_REG_SHIFT) | | |
112 | (phy_addr << PHY_IAC_ADDR_SHIFT), | |
113 | MTK_PHY_IAC); | |
114 | ||
115 | if (mtk_mdio_busy_wait(eth)) | |
116 | return 0xffff; | |
117 | ||
118 | d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff; | |
119 | ||
120 | return d; | |
121 | } | |
122 | ||
123 | static int mtk_mdio_write(struct mii_bus *bus, int phy_addr, | |
124 | int phy_reg, u16 val) | |
125 | { | |
126 | struct mtk_eth *eth = bus->priv; | |
127 | ||
128 | return _mtk_mdio_write(eth, phy_addr, phy_reg, val); | |
129 | } | |
130 | ||
131 | static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) | |
132 | { | |
133 | struct mtk_eth *eth = bus->priv; | |
134 | ||
135 | return _mtk_mdio_read(eth, phy_addr, phy_reg); | |
136 | } | |
137 | ||
f430dea7 SW |
138 | static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed) |
139 | { | |
140 | u32 val; | |
141 | int ret; | |
142 | ||
143 | val = (speed == SPEED_1000) ? | |
144 | INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100; | |
145 | mtk_w32(eth, val, INTF_MODE); | |
146 | ||
147 | regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, | |
148 | ETHSYS_TRGMII_CLK_SEL362_5, | |
149 | ETHSYS_TRGMII_CLK_SEL362_5); | |
150 | ||
151 | val = (speed == SPEED_1000) ? 250000000 : 500000000; | |
152 | ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); | |
153 | if (ret) | |
154 | dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); | |
155 | ||
156 | val = (speed == SPEED_1000) ? | |
157 | RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100; | |
158 | mtk_w32(eth, val, TRGMII_RCK_CTRL); | |
159 | ||
160 | val = (speed == SPEED_1000) ? | |
161 | TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100; | |
162 | mtk_w32(eth, val, TRGMII_TCK_CTRL); | |
163 | } | |
164 | ||
656e7052 JC |
165 | static void mtk_phy_link_adjust(struct net_device *dev) |
166 | { | |
167 | struct mtk_mac *mac = netdev_priv(dev); | |
08ef55c6 JC |
168 | u16 lcl_adv = 0, rmt_adv = 0; |
169 | u8 flowctrl; | |
656e7052 JC |
170 | u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | |
171 | MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | | |
172 | MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | | |
173 | MAC_MCR_BACKPR_EN; | |
174 | ||
dce6fa42 SW |
175 | if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) |
176 | return; | |
177 | ||
2364c5c5 | 178 | switch (dev->phydev->speed) { |
656e7052 JC |
179 | case SPEED_1000: |
180 | mcr |= MAC_MCR_SPEED_1000; | |
181 | break; | |
182 | case SPEED_100: | |
183 | mcr |= MAC_MCR_SPEED_100; | |
184 | break; | |
185 | }; | |
186 | ||
f430dea7 | 187 | if (mac->id == 0 && !mac->trgmii) |
2364c5c5 | 188 | mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed); |
f430dea7 | 189 | |
2364c5c5 | 190 | if (dev->phydev->link) |
656e7052 JC |
191 | mcr |= MAC_MCR_FORCE_LINK; |
192 | ||
2364c5c5 | 193 | if (dev->phydev->duplex) { |
656e7052 JC |
194 | mcr |= MAC_MCR_FORCE_DPX; |
195 | ||
2364c5c5 | 196 | if (dev->phydev->pause) |
08ef55c6 | 197 | rmt_adv = LPA_PAUSE_CAP; |
2364c5c5 | 198 | if (dev->phydev->asym_pause) |
08ef55c6 JC |
199 | rmt_adv |= LPA_PAUSE_ASYM; |
200 | ||
2364c5c5 | 201 | if (dev->phydev->advertising & ADVERTISED_Pause) |
08ef55c6 | 202 | lcl_adv |= ADVERTISE_PAUSE_CAP; |
2364c5c5 | 203 | if (dev->phydev->advertising & ADVERTISED_Asym_Pause) |
08ef55c6 JC |
204 | lcl_adv |= ADVERTISE_PAUSE_ASYM; |
205 | ||
206 | flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); | |
207 | ||
208 | if (flowctrl & FLOW_CTRL_TX) | |
209 | mcr |= MAC_MCR_FORCE_TX_FC; | |
210 | if (flowctrl & FLOW_CTRL_RX) | |
211 | mcr |= MAC_MCR_FORCE_RX_FC; | |
212 | ||
213 | netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n", | |
214 | flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled", | |
215 | flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled"); | |
216 | } | |
656e7052 JC |
217 | |
218 | mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); | |
219 | ||
2364c5c5 | 220 | if (dev->phydev->link) |
656e7052 JC |
221 | netif_carrier_on(dev); |
222 | else | |
223 | netif_carrier_off(dev); | |
224 | } | |
225 | ||
226 | static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac, | |
227 | struct device_node *phy_node) | |
228 | { | |
229 | const __be32 *_addr = NULL; | |
230 | struct phy_device *phydev; | |
231 | int phy_mode, addr; | |
232 | ||
233 | _addr = of_get_property(phy_node, "reg", NULL); | |
234 | ||
235 | if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) { | |
236 | pr_err("%s: invalid phy address\n", phy_node->name); | |
237 | return -EINVAL; | |
238 | } | |
239 | addr = be32_to_cpu(*_addr); | |
240 | phy_mode = of_get_phy_mode(phy_node); | |
241 | if (phy_mode < 0) { | |
242 | dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode); | |
243 | return -EINVAL; | |
244 | } | |
245 | ||
246 | phydev = of_phy_connect(eth->netdev[mac->id], phy_node, | |
247 | mtk_phy_link_adjust, 0, phy_mode); | |
977bc20c | 248 | if (!phydev) { |
656e7052 | 249 | dev_err(eth->dev, "could not connect to PHY\n"); |
977bc20c | 250 | return -ENODEV; |
656e7052 JC |
251 | } |
252 | ||
253 | dev_info(eth->dev, | |
254 | "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n", | |
255 | mac->id, phydev_name(phydev), phydev->phy_id, | |
256 | phydev->drv->name); | |
257 | ||
656e7052 JC |
258 | return 0; |
259 | } | |
260 | ||
2364c5c5 | 261 | static int mtk_phy_connect(struct net_device *dev) |
656e7052 | 262 | { |
2364c5c5 SW |
263 | struct mtk_mac *mac = netdev_priv(dev); |
264 | struct mtk_eth *eth; | |
656e7052 | 265 | struct device_node *np; |
9ea4d311 | 266 | u32 val; |
656e7052 | 267 | |
2364c5c5 | 268 | eth = mac->hw; |
656e7052 | 269 | np = of_parse_phandle(mac->of_node, "phy-handle", 0); |
0c72c50f JC |
270 | if (!np && of_phy_is_fixed_link(mac->of_node)) |
271 | if (!of_phy_register_fixed_link(mac->of_node)) | |
272 | np = of_node_get(mac->of_node); | |
656e7052 JC |
273 | if (!np) |
274 | return -ENODEV; | |
275 | ||
276 | switch (of_get_phy_mode(np)) { | |
572de608 SW |
277 | case PHY_INTERFACE_MODE_TRGMII: |
278 | mac->trgmii = true; | |
37920fce JC |
279 | case PHY_INTERFACE_MODE_RGMII_TXID: |
280 | case PHY_INTERFACE_MODE_RGMII_RXID: | |
281 | case PHY_INTERFACE_MODE_RGMII_ID: | |
656e7052 | 282 | case PHY_INTERFACE_MODE_RGMII: |
9ea4d311 | 283 | mac->ge_mode = 0; |
656e7052 JC |
284 | break; |
285 | case PHY_INTERFACE_MODE_MII: | |
9ea4d311 | 286 | mac->ge_mode = 1; |
656e7052 | 287 | break; |
8ca7f4fe | 288 | case PHY_INTERFACE_MODE_REVMII: |
9ea4d311 | 289 | mac->ge_mode = 2; |
656e7052 | 290 | break; |
8ca7f4fe | 291 | case PHY_INTERFACE_MODE_RMII: |
292 | if (!mac->id) | |
293 | goto err_phy; | |
9ea4d311 | 294 | mac->ge_mode = 3; |
8ca7f4fe | 295 | break; |
656e7052 | 296 | default: |
8ca7f4fe | 297 | goto err_phy; |
656e7052 JC |
298 | } |
299 | ||
300 | /* put the gmac into the right mode */ | |
301 | regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); | |
302 | val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id); | |
9ea4d311 | 303 | val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id); |
656e7052 JC |
304 | regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); |
305 | ||
2364c5c5 | 306 | /* couple phydev to net_device */ |
656e7052 | 307 | mtk_phy_connect_node(eth, mac, np); |
2364c5c5 SW |
308 | dev->phydev->autoneg = AUTONEG_ENABLE; |
309 | dev->phydev->speed = 0; | |
310 | dev->phydev->duplex = 0; | |
b2025c7c | 311 | |
312 | if (of_phy_is_fixed_link(mac->of_node)) | |
2364c5c5 | 313 | dev->phydev->supported |= |
b2025c7c | 314 | SUPPORTED_Pause | SUPPORTED_Asym_Pause; |
315 | ||
2364c5c5 | 316 | dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | |
08ef55c6 | 317 | SUPPORTED_Asym_Pause; |
2364c5c5 | 318 | dev->phydev->advertising = dev->phydev->supported | |
656e7052 | 319 | ADVERTISED_Autoneg; |
2364c5c5 | 320 | phy_start_aneg(dev->phydev); |
656e7052 | 321 | |
e8c2993a | 322 | of_node_put(np); |
323 | ||
656e7052 | 324 | return 0; |
8ca7f4fe | 325 | |
326 | err_phy: | |
327 | of_node_put(np); | |
328 | dev_err(eth->dev, "invalid phy_mode\n"); | |
329 | return -EINVAL; | |
656e7052 JC |
330 | } |
331 | ||
332 | static int mtk_mdio_init(struct mtk_eth *eth) | |
333 | { | |
334 | struct device_node *mii_np; | |
1e515b7f | 335 | int ret; |
656e7052 JC |
336 | |
337 | mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); | |
338 | if (!mii_np) { | |
339 | dev_err(eth->dev, "no %s child node found", "mdio-bus"); | |
340 | return -ENODEV; | |
341 | } | |
342 | ||
343 | if (!of_device_is_available(mii_np)) { | |
aa6e8a54 | 344 | ret = -ENODEV; |
656e7052 JC |
345 | goto err_put_node; |
346 | } | |
347 | ||
1e515b7f | 348 | eth->mii_bus = devm_mdiobus_alloc(eth->dev); |
656e7052 | 349 | if (!eth->mii_bus) { |
1e515b7f | 350 | ret = -ENOMEM; |
656e7052 JC |
351 | goto err_put_node; |
352 | } | |
353 | ||
354 | eth->mii_bus->name = "mdio"; | |
355 | eth->mii_bus->read = mtk_mdio_read; | |
356 | eth->mii_bus->write = mtk_mdio_write; | |
357 | eth->mii_bus->priv = eth; | |
358 | eth->mii_bus->parent = eth->dev; | |
359 | ||
360 | snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); | |
1e515b7f | 361 | ret = of_mdiobus_register(eth->mii_bus, mii_np); |
656e7052 JC |
362 | |
363 | err_put_node: | |
364 | of_node_put(mii_np); | |
1e515b7f | 365 | return ret; |
656e7052 JC |
366 | } |
367 | ||
368 | static void mtk_mdio_cleanup(struct mtk_eth *eth) | |
369 | { | |
370 | if (!eth->mii_bus) | |
371 | return; | |
372 | ||
373 | mdiobus_unregister(eth->mii_bus); | |
656e7052 JC |
374 | } |
375 | ||
bacfd110 NC |
376 | static inline void mtk_irq_disable(struct mtk_eth *eth, |
377 | unsigned reg, u32 mask) | |
656e7052 | 378 | { |
7bc9ccec | 379 | unsigned long flags; |
656e7052 JC |
380 | u32 val; |
381 | ||
7bc9ccec | 382 | spin_lock_irqsave(ð->irq_lock, flags); |
bacfd110 NC |
383 | val = mtk_r32(eth, reg); |
384 | mtk_w32(eth, val & ~mask, reg); | |
7bc9ccec | 385 | spin_unlock_irqrestore(ð->irq_lock, flags); |
656e7052 JC |
386 | } |
387 | ||
bacfd110 NC |
388 | static inline void mtk_irq_enable(struct mtk_eth *eth, |
389 | unsigned reg, u32 mask) | |
656e7052 | 390 | { |
7bc9ccec | 391 | unsigned long flags; |
656e7052 JC |
392 | u32 val; |
393 | ||
7bc9ccec | 394 | spin_lock_irqsave(ð->irq_lock, flags); |
bacfd110 NC |
395 | val = mtk_r32(eth, reg); |
396 | mtk_w32(eth, val | mask, reg); | |
7bc9ccec | 397 | spin_unlock_irqrestore(ð->irq_lock, flags); |
656e7052 JC |
398 | } |
399 | ||
400 | static int mtk_set_mac_address(struct net_device *dev, void *p) | |
401 | { | |
402 | int ret = eth_mac_addr(dev, p); | |
403 | struct mtk_mac *mac = netdev_priv(dev); | |
404 | const char *macaddr = dev->dev_addr; | |
656e7052 JC |
405 | |
406 | if (ret) | |
407 | return ret; | |
408 | ||
dce6fa42 SW |
409 | if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) |
410 | return -EBUSY; | |
411 | ||
e3e9652a | 412 | spin_lock_bh(&mac->hw->page_lock); |
656e7052 JC |
413 | mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], |
414 | MTK_GDMA_MAC_ADRH(mac->id)); | |
415 | mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | | |
416 | (macaddr[4] << 8) | macaddr[5], | |
417 | MTK_GDMA_MAC_ADRL(mac->id)); | |
e3e9652a | 418 | spin_unlock_bh(&mac->hw->page_lock); |
656e7052 JC |
419 | |
420 | return 0; | |
421 | } | |
422 | ||
423 | void mtk_stats_update_mac(struct mtk_mac *mac) | |
424 | { | |
425 | struct mtk_hw_stats *hw_stats = mac->hw_stats; | |
426 | unsigned int base = MTK_GDM1_TX_GBCNT; | |
427 | u64 stats; | |
428 | ||
429 | base += hw_stats->reg_offset; | |
430 | ||
431 | u64_stats_update_begin(&hw_stats->syncp); | |
432 | ||
433 | hw_stats->rx_bytes += mtk_r32(mac->hw, base); | |
434 | stats = mtk_r32(mac->hw, base + 0x04); | |
435 | if (stats) | |
436 | hw_stats->rx_bytes += (stats << 32); | |
437 | hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08); | |
438 | hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10); | |
439 | hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14); | |
440 | hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18); | |
441 | hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c); | |
442 | hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20); | |
443 | hw_stats->rx_flow_control_packets += | |
444 | mtk_r32(mac->hw, base + 0x24); | |
445 | hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28); | |
446 | hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c); | |
447 | hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30); | |
448 | stats = mtk_r32(mac->hw, base + 0x34); | |
449 | if (stats) | |
450 | hw_stats->tx_bytes += (stats << 32); | |
451 | hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38); | |
452 | u64_stats_update_end(&hw_stats->syncp); | |
453 | } | |
454 | ||
455 | static void mtk_stats_update(struct mtk_eth *eth) | |
456 | { | |
457 | int i; | |
458 | ||
459 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
460 | if (!eth->mac[i] || !eth->mac[i]->hw_stats) | |
461 | continue; | |
462 | if (spin_trylock(ð->mac[i]->hw_stats->stats_lock)) { | |
463 | mtk_stats_update_mac(eth->mac[i]); | |
464 | spin_unlock(ð->mac[i]->hw_stats->stats_lock); | |
465 | } | |
466 | } | |
467 | } | |
468 | ||
469 | static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev, | |
470 | struct rtnl_link_stats64 *storage) | |
471 | { | |
472 | struct mtk_mac *mac = netdev_priv(dev); | |
473 | struct mtk_hw_stats *hw_stats = mac->hw_stats; | |
474 | unsigned int start; | |
475 | ||
476 | if (netif_running(dev) && netif_device_present(dev)) { | |
477 | if (spin_trylock(&hw_stats->stats_lock)) { | |
478 | mtk_stats_update_mac(mac); | |
479 | spin_unlock(&hw_stats->stats_lock); | |
480 | } | |
481 | } | |
482 | ||
483 | do { | |
484 | start = u64_stats_fetch_begin_irq(&hw_stats->syncp); | |
485 | storage->rx_packets = hw_stats->rx_packets; | |
486 | storage->tx_packets = hw_stats->tx_packets; | |
487 | storage->rx_bytes = hw_stats->rx_bytes; | |
488 | storage->tx_bytes = hw_stats->tx_bytes; | |
489 | storage->collisions = hw_stats->tx_collisions; | |
490 | storage->rx_length_errors = hw_stats->rx_short_errors + | |
491 | hw_stats->rx_long_errors; | |
492 | storage->rx_over_errors = hw_stats->rx_overflow; | |
493 | storage->rx_crc_errors = hw_stats->rx_fcs_errors; | |
494 | storage->rx_errors = hw_stats->rx_checksum_errors; | |
495 | storage->tx_aborted_errors = hw_stats->tx_skip; | |
496 | } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start)); | |
497 | ||
498 | storage->tx_errors = dev->stats.tx_errors; | |
499 | storage->rx_dropped = dev->stats.rx_dropped; | |
500 | storage->tx_dropped = dev->stats.tx_dropped; | |
501 | ||
502 | return storage; | |
503 | } | |
504 | ||
505 | static inline int mtk_max_frag_size(int mtu) | |
506 | { | |
507 | /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */ | |
508 | if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH) | |
509 | mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; | |
510 | ||
511 | return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) + | |
512 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | |
513 | } | |
514 | ||
515 | static inline int mtk_max_buf_size(int frag_size) | |
516 | { | |
517 | int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - | |
518 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | |
519 | ||
520 | WARN_ON(buf_size < MTK_MAX_RX_LENGTH); | |
521 | ||
522 | return buf_size; | |
523 | } | |
524 | ||
525 | static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd, | |
526 | struct mtk_rx_dma *dma_rxd) | |
527 | { | |
528 | rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); | |
529 | rxd->rxd2 = READ_ONCE(dma_rxd->rxd2); | |
530 | rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); | |
531 | rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); | |
532 | } | |
533 | ||
534 | /* the qdma core needs scratch memory to be setup */ | |
535 | static int mtk_init_fq_dma(struct mtk_eth *eth) | |
536 | { | |
605e4fe4 | 537 | dma_addr_t phy_ring_tail; |
656e7052 JC |
538 | int cnt = MTK_DMA_SIZE; |
539 | dma_addr_t dma_addr; | |
540 | int i; | |
541 | ||
542 | eth->scratch_ring = dma_alloc_coherent(eth->dev, | |
543 | cnt * sizeof(struct mtk_tx_dma), | |
605e4fe4 | 544 | ð->phy_scratch_ring, |
656e7052 JC |
545 | GFP_ATOMIC | __GFP_ZERO); |
546 | if (unlikely(!eth->scratch_ring)) | |
547 | return -ENOMEM; | |
548 | ||
549 | eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, | |
550 | GFP_KERNEL); | |
562c5a70 JC |
551 | if (unlikely(!eth->scratch_head)) |
552 | return -ENOMEM; | |
553 | ||
656e7052 JC |
554 | dma_addr = dma_map_single(eth->dev, |
555 | eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, | |
556 | DMA_FROM_DEVICE); | |
557 | if (unlikely(dma_mapping_error(eth->dev, dma_addr))) | |
558 | return -ENOMEM; | |
559 | ||
560 | memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt); | |
605e4fe4 | 561 | phy_ring_tail = eth->phy_scratch_ring + |
656e7052 JC |
562 | (sizeof(struct mtk_tx_dma) * (cnt - 1)); |
563 | ||
564 | for (i = 0; i < cnt; i++) { | |
565 | eth->scratch_ring[i].txd1 = | |
566 | (dma_addr + (i * MTK_QDMA_PAGE_SIZE)); | |
567 | if (i < cnt - 1) | |
605e4fe4 | 568 | eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring + |
656e7052 JC |
569 | ((i + 1) * sizeof(struct mtk_tx_dma))); |
570 | eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE); | |
571 | } | |
572 | ||
605e4fe4 | 573 | mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD); |
656e7052 JC |
574 | mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL); |
575 | mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT); | |
576 | mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN); | |
577 | ||
578 | return 0; | |
579 | } | |
580 | ||
581 | static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc) | |
582 | { | |
583 | void *ret = ring->dma; | |
584 | ||
585 | return ret + (desc - ring->phys); | |
586 | } | |
587 | ||
588 | static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, | |
589 | struct mtk_tx_dma *txd) | |
590 | { | |
591 | int idx = txd - ring->dma; | |
592 | ||
593 | return &ring->buf[idx]; | |
594 | } | |
595 | ||
55a4e778 | 596 | static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) |
656e7052 JC |
597 | { |
598 | if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { | |
55a4e778 | 599 | dma_unmap_single(eth->dev, |
656e7052 JC |
600 | dma_unmap_addr(tx_buf, dma_addr0), |
601 | dma_unmap_len(tx_buf, dma_len0), | |
602 | DMA_TO_DEVICE); | |
603 | } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { | |
55a4e778 | 604 | dma_unmap_page(eth->dev, |
656e7052 JC |
605 | dma_unmap_addr(tx_buf, dma_addr0), |
606 | dma_unmap_len(tx_buf, dma_len0), | |
607 | DMA_TO_DEVICE); | |
608 | } | |
609 | tx_buf->flags = 0; | |
610 | if (tx_buf->skb && | |
611 | (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) | |
612 | dev_kfree_skb_any(tx_buf->skb); | |
613 | tx_buf->skb = NULL; | |
614 | } | |
615 | ||
616 | static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, | |
617 | int tx_num, struct mtk_tx_ring *ring, bool gso) | |
618 | { | |
619 | struct mtk_mac *mac = netdev_priv(dev); | |
620 | struct mtk_eth *eth = mac->hw; | |
621 | struct mtk_tx_dma *itxd, *txd; | |
622 | struct mtk_tx_buf *tx_buf; | |
656e7052 JC |
623 | dma_addr_t mapped_addr; |
624 | unsigned int nr_frags; | |
625 | int i, n_desc = 1; | |
c6f1dc4d | 626 | u32 txd4 = 0, fport; |
656e7052 JC |
627 | |
628 | itxd = ring->next_free; | |
629 | if (itxd == ring->last_free) | |
630 | return -ENOMEM; | |
631 | ||
632 | /* set the forward port */ | |
c6f1dc4d SW |
633 | fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT; |
634 | txd4 |= fport; | |
656e7052 JC |
635 | |
636 | tx_buf = mtk_desc_to_tx_buf(ring, itxd); | |
637 | memset(tx_buf, 0, sizeof(*tx_buf)); | |
638 | ||
639 | if (gso) | |
640 | txd4 |= TX_DMA_TSO; | |
641 | ||
642 | /* TX Checksum offload */ | |
643 | if (skb->ip_summed == CHECKSUM_PARTIAL) | |
644 | txd4 |= TX_DMA_CHKSUM; | |
645 | ||
646 | /* VLAN header offload */ | |
647 | if (skb_vlan_tag_present(skb)) | |
648 | txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); | |
649 | ||
55a4e778 | 650 | mapped_addr = dma_map_single(eth->dev, skb->data, |
656e7052 | 651 | skb_headlen(skb), DMA_TO_DEVICE); |
55a4e778 | 652 | if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) |
656e7052 JC |
653 | return -ENOMEM; |
654 | ||
656e7052 JC |
655 | WRITE_ONCE(itxd->txd1, mapped_addr); |
656 | tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; | |
657 | dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); | |
658 | dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb)); | |
659 | ||
660 | /* TX SG offload */ | |
661 | txd = itxd; | |
662 | nr_frags = skb_shinfo(skb)->nr_frags; | |
663 | for (i = 0; i < nr_frags; i++) { | |
664 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; | |
665 | unsigned int offset = 0; | |
666 | int frag_size = skb_frag_size(frag); | |
667 | ||
668 | while (frag_size) { | |
669 | bool last_frag = false; | |
670 | unsigned int frag_map_size; | |
671 | ||
672 | txd = mtk_qdma_phys_to_virt(ring, txd->txd2); | |
673 | if (txd == ring->last_free) | |
674 | goto err_dma; | |
675 | ||
676 | n_desc++; | |
677 | frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); | |
55a4e778 | 678 | mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, |
656e7052 JC |
679 | frag_map_size, |
680 | DMA_TO_DEVICE); | |
55a4e778 | 681 | if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) |
656e7052 JC |
682 | goto err_dma; |
683 | ||
684 | if (i == nr_frags - 1 && | |
685 | (frag_size - frag_map_size) == 0) | |
686 | last_frag = true; | |
687 | ||
688 | WRITE_ONCE(txd->txd1, mapped_addr); | |
689 | WRITE_ONCE(txd->txd3, (TX_DMA_SWC | | |
690 | TX_DMA_PLEN0(frag_map_size) | | |
369f0453 | 691 | last_frag * TX_DMA_LS0)); |
c6f1dc4d | 692 | WRITE_ONCE(txd->txd4, fport); |
656e7052 JC |
693 | |
694 | tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; | |
695 | tx_buf = mtk_desc_to_tx_buf(ring, txd); | |
696 | memset(tx_buf, 0, sizeof(*tx_buf)); | |
697 | ||
698 | tx_buf->flags |= MTK_TX_FLAGS_PAGE0; | |
699 | dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); | |
700 | dma_unmap_len_set(tx_buf, dma_len0, frag_map_size); | |
701 | frag_size -= frag_map_size; | |
702 | offset += frag_map_size; | |
703 | } | |
704 | } | |
705 | ||
706 | /* store skb to cleanup */ | |
707 | tx_buf->skb = skb; | |
708 | ||
709 | WRITE_ONCE(itxd->txd4, txd4); | |
710 | WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) | | |
711 | (!nr_frags * TX_DMA_LS0))); | |
712 | ||
656e7052 JC |
713 | netdev_sent_queue(dev, skb->len); |
714 | skb_tx_timestamp(skb); | |
715 | ||
716 | ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); | |
717 | atomic_sub(n_desc, &ring->free_count); | |
718 | ||
719 | /* make sure that all changes to the dma ring are flushed before we | |
720 | * continue | |
721 | */ | |
722 | wmb(); | |
723 | ||
724 | if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more) | |
725 | mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); | |
726 | ||
727 | return 0; | |
728 | ||
729 | err_dma: | |
730 | do { | |
2fae723c | 731 | tx_buf = mtk_desc_to_tx_buf(ring, itxd); |
656e7052 JC |
732 | |
733 | /* unmap dma */ | |
55a4e778 | 734 | mtk_tx_unmap(eth, tx_buf); |
656e7052 JC |
735 | |
736 | itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; | |
737 | itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); | |
738 | } while (itxd != txd); | |
739 | ||
740 | return -ENOMEM; | |
741 | } | |
742 | ||
743 | static inline int mtk_cal_txd_req(struct sk_buff *skb) | |
744 | { | |
745 | int i, nfrags; | |
746 | struct skb_frag_struct *frag; | |
747 | ||
748 | nfrags = 1; | |
749 | if (skb_is_gso(skb)) { | |
750 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
751 | frag = &skb_shinfo(skb)->frags[i]; | |
752 | nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN); | |
753 | } | |
754 | } else { | |
755 | nfrags += skb_shinfo(skb)->nr_frags; | |
756 | } | |
757 | ||
beeb4ca4 | 758 | return nfrags; |
656e7052 JC |
759 | } |
760 | ||
ad3cba98 JC |
761 | static int mtk_queue_stopped(struct mtk_eth *eth) |
762 | { | |
763 | int i; | |
764 | ||
765 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
766 | if (!eth->netdev[i]) | |
767 | continue; | |
768 | if (netif_queue_stopped(eth->netdev[i])) | |
769 | return 1; | |
770 | } | |
771 | ||
772 | return 0; | |
773 | } | |
774 | ||
13c822f6 JC |
775 | static void mtk_wake_queue(struct mtk_eth *eth) |
776 | { | |
777 | int i; | |
778 | ||
779 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
780 | if (!eth->netdev[i]) | |
781 | continue; | |
782 | netif_wake_queue(eth->netdev[i]); | |
783 | } | |
784 | } | |
785 | ||
786 | static void mtk_stop_queue(struct mtk_eth *eth) | |
787 | { | |
788 | int i; | |
789 | ||
790 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
791 | if (!eth->netdev[i]) | |
792 | continue; | |
793 | netif_stop_queue(eth->netdev[i]); | |
794 | } | |
795 | } | |
796 | ||
656e7052 JC |
797 | static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) |
798 | { | |
799 | struct mtk_mac *mac = netdev_priv(dev); | |
800 | struct mtk_eth *eth = mac->hw; | |
801 | struct mtk_tx_ring *ring = ð->tx_ring; | |
802 | struct net_device_stats *stats = &dev->stats; | |
803 | bool gso = false; | |
804 | int tx_num; | |
805 | ||
34c2e4c9 JC |
806 | /* normally we can rely on the stack not calling this more than once, |
807 | * however we have 2 queues running on the same ring so we need to lock | |
808 | * the ring access | |
809 | */ | |
e3e9652a | 810 | spin_lock(ð->page_lock); |
34c2e4c9 | 811 | |
dce6fa42 SW |
812 | if (unlikely(test_bit(MTK_RESETTING, ð->state))) |
813 | goto drop; | |
814 | ||
656e7052 JC |
815 | tx_num = mtk_cal_txd_req(skb); |
816 | if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { | |
13c822f6 | 817 | mtk_stop_queue(eth); |
656e7052 JC |
818 | netif_err(eth, tx_queued, dev, |
819 | "Tx Ring full when queue awake!\n"); | |
e3e9652a | 820 | spin_unlock(ð->page_lock); |
656e7052 JC |
821 | return NETDEV_TX_BUSY; |
822 | } | |
823 | ||
824 | /* TSO: fill MSS info in tcp checksum field */ | |
825 | if (skb_is_gso(skb)) { | |
826 | if (skb_cow_head(skb, 0)) { | |
827 | netif_warn(eth, tx_err, dev, | |
828 | "GSO expand head fail.\n"); | |
829 | goto drop; | |
830 | } | |
831 | ||
832 | if (skb_shinfo(skb)->gso_type & | |
833 | (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { | |
834 | gso = true; | |
835 | tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size); | |
836 | } | |
837 | } | |
838 | ||
839 | if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0) | |
840 | goto drop; | |
841 | ||
82c6544d | 842 | if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) |
13c822f6 | 843 | mtk_stop_queue(eth); |
82c6544d | 844 | |
e3e9652a | 845 | spin_unlock(ð->page_lock); |
656e7052 JC |
846 | |
847 | return NETDEV_TX_OK; | |
848 | ||
849 | drop: | |
e3e9652a | 850 | spin_unlock(ð->page_lock); |
656e7052 JC |
851 | stats->tx_dropped++; |
852 | dev_kfree_skb(skb); | |
853 | return NETDEV_TX_OK; | |
854 | } | |
855 | ||
ee406810 NC |
856 | static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) |
857 | { | |
858 | int i; | |
859 | struct mtk_rx_ring *ring; | |
860 | int idx; | |
861 | ||
862 | if (!eth->hwlro) | |
863 | return ð->rx_ring[0]; | |
864 | ||
865 | for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) { | |
866 | ring = ð->rx_ring[i]; | |
867 | idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size); | |
868 | if (ring->dma[idx].rxd2 & RX_DMA_DONE) { | |
869 | ring->calc_idx_update = true; | |
870 | return ring; | |
871 | } | |
872 | } | |
873 | ||
874 | return NULL; | |
875 | } | |
876 | ||
877 | static void mtk_update_rx_cpu_idx(struct mtk_eth *eth) | |
878 | { | |
879 | struct mtk_rx_ring *ring; | |
880 | int i; | |
881 | ||
882 | if (!eth->hwlro) { | |
883 | ring = ð->rx_ring[0]; | |
884 | mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); | |
885 | } else { | |
886 | for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) { | |
887 | ring = ð->rx_ring[i]; | |
888 | if (ring->calc_idx_update) { | |
889 | ring->calc_idx_update = false; | |
890 | mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); | |
891 | } | |
892 | } | |
893 | } | |
894 | } | |
895 | ||
656e7052 | 896 | static int mtk_poll_rx(struct napi_struct *napi, int budget, |
eece71e8 | 897 | struct mtk_eth *eth) |
656e7052 | 898 | { |
ee406810 NC |
899 | struct mtk_rx_ring *ring; |
900 | int idx; | |
656e7052 JC |
901 | struct sk_buff *skb; |
902 | u8 *data, *new_data; | |
903 | struct mtk_rx_dma *rxd, trxd; | |
904 | int done = 0; | |
905 | ||
906 | while (done < budget) { | |
907 | struct net_device *netdev; | |
908 | unsigned int pktlen; | |
909 | dma_addr_t dma_addr; | |
910 | int mac = 0; | |
911 | ||
ee406810 NC |
912 | ring = mtk_get_rx_ring(eth); |
913 | if (unlikely(!ring)) | |
914 | goto rx_done; | |
915 | ||
916 | idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size); | |
656e7052 JC |
917 | rxd = &ring->dma[idx]; |
918 | data = ring->data[idx]; | |
919 | ||
920 | mtk_rx_get_desc(&trxd, rxd); | |
921 | if (!(trxd.rxd2 & RX_DMA_DONE)) | |
922 | break; | |
923 | ||
924 | /* find out which mac the packet come from. values start at 1 */ | |
925 | mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & | |
926 | RX_DMA_FPORT_MASK; | |
927 | mac--; | |
928 | ||
929 | netdev = eth->netdev[mac]; | |
930 | ||
dce6fa42 SW |
931 | if (unlikely(test_bit(MTK_RESETTING, ð->state))) |
932 | goto release_desc; | |
933 | ||
656e7052 JC |
934 | /* alloc new buffer */ |
935 | new_data = napi_alloc_frag(ring->frag_size); | |
936 | if (unlikely(!new_data)) { | |
937 | netdev->stats.rx_dropped++; | |
938 | goto release_desc; | |
939 | } | |
55a4e778 | 940 | dma_addr = dma_map_single(eth->dev, |
656e7052 JC |
941 | new_data + NET_SKB_PAD, |
942 | ring->buf_size, | |
943 | DMA_FROM_DEVICE); | |
55a4e778 | 944 | if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { |
656e7052 | 945 | skb_free_frag(new_data); |
94321a9f | 946 | netdev->stats.rx_dropped++; |
656e7052 JC |
947 | goto release_desc; |
948 | } | |
949 | ||
950 | /* receive data */ | |
951 | skb = build_skb(data, ring->frag_size); | |
952 | if (unlikely(!skb)) { | |
1b430799 | 953 | skb_free_frag(new_data); |
94321a9f | 954 | netdev->stats.rx_dropped++; |
656e7052 JC |
955 | goto release_desc; |
956 | } | |
957 | skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); | |
958 | ||
55a4e778 | 959 | dma_unmap_single(eth->dev, trxd.rxd1, |
656e7052 JC |
960 | ring->buf_size, DMA_FROM_DEVICE); |
961 | pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); | |
962 | skb->dev = netdev; | |
963 | skb_put(skb, pktlen); | |
964 | if (trxd.rxd4 & RX_DMA_L4_VALID) | |
965 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
966 | else | |
967 | skb_checksum_none_assert(skb); | |
968 | skb->protocol = eth_type_trans(skb, netdev); | |
969 | ||
970 | if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && | |
971 | RX_DMA_VID(trxd.rxd3)) | |
972 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), | |
973 | RX_DMA_VID(trxd.rxd3)); | |
974 | napi_gro_receive(napi, skb); | |
975 | ||
976 | ring->data[idx] = new_data; | |
977 | rxd->rxd1 = (unsigned int)dma_addr; | |
978 | ||
979 | release_desc: | |
980 | rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size); | |
981 | ||
982 | ring->calc_idx = idx; | |
635372ad | 983 | |
656e7052 JC |
984 | done++; |
985 | } | |
986 | ||
ee406810 | 987 | rx_done: |
41156cea SW |
988 | if (done) { |
989 | /* make sure that all changes to the dma ring are flushed before | |
990 | * we continue | |
991 | */ | |
992 | wmb(); | |
ee406810 | 993 | mtk_update_rx_cpu_idx(eth); |
41156cea | 994 | } |
656e7052 JC |
995 | |
996 | return done; | |
997 | } | |
998 | ||
80673029 | 999 | static int mtk_poll_tx(struct mtk_eth *eth, int budget) |
656e7052 JC |
1000 | { |
1001 | struct mtk_tx_ring *ring = ð->tx_ring; | |
1002 | struct mtk_tx_dma *desc; | |
1003 | struct sk_buff *skb; | |
1004 | struct mtk_tx_buf *tx_buf; | |
80673029 | 1005 | unsigned int done[MTK_MAX_DEVS]; |
656e7052 JC |
1006 | unsigned int bytes[MTK_MAX_DEVS]; |
1007 | u32 cpu, dma; | |
1008 | static int condition; | |
80673029 | 1009 | int total = 0, i; |
656e7052 JC |
1010 | |
1011 | memset(done, 0, sizeof(done)); | |
1012 | memset(bytes, 0, sizeof(bytes)); | |
1013 | ||
1014 | cpu = mtk_r32(eth, MTK_QTX_CRX_PTR); | |
1015 | dma = mtk_r32(eth, MTK_QTX_DRX_PTR); | |
1016 | ||
1017 | desc = mtk_qdma_phys_to_virt(ring, cpu); | |
1018 | ||
1019 | while ((cpu != dma) && budget) { | |
1020 | u32 next_cpu = desc->txd2; | |
1021 | int mac; | |
1022 | ||
1023 | desc = mtk_qdma_phys_to_virt(ring, desc->txd2); | |
1024 | if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0) | |
1025 | break; | |
1026 | ||
1027 | mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) & | |
1028 | TX_DMA_FPORT_MASK; | |
1029 | mac--; | |
1030 | ||
1031 | tx_buf = mtk_desc_to_tx_buf(ring, desc); | |
1032 | skb = tx_buf->skb; | |
1033 | if (!skb) { | |
1034 | condition = 1; | |
1035 | break; | |
1036 | } | |
1037 | ||
1038 | if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) { | |
1039 | bytes[mac] += skb->len; | |
1040 | done[mac]++; | |
1041 | budget--; | |
1042 | } | |
55a4e778 | 1043 | mtk_tx_unmap(eth, tx_buf); |
656e7052 | 1044 | |
656e7052 JC |
1045 | ring->last_free = desc; |
1046 | atomic_inc(&ring->free_count); | |
1047 | ||
1048 | cpu = next_cpu; | |
1049 | } | |
1050 | ||
1051 | mtk_w32(eth, cpu, MTK_QTX_CRX_PTR); | |
1052 | ||
1053 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
1054 | if (!eth->netdev[i] || !done[i]) | |
1055 | continue; | |
1056 | netdev_completed_queue(eth->netdev[i], done[i], bytes[i]); | |
1057 | total += done[i]; | |
1058 | } | |
1059 | ||
ad3cba98 JC |
1060 | if (mtk_queue_stopped(eth) && |
1061 | (atomic_read(&ring->free_count) > ring->thresh)) | |
13c822f6 | 1062 | mtk_wake_queue(eth); |
656e7052 JC |
1063 | |
1064 | return total; | |
1065 | } | |
1066 | ||
80673029 | 1067 | static void mtk_handle_status_irq(struct mtk_eth *eth) |
656e7052 | 1068 | { |
80673029 | 1069 | u32 status2 = mtk_r32(eth, MTK_INT_STATUS2); |
656e7052 | 1070 | |
eece71e8 | 1071 | if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) { |
656e7052 | 1072 | mtk_stats_update(eth); |
eece71e8 JC |
1073 | mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF), |
1074 | MTK_INT_STATUS2); | |
656e7052 | 1075 | } |
80673029 JC |
1076 | } |
1077 | ||
1078 | static int mtk_napi_tx(struct napi_struct *napi, int budget) | |
1079 | { | |
1080 | struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); | |
1081 | u32 status, mask; | |
1082 | int tx_done = 0; | |
1083 | ||
1084 | mtk_handle_status_irq(eth); | |
1085 | mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS); | |
1086 | tx_done = mtk_poll_tx(eth, budget); | |
1087 | ||
1088 | if (unlikely(netif_msg_intr(eth))) { | |
1089 | status = mtk_r32(eth, MTK_QMTK_INT_STATUS); | |
1090 | mask = mtk_r32(eth, MTK_QDMA_INT_MASK); | |
1091 | dev_info(eth->dev, | |
1092 | "done tx %d, intr 0x%08x/0x%x\n", | |
1093 | tx_done, status, mask); | |
1094 | } | |
1095 | ||
1096 | if (tx_done == budget) | |
1097 | return budget; | |
1098 | ||
1099 | status = mtk_r32(eth, MTK_QMTK_INT_STATUS); | |
1100 | if (status & MTK_TX_DONE_INT) | |
1101 | return budget; | |
1102 | ||
1103 | napi_complete(napi); | |
bacfd110 | 1104 | mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); |
80673029 JC |
1105 | |
1106 | return tx_done; | |
1107 | } | |
1108 | ||
1109 | static int mtk_napi_rx(struct napi_struct *napi, int budget) | |
1110 | { | |
1111 | struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); | |
1112 | u32 status, mask; | |
1113 | int rx_done = 0; | |
41156cea | 1114 | int remain_budget = budget; |
80673029 JC |
1115 | |
1116 | mtk_handle_status_irq(eth); | |
41156cea SW |
1117 | |
1118 | poll_again: | |
bacfd110 | 1119 | mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS); |
41156cea | 1120 | rx_done = mtk_poll_rx(napi, remain_budget, eth); |
656e7052 JC |
1121 | |
1122 | if (unlikely(netif_msg_intr(eth))) { | |
bacfd110 NC |
1123 | status = mtk_r32(eth, MTK_PDMA_INT_STATUS); |
1124 | mask = mtk_r32(eth, MTK_PDMA_INT_MASK); | |
80673029 JC |
1125 | dev_info(eth->dev, |
1126 | "done rx %d, intr 0x%08x/0x%x\n", | |
1127 | rx_done, status, mask); | |
656e7052 | 1128 | } |
41156cea | 1129 | if (rx_done == remain_budget) |
656e7052 JC |
1130 | return budget; |
1131 | ||
bacfd110 | 1132 | status = mtk_r32(eth, MTK_PDMA_INT_STATUS); |
41156cea SW |
1133 | if (status & MTK_RX_DONE_INT) { |
1134 | remain_budget -= rx_done; | |
1135 | goto poll_again; | |
1136 | } | |
656e7052 | 1137 | napi_complete(napi); |
bacfd110 | 1138 | mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); |
656e7052 | 1139 | |
41156cea | 1140 | return rx_done + budget - remain_budget; |
656e7052 JC |
1141 | } |
1142 | ||
1143 | static int mtk_tx_alloc(struct mtk_eth *eth) | |
1144 | { | |
1145 | struct mtk_tx_ring *ring = ð->tx_ring; | |
1146 | int i, sz = sizeof(*ring->dma); | |
1147 | ||
1148 | ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf), | |
1149 | GFP_KERNEL); | |
1150 | if (!ring->buf) | |
1151 | goto no_tx_mem; | |
1152 | ||
1153 | ring->dma = dma_alloc_coherent(eth->dev, | |
1154 | MTK_DMA_SIZE * sz, | |
1155 | &ring->phys, | |
1156 | GFP_ATOMIC | __GFP_ZERO); | |
1157 | if (!ring->dma) | |
1158 | goto no_tx_mem; | |
1159 | ||
1160 | memset(ring->dma, 0, MTK_DMA_SIZE * sz); | |
1161 | for (i = 0; i < MTK_DMA_SIZE; i++) { | |
1162 | int next = (i + 1) % MTK_DMA_SIZE; | |
1163 | u32 next_ptr = ring->phys + next * sz; | |
1164 | ||
1165 | ring->dma[i].txd2 = next_ptr; | |
1166 | ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; | |
1167 | } | |
1168 | ||
1169 | atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); | |
1170 | ring->next_free = &ring->dma[0]; | |
12c97c13 | 1171 | ring->last_free = &ring->dma[MTK_DMA_SIZE - 1]; |
04698ccc | 1172 | ring->thresh = MAX_SKB_FRAGS; |
656e7052 JC |
1173 | |
1174 | /* make sure that all changes to the dma ring are flushed before we | |
1175 | * continue | |
1176 | */ | |
1177 | wmb(); | |
1178 | ||
1179 | mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR); | |
1180 | mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR); | |
1181 | mtk_w32(eth, | |
1182 | ring->phys + ((MTK_DMA_SIZE - 1) * sz), | |
1183 | MTK_QTX_CRX_PTR); | |
1184 | mtk_w32(eth, | |
1185 | ring->phys + ((MTK_DMA_SIZE - 1) * sz), | |
1186 | MTK_QTX_DRX_PTR); | |
bacfd110 | 1187 | mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0)); |
656e7052 JC |
1188 | |
1189 | return 0; | |
1190 | ||
1191 | no_tx_mem: | |
1192 | return -ENOMEM; | |
1193 | } | |
1194 | ||
1195 | static void mtk_tx_clean(struct mtk_eth *eth) | |
1196 | { | |
1197 | struct mtk_tx_ring *ring = ð->tx_ring; | |
1198 | int i; | |
1199 | ||
1200 | if (ring->buf) { | |
1201 | for (i = 0; i < MTK_DMA_SIZE; i++) | |
55a4e778 | 1202 | mtk_tx_unmap(eth, &ring->buf[i]); |
656e7052 JC |
1203 | kfree(ring->buf); |
1204 | ring->buf = NULL; | |
1205 | } | |
1206 | ||
1207 | if (ring->dma) { | |
1208 | dma_free_coherent(eth->dev, | |
1209 | MTK_DMA_SIZE * sizeof(*ring->dma), | |
1210 | ring->dma, | |
1211 | ring->phys); | |
1212 | ring->dma = NULL; | |
1213 | } | |
1214 | } | |
1215 | ||
ee406810 | 1216 | static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) |
656e7052 | 1217 | { |
ee406810 NC |
1218 | struct mtk_rx_ring *ring = ð->rx_ring[ring_no]; |
1219 | int rx_data_len, rx_dma_size; | |
656e7052 JC |
1220 | int i; |
1221 | ||
ee406810 NC |
1222 | if (rx_flag == MTK_RX_FLAGS_HWLRO) { |
1223 | rx_data_len = MTK_MAX_LRO_RX_LENGTH; | |
1224 | rx_dma_size = MTK_HW_LRO_DMA_SIZE; | |
1225 | } else { | |
1226 | rx_data_len = ETH_DATA_LEN; | |
1227 | rx_dma_size = MTK_DMA_SIZE; | |
1228 | } | |
1229 | ||
1230 | ring->frag_size = mtk_max_frag_size(rx_data_len); | |
656e7052 | 1231 | ring->buf_size = mtk_max_buf_size(ring->frag_size); |
ee406810 | 1232 | ring->data = kcalloc(rx_dma_size, sizeof(*ring->data), |
656e7052 JC |
1233 | GFP_KERNEL); |
1234 | if (!ring->data) | |
1235 | return -ENOMEM; | |
1236 | ||
ee406810 | 1237 | for (i = 0; i < rx_dma_size; i++) { |
656e7052 JC |
1238 | ring->data[i] = netdev_alloc_frag(ring->frag_size); |
1239 | if (!ring->data[i]) | |
1240 | return -ENOMEM; | |
1241 | } | |
1242 | ||
1243 | ring->dma = dma_alloc_coherent(eth->dev, | |
ee406810 | 1244 | rx_dma_size * sizeof(*ring->dma), |
656e7052 JC |
1245 | &ring->phys, |
1246 | GFP_ATOMIC | __GFP_ZERO); | |
1247 | if (!ring->dma) | |
1248 | return -ENOMEM; | |
1249 | ||
ee406810 | 1250 | for (i = 0; i < rx_dma_size; i++) { |
656e7052 JC |
1251 | dma_addr_t dma_addr = dma_map_single(eth->dev, |
1252 | ring->data[i] + NET_SKB_PAD, | |
1253 | ring->buf_size, | |
1254 | DMA_FROM_DEVICE); | |
1255 | if (unlikely(dma_mapping_error(eth->dev, dma_addr))) | |
1256 | return -ENOMEM; | |
1257 | ring->dma[i].rxd1 = (unsigned int)dma_addr; | |
1258 | ||
1259 | ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size); | |
1260 | } | |
ee406810 NC |
1261 | ring->dma_size = rx_dma_size; |
1262 | ring->calc_idx_update = false; | |
1263 | ring->calc_idx = rx_dma_size - 1; | |
1264 | ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no); | |
656e7052 JC |
1265 | /* make sure that all changes to the dma ring are flushed before we |
1266 | * continue | |
1267 | */ | |
1268 | wmb(); | |
1269 | ||
ee406810 NC |
1270 | mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no)); |
1271 | mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no)); | |
1272 | mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); | |
1273 | mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX); | |
656e7052 JC |
1274 | |
1275 | return 0; | |
1276 | } | |
1277 | ||
ee406810 | 1278 | static void mtk_rx_clean(struct mtk_eth *eth, int ring_no) |
656e7052 | 1279 | { |
ee406810 | 1280 | struct mtk_rx_ring *ring = ð->rx_ring[ring_no]; |
656e7052 JC |
1281 | int i; |
1282 | ||
1283 | if (ring->data && ring->dma) { | |
ee406810 | 1284 | for (i = 0; i < ring->dma_size; i++) { |
656e7052 JC |
1285 | if (!ring->data[i]) |
1286 | continue; | |
1287 | if (!ring->dma[i].rxd1) | |
1288 | continue; | |
1289 | dma_unmap_single(eth->dev, | |
1290 | ring->dma[i].rxd1, | |
1291 | ring->buf_size, | |
1292 | DMA_FROM_DEVICE); | |
1293 | skb_free_frag(ring->data[i]); | |
1294 | } | |
1295 | kfree(ring->data); | |
1296 | ring->data = NULL; | |
1297 | } | |
1298 | ||
1299 | if (ring->dma) { | |
1300 | dma_free_coherent(eth->dev, | |
ee406810 | 1301 | ring->dma_size * sizeof(*ring->dma), |
656e7052 JC |
1302 | ring->dma, |
1303 | ring->phys); | |
1304 | ring->dma = NULL; | |
1305 | } | |
1306 | } | |
1307 | ||
ee406810 NC |
1308 | static int mtk_hwlro_rx_init(struct mtk_eth *eth) |
1309 | { | |
1310 | int i; | |
1311 | u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0; | |
1312 | u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0; | |
1313 | ||
1314 | /* set LRO rings to auto-learn modes */ | |
1315 | ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE; | |
1316 | ||
1317 | /* validate LRO ring */ | |
1318 | ring_ctrl_dw2 |= MTK_RING_VLD; | |
1319 | ||
1320 | /* set AGE timer (unit: 20us) */ | |
1321 | ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H; | |
1322 | ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L; | |
1323 | ||
1324 | /* set max AGG timer (unit: 20us) */ | |
1325 | ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME; | |
1326 | ||
1327 | /* set max LRO AGG count */ | |
1328 | ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L; | |
1329 | ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H; | |
1330 | ||
1331 | for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) { | |
1332 | mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i)); | |
1333 | mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i)); | |
1334 | mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i)); | |
1335 | } | |
1336 | ||
1337 | /* IPv4 checksum update enable */ | |
1338 | lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN; | |
1339 | ||
1340 | /* switch priority comparison to packet count mode */ | |
1341 | lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE; | |
1342 | ||
1343 | /* bandwidth threshold setting */ | |
1344 | mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2); | |
1345 | ||
1346 | /* auto-learn score delta setting */ | |
1347 | mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA); | |
1348 | ||
1349 | /* set refresh timer for altering flows to 1 sec. (unit: 20us) */ | |
1350 | mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME, | |
1351 | MTK_PDMA_LRO_ALT_REFRESH_TIMER); | |
1352 | ||
1353 | /* set HW LRO mode & the max aggregation count for rx packets */ | |
1354 | lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff); | |
1355 | ||
1356 | /* the minimal remaining room of SDL0 in RXD for lro aggregation */ | |
1357 | lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL; | |
1358 | ||
1359 | /* enable HW LRO */ | |
1360 | lro_ctrl_dw0 |= MTK_LRO_EN; | |
1361 | ||
1362 | mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3); | |
1363 | mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0); | |
1364 | ||
1365 | return 0; | |
1366 | } | |
1367 | ||
1368 | static void mtk_hwlro_rx_uninit(struct mtk_eth *eth) | |
1369 | { | |
1370 | int i; | |
1371 | u32 val; | |
1372 | ||
1373 | /* relinquish lro rings, flush aggregated packets */ | |
1374 | mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0); | |
1375 | ||
1376 | /* wait for relinquishments done */ | |
1377 | for (i = 0; i < 10; i++) { | |
1378 | val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0); | |
1379 | if (val & MTK_LRO_RING_RELINQUISH_DONE) { | |
1380 | msleep(20); | |
1381 | continue; | |
1382 | } | |
1383 | } | |
1384 | ||
1385 | /* invalidate lro rings */ | |
1386 | for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) | |
1387 | mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i)); | |
1388 | ||
1389 | /* disable HW LRO */ | |
1390 | mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0); | |
1391 | } | |
1392 | ||
7aab747e NC |
1393 | static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip) |
1394 | { | |
1395 | u32 reg_val; | |
1396 | ||
1397 | reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); | |
1398 | ||
1399 | /* invalidate the IP setting */ | |
1400 | mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); | |
1401 | ||
1402 | mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx)); | |
1403 | ||
1404 | /* validate the IP setting */ | |
1405 | mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); | |
1406 | } | |
1407 | ||
1408 | static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx) | |
1409 | { | |
1410 | u32 reg_val; | |
1411 | ||
1412 | reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); | |
1413 | ||
1414 | /* invalidate the IP setting */ | |
1415 | mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); | |
1416 | ||
1417 | mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx)); | |
1418 | } | |
1419 | ||
1420 | static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac) | |
1421 | { | |
1422 | int cnt = 0; | |
1423 | int i; | |
1424 | ||
1425 | for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { | |
1426 | if (mac->hwlro_ip[i]) | |
1427 | cnt++; | |
1428 | } | |
1429 | ||
1430 | return cnt; | |
1431 | } | |
1432 | ||
1433 | static int mtk_hwlro_add_ipaddr(struct net_device *dev, | |
1434 | struct ethtool_rxnfc *cmd) | |
1435 | { | |
1436 | struct ethtool_rx_flow_spec *fsp = | |
1437 | (struct ethtool_rx_flow_spec *)&cmd->fs; | |
1438 | struct mtk_mac *mac = netdev_priv(dev); | |
1439 | struct mtk_eth *eth = mac->hw; | |
1440 | int hwlro_idx; | |
1441 | ||
1442 | if ((fsp->flow_type != TCP_V4_FLOW) || | |
1443 | (!fsp->h_u.tcp_ip4_spec.ip4dst) || | |
1444 | (fsp->location > 1)) | |
1445 | return -EINVAL; | |
1446 | ||
1447 | mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst); | |
1448 | hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; | |
1449 | ||
1450 | mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); | |
1451 | ||
1452 | mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]); | |
1453 | ||
1454 | return 0; | |
1455 | } | |
1456 | ||
1457 | static int mtk_hwlro_del_ipaddr(struct net_device *dev, | |
1458 | struct ethtool_rxnfc *cmd) | |
1459 | { | |
1460 | struct ethtool_rx_flow_spec *fsp = | |
1461 | (struct ethtool_rx_flow_spec *)&cmd->fs; | |
1462 | struct mtk_mac *mac = netdev_priv(dev); | |
1463 | struct mtk_eth *eth = mac->hw; | |
1464 | int hwlro_idx; | |
1465 | ||
1466 | if (fsp->location > 1) | |
1467 | return -EINVAL; | |
1468 | ||
1469 | mac->hwlro_ip[fsp->location] = 0; | |
1470 | hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; | |
1471 | ||
1472 | mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); | |
1473 | ||
1474 | mtk_hwlro_inval_ipaddr(eth, hwlro_idx); | |
1475 | ||
1476 | return 0; | |
1477 | } | |
1478 | ||
1479 | static void mtk_hwlro_netdev_disable(struct net_device *dev) | |
1480 | { | |
1481 | struct mtk_mac *mac = netdev_priv(dev); | |
1482 | struct mtk_eth *eth = mac->hw; | |
1483 | int i, hwlro_idx; | |
1484 | ||
1485 | for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { | |
1486 | mac->hwlro_ip[i] = 0; | |
1487 | hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i; | |
1488 | ||
1489 | mtk_hwlro_inval_ipaddr(eth, hwlro_idx); | |
1490 | } | |
1491 | ||
1492 | mac->hwlro_ip_cnt = 0; | |
1493 | } | |
1494 | ||
1495 | static int mtk_hwlro_get_fdir_entry(struct net_device *dev, | |
1496 | struct ethtool_rxnfc *cmd) | |
1497 | { | |
1498 | struct mtk_mac *mac = netdev_priv(dev); | |
1499 | struct ethtool_rx_flow_spec *fsp = | |
1500 | (struct ethtool_rx_flow_spec *)&cmd->fs; | |
1501 | ||
1502 | /* only tcp dst ipv4 is meaningful, others are meaningless */ | |
1503 | fsp->flow_type = TCP_V4_FLOW; | |
1504 | fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]); | |
1505 | fsp->m_u.tcp_ip4_spec.ip4dst = 0; | |
1506 | ||
1507 | fsp->h_u.tcp_ip4_spec.ip4src = 0; | |
1508 | fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff; | |
1509 | fsp->h_u.tcp_ip4_spec.psrc = 0; | |
1510 | fsp->m_u.tcp_ip4_spec.psrc = 0xffff; | |
1511 | fsp->h_u.tcp_ip4_spec.pdst = 0; | |
1512 | fsp->m_u.tcp_ip4_spec.pdst = 0xffff; | |
1513 | fsp->h_u.tcp_ip4_spec.tos = 0; | |
1514 | fsp->m_u.tcp_ip4_spec.tos = 0xff; | |
1515 | ||
1516 | return 0; | |
1517 | } | |
1518 | ||
1519 | static int mtk_hwlro_get_fdir_all(struct net_device *dev, | |
1520 | struct ethtool_rxnfc *cmd, | |
1521 | u32 *rule_locs) | |
1522 | { | |
1523 | struct mtk_mac *mac = netdev_priv(dev); | |
1524 | int cnt = 0; | |
1525 | int i; | |
1526 | ||
1527 | for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { | |
1528 | if (mac->hwlro_ip[i]) { | |
1529 | rule_locs[cnt] = i; | |
1530 | cnt++; | |
1531 | } | |
1532 | } | |
1533 | ||
1534 | cmd->rule_cnt = cnt; | |
1535 | ||
1536 | return 0; | |
1537 | } | |
1538 | ||
1539 | static netdev_features_t mtk_fix_features(struct net_device *dev, | |
1540 | netdev_features_t features) | |
1541 | { | |
1542 | if (!(features & NETIF_F_LRO)) { | |
1543 | struct mtk_mac *mac = netdev_priv(dev); | |
1544 | int ip_cnt = mtk_hwlro_get_ip_cnt(mac); | |
1545 | ||
1546 | if (ip_cnt) { | |
1547 | netdev_info(dev, "RX flow is programmed, LRO should keep on\n"); | |
1548 | ||
1549 | features |= NETIF_F_LRO; | |
1550 | } | |
1551 | } | |
1552 | ||
1553 | return features; | |
1554 | } | |
1555 | ||
1556 | static int mtk_set_features(struct net_device *dev, netdev_features_t features) | |
1557 | { | |
1558 | int err = 0; | |
1559 | ||
1560 | if (!((dev->features ^ features) & NETIF_F_LRO)) | |
1561 | return 0; | |
1562 | ||
1563 | if (!(features & NETIF_F_LRO)) | |
1564 | mtk_hwlro_netdev_disable(dev); | |
1565 | ||
1566 | return err; | |
1567 | } | |
1568 | ||
656e7052 JC |
1569 | /* wait for DMA to finish whatever it is doing before we start using it again */ |
1570 | static int mtk_dma_busy_wait(struct mtk_eth *eth) | |
1571 | { | |
1572 | unsigned long t_start = jiffies; | |
1573 | ||
1574 | while (1) { | |
1575 | if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) & | |
1576 | (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY))) | |
1577 | return 0; | |
1578 | if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT)) | |
1579 | break; | |
1580 | } | |
1581 | ||
1582 | dev_err(eth->dev, "DMA init timeout\n"); | |
1583 | return -1; | |
1584 | } | |
1585 | ||
1586 | static int mtk_dma_init(struct mtk_eth *eth) | |
1587 | { | |
1588 | int err; | |
ee406810 | 1589 | u32 i; |
656e7052 JC |
1590 | |
1591 | if (mtk_dma_busy_wait(eth)) | |
1592 | return -EBUSY; | |
1593 | ||
1594 | /* QDMA needs scratch memory for internal reordering of the | |
1595 | * descriptors | |
1596 | */ | |
1597 | err = mtk_init_fq_dma(eth); | |
1598 | if (err) | |
1599 | return err; | |
1600 | ||
1601 | err = mtk_tx_alloc(eth); | |
1602 | if (err) | |
1603 | return err; | |
1604 | ||
ee406810 | 1605 | err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); |
656e7052 JC |
1606 | if (err) |
1607 | return err; | |
1608 | ||
ee406810 NC |
1609 | if (eth->hwlro) { |
1610 | for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) { | |
1611 | err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO); | |
1612 | if (err) | |
1613 | return err; | |
1614 | } | |
1615 | err = mtk_hwlro_rx_init(eth); | |
1616 | if (err) | |
1617 | return err; | |
1618 | } | |
1619 | ||
656e7052 JC |
1620 | /* Enable random early drop and set drop threshold automatically */ |
1621 | mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN, | |
1622 | MTK_QDMA_FC_THRES); | |
1623 | mtk_w32(eth, 0x0, MTK_QDMA_HRED2); | |
1624 | ||
1625 | return 0; | |
1626 | } | |
1627 | ||
1628 | static void mtk_dma_free(struct mtk_eth *eth) | |
1629 | { | |
1630 | int i; | |
1631 | ||
1632 | for (i = 0; i < MTK_MAC_COUNT; i++) | |
1633 | if (eth->netdev[i]) | |
1634 | netdev_reset_queue(eth->netdev[i]); | |
605e4fe4 JC |
1635 | if (eth->scratch_ring) { |
1636 | dma_free_coherent(eth->dev, | |
1637 | MTK_DMA_SIZE * sizeof(struct mtk_tx_dma), | |
1638 | eth->scratch_ring, | |
1639 | eth->phy_scratch_ring); | |
1640 | eth->scratch_ring = NULL; | |
1641 | eth->phy_scratch_ring = 0; | |
1642 | } | |
656e7052 | 1643 | mtk_tx_clean(eth); |
ee406810 NC |
1644 | mtk_rx_clean(eth, 0); |
1645 | ||
1646 | if (eth->hwlro) { | |
1647 | mtk_hwlro_rx_uninit(eth); | |
1648 | for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) | |
1649 | mtk_rx_clean(eth, i); | |
1650 | } | |
1651 | ||
656e7052 JC |
1652 | kfree(eth->scratch_head); |
1653 | } | |
1654 | ||
1655 | static void mtk_tx_timeout(struct net_device *dev) | |
1656 | { | |
1657 | struct mtk_mac *mac = netdev_priv(dev); | |
1658 | struct mtk_eth *eth = mac->hw; | |
1659 | ||
1660 | eth->netdev[mac->id]->stats.tx_errors++; | |
1661 | netif_err(eth, tx_err, dev, | |
1662 | "transmit timed out\n"); | |
7c78b4ad | 1663 | schedule_work(ð->pending_work); |
656e7052 JC |
1664 | } |
1665 | ||
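| /* RX and TX completions arrive on separate IRQ lines (irq[2] and |
| * irq[1], see mtk_probe()).  Each handler masks its own "done" |
| * interrupt and schedules the matching NAPI context on the shared |
| * dummy netdev. |
| */ |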
80673029 | 1666 | static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth) |
656e7052 JC |
1667 | { |
1668 | struct mtk_eth *eth = _eth; | |
656e7052 | 1669 | |
80673029 JC |
1670 | if (likely(napi_schedule_prep(ð->rx_napi))) { |
1671 | __napi_schedule(ð->rx_napi); | |
bacfd110 | 1672 | mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); |
80673029 | 1673 | } |
656e7052 | 1674 | |
80673029 JC |
1675 | return IRQ_HANDLED; |
1676 | } | |
1677 | ||
1678 | static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) | |
1679 | { | |
1680 | struct mtk_eth *eth = _eth; | |
1681 | ||
1682 | if (likely(napi_schedule_prep(ð->tx_napi))) { | |
1683 | __napi_schedule(ð->tx_napi); | |
bacfd110 | 1684 | mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); |
656e7052 | 1685 | } |
656e7052 JC |
1686 | |
1687 | return IRQ_HANDLED; | |
1688 | } | |
1689 | ||
1690 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
1691 | static void mtk_poll_controller(struct net_device *dev) | |
1692 | { | |
1693 | struct mtk_mac *mac = netdev_priv(dev); | |
1694 | struct mtk_eth *eth = mac->hw; | |
656e7052 | 1695 | |
bacfd110 NC |
1696 | mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); |
1697 | mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); | |
8186f6e3 | 1698 | mtk_handle_irq_rx(eth->irq[2], dev); |
bacfd110 NC |
1699 | mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); |
1700 | mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); | |
656e7052 JC |
1701 | } |
1702 | #endif | |
1703 | ||
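| /* Set up the rings via mtk_dma_init() and then turn on the engines: |
| * the QDMA block drives the TX path, the PDMA block drives RX; the |
| * remaining bits select burst sizes and the 2-byte RX offset. |
| */ |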
1704 | static int mtk_start_dma(struct mtk_eth *eth) | |
1705 | { | |
1706 | int err; | |
1707 | ||
1708 | err = mtk_dma_init(eth); | |
1709 | if (err) { | |
1710 | mtk_dma_free(eth); | |
1711 | return err; | |
1712 | } | |
1713 | ||
1714 | mtk_w32(eth, | |
bacfd110 NC |
1715 | MTK_TX_WB_DDONE | MTK_TX_DMA_EN | |
1716 | MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO, | |
656e7052 JC |
1717 | MTK_QDMA_GLO_CFG); |
1718 | ||
bacfd110 NC |
1719 | mtk_w32(eth, |
1720 | MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | | |
1721 | MTK_RX_BT_32DWORDS | MTK_MULTI_EN, | |
1722 | MTK_PDMA_GLO_CFG); | |
1723 | ||
656e7052 JC |
1724 | return 0; |
1725 | } | |
1726 | ||
1727 | static int mtk_open(struct net_device *dev) | |
1728 | { | |
1729 | struct mtk_mac *mac = netdev_priv(dev); | |
1730 | struct mtk_eth *eth = mac->hw; | |
1731 | ||
1732 | /* we run two netdevs on the same DMA ring, so only bring it up once */ |
1733 | if (!atomic_read(ð->dma_refcnt)) { | |
1734 | int err = mtk_start_dma(eth); | |
1735 | ||
1736 | if (err) | |
1737 | return err; | |
1738 | ||
80673029 | 1739 | napi_enable(ð->tx_napi); |
656e7052 | 1740 | napi_enable(ð->rx_napi); |
bacfd110 NC |
1741 | mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); |
1742 | mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); | |
656e7052 JC |
1743 | } |
1744 | atomic_inc(ð->dma_refcnt); | |
1745 | ||
2364c5c5 | 1746 | phy_start(dev->phydev); |
656e7052 JC |
1747 | netif_start_queue(dev); |
1748 | ||
1749 | return 0; | |
1750 | } | |
1751 | ||
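| /* Clear the TX/RX enable bits in the given global-config register and |
| * give the engine up to 10 x 20 ms to drop its busy flags. |
| */ |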
1752 | static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) | |
1753 | { | |
656e7052 JC |
1754 | u32 val; |
1755 | int i; | |
1756 | ||
1757 | /* stop the dma engine */ | |
e3e9652a | 1758 | spin_lock_bh(ð->page_lock); |
656e7052 JC |
1759 | val = mtk_r32(eth, glo_cfg); |
1760 | mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), | |
1761 | glo_cfg); | |
e3e9652a | 1762 | spin_unlock_bh(ð->page_lock); |
656e7052 JC |
1763 | |
1764 | /* wait for dma stop */ | |
1765 | for (i = 0; i < 10; i++) { | |
1766 | val = mtk_r32(eth, glo_cfg); | |
1767 | if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) { | |
1768 | msleep(20); | |
1769 | continue; | |
1770 | } | |
1771 | break; | |
1772 | } | |
1773 | } | |
1774 | ||
1775 | static int mtk_stop(struct net_device *dev) | |
1776 | { | |
1777 | struct mtk_mac *mac = netdev_priv(dev); | |
1778 | struct mtk_eth *eth = mac->hw; | |
1779 | ||
1780 | netif_tx_disable(dev); | |
2364c5c5 | 1781 | phy_stop(dev->phydev); |
656e7052 JC |
1782 | |
1783 | /* only shut down the DMA if this is the last user */ |
1784 | if (!atomic_dec_and_test(ð->dma_refcnt)) | |
1785 | return 0; | |
1786 | ||
bacfd110 NC |
1787 | mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); |
1788 | mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); | |
80673029 | 1789 | napi_disable(ð->tx_napi); |
656e7052 JC |
1790 | napi_disable(ð->rx_napi); |
1791 | ||
1792 | mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); | |
1793 | ||
1794 | mtk_dma_free(eth); | |
1795 | ||
1796 | return 0; | |
1797 | } | |
1798 | ||
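| /* Pulse the given bits in the ETHSYS reset controller: assert, hold |
| * for about a millisecond, de-assert, then give the blocks 10 ms to |
| * come back up. |
| */ |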
2a8307aa SW |
1799 | static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) |
1800 | { | |
1801 | regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, | |
1802 | reset_bits, | |
1803 | reset_bits); | |
1804 | ||
1805 | usleep_range(1000, 1100); | |
1806 | regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, | |
1807 | reset_bits, | |
1808 | ~reset_bits); | |
1809 | mdelay(10); | |
1810 | } | |
1811 | ||
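| /* One-time hardware bring-up, guarded by the MTK_HW_INIT state bit: |
| * runtime PM and clocks on, frame engine and PPE reset, per-MAC GE |
| * mode written to the ethsys syscon, GE2 pad setup through the pctl |
| * regmap, fixed 1000M full-duplex MAC configuration, interrupt |
| * grouping, and GDMA forwarding with RX checksum offload enabled. |
| */ |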
9ea4d311 | 1812 | static int mtk_hw_init(struct mtk_eth *eth) |
656e7052 | 1813 | { |
9ea4d311 SW |
1814 | int i, val; |
1815 | ||
1816 | if (test_and_set_bit(MTK_HW_INIT, ð->state)) | |
1817 | return 0; | |
85574dbf | 1818 | |
26a2ad8a SW |
1819 | pm_runtime_enable(eth->dev); |
1820 | pm_runtime_get_sync(eth->dev); | |
1821 | ||
85574dbf SW |
1822 | clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]); |
1823 | clk_prepare_enable(eth->clks[MTK_CLK_ESW]); | |
1824 | clk_prepare_enable(eth->clks[MTK_CLK_GP1]); | |
1825 | clk_prepare_enable(eth->clks[MTK_CLK_GP2]); | |
2a8307aa SW |
1826 | ethsys_reset(eth, RSTCTRL_FE); |
1827 | ethsys_reset(eth, RSTCTRL_PPE); | |
656e7052 | 1828 | |
9ea4d311 SW |
1829 | regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); |
1830 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
1831 | if (!eth->mac[i]) | |
1832 | continue; | |
1833 | val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id); | |
1834 | val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id); | |
1835 | } | |
1836 | regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); | |
1837 | ||
656e7052 JC |
1838 | /* Set GE2 drive strength and slew rate */ |
1839 | regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); | |
1840 | ||
1841 | /* set GE2 TDSEL */ | |
1842 | regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); | |
1843 | ||
1844 | /* set GE2 TUNE */ | |
1845 | regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); | |
1846 | ||
1847 | /* GE1, Force 1000M/FD, FC ON */ | |
1848 | mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0)); | |
1849 | ||
1850 | /* GE2, Force 1000M/FD, FC ON */ | |
1851 | mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1)); | |
1852 | ||
1853 | /* Enable RX VLAN offloading */ |
1854 | mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); | |
1855 | ||
656e7052 JC |
1856 | /* disable interrupt delaying and mask all interrupt sources */ |
1857 | mtk_w32(eth, 0, MTK_QDMA_DELAY_INT); | |
bacfd110 NC |
1858 | mtk_w32(eth, 0, MTK_PDMA_DELAY_INT); |
1859 | mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0); | |
1860 | mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0); | |
656e7052 JC |
1861 | mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); |
1862 | mtk_w32(eth, 0, MTK_RST_GL); | |
1863 | ||
1864 | /* FE int grouping */ | |
80673029 JC |
1865 | mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1); |
1866 | mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2); | |
1867 | mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1); | |
1868 | mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2); | |
1869 | mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); | |
656e7052 JC |
1870 | |
1871 | for (i = 0; i < 2; i++) { | |
1872 | u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); | |
1873 | ||
9c08435e | 1874 | /* set up the forward port to send frames to the PDMA */ |
656e7052 | 1875 | val &= ~0xffff; |
656e7052 JC |
1876 | |
1877 | /* Enable RX checksum */ | |
1878 | val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; | |
1879 | ||
1880 | /* set up the MAC DMA */ |
1881 | mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); | |
1882 | } | |
1883 | ||
1884 | return 0; | |
1885 | } | |
1886 | ||
bf253fb7 SW |
1887 | static int mtk_hw_deinit(struct mtk_eth *eth) |
1888 | { | |
9ea4d311 SW |
1889 | if (!test_and_clear_bit(MTK_HW_INIT, ð->state)) |
1890 | return 0; | |
1891 | ||
bf253fb7 SW |
1892 | clk_disable_unprepare(eth->clks[MTK_CLK_GP2]); |
1893 | clk_disable_unprepare(eth->clks[MTK_CLK_GP1]); | |
1894 | clk_disable_unprepare(eth->clks[MTK_CLK_ESW]); | |
1895 | clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]); | |
1896 | ||
26a2ad8a SW |
1897 | pm_runtime_put_sync(eth->dev); |
1898 | pm_runtime_disable(eth->dev); | |
1899 | ||
bf253fb7 SW |
1900 | return 0; |
1901 | } | |
1902 | ||
656e7052 JC |
1903 | static int __init mtk_init(struct net_device *dev) |
1904 | { | |
1905 | struct mtk_mac *mac = netdev_priv(dev); | |
1906 | struct mtk_eth *eth = mac->hw; | |
1907 | const char *mac_addr; | |
1908 | ||
1909 | mac_addr = of_get_mac_address(mac->of_node); | |
1910 | if (mac_addr) | |
1911 | ether_addr_copy(dev->dev_addr, mac_addr); | |
1912 | ||
1913 | /* If the MAC address is invalid, fall back to a random one */ |
1914 | if (!is_valid_ether_addr(dev->dev_addr)) { | |
1915 | random_ether_addr(dev->dev_addr); | |
1916 | dev_err(eth->dev, "generated random MAC address %pM\n", | |
1917 | dev->dev_addr); | |
1918 | dev->addr_assign_type = NET_ADDR_RANDOM; | |
1919 | } | |
1920 | ||
2364c5c5 | 1921 | return mtk_phy_connect(dev); |
656e7052 JC |
1922 | } |
1923 | ||
1924 | static void mtk_uninit(struct net_device *dev) | |
1925 | { | |
1926 | struct mtk_mac *mac = netdev_priv(dev); | |
1927 | struct mtk_eth *eth = mac->hw; | |
1928 | ||
2364c5c5 | 1929 | phy_disconnect(dev->phydev); |
bacfd110 NC |
1930 | mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0); |
1931 | mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0); | |
656e7052 JC |
1932 | } |
1933 | ||
1934 | static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |
1935 | { | |
656e7052 JC |
1936 | switch (cmd) { |
1937 | case SIOCGMIIPHY: | |
1938 | case SIOCGMIIREG: | |
1939 | case SIOCSMIIREG: | |
2364c5c5 | 1940 | return phy_mii_ioctl(dev->phydev, ifr, cmd); |
656e7052 JC |
1941 | default: |
1942 | break; | |
1943 | } | |
1944 | ||
1945 | return -EOPNOTSUPP; | |
1946 | } | |
1947 | ||
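| /* Reset worker scheduled from mtk_tx_timeout(): under rtnl and the |
| * MTK_RESETTING bit it stops every registered netdev, tears the |
| * hardware down and re-initializes it, re-runs phy_init_hw() on the |
| * non-fixed-link PHYs, and finally re-opens whatever was running. |
| */ |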
1948 | static void mtk_pending_work(struct work_struct *work) | |
1949 | { | |
7c78b4ad | 1950 | struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); |
e7d425dc JC |
1951 | int err, i; |
1952 | unsigned long restart = 0; | |
656e7052 JC |
1953 | |
1954 | rtnl_lock(); | |
656e7052 | 1955 | |
dce6fa42 SW |
1956 | dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__); |
1957 | ||
1958 | while (test_and_set_bit_lock(MTK_RESETTING, ð->state)) | |
1959 | cpu_relax(); | |
1960 | ||
1961 | dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__); | |
e7d425dc JC |
1962 | /* stop all devices to make sure that dma is properly shut down */ |
1963 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
7c78b4ad | 1964 | if (!eth->netdev[i]) |
e7d425dc JC |
1965 | continue; |
1966 | mtk_stop(eth->netdev[i]); | |
1967 | __set_bit(i, &restart); | |
1968 | } | |
dce6fa42 | 1969 | dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__); |
e7d425dc | 1970 | |
9ea4d311 SW |
1971 | /* restart underlying hardware such as power, clock, pin mux |
1972 | * and the connected phy | |
1973 | */ | |
1974 | mtk_hw_deinit(eth); | |
1975 | ||
1976 | if (eth->dev->pins) | |
1977 | pinctrl_select_state(eth->dev->pins->p, | |
1978 | eth->dev->pins->default_state); | |
1979 | mtk_hw_init(eth); | |
1980 | ||
1981 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
1982 | if (!eth->mac[i] || | |
1983 | of_phy_is_fixed_link(eth->mac[i]->of_node)) | |
1984 | continue; | |
2364c5c5 | 1985 | err = phy_init_hw(eth->netdev[i]->phydev); |
9ea4d311 SW |
1986 | if (err) |
1987 | dev_err(eth->dev, "%s: PHY init failed.\n", | |
1988 | eth->netdev[i]->name); | |
1989 | } | |
1990 | ||
e7d425dc JC |
1991 | /* restart DMA and enable IRQs */ |
1992 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
1993 | if (!test_bit(i, &restart)) | |
1994 | continue; | |
1995 | err = mtk_open(eth->netdev[i]); | |
1996 | if (err) { | |
1997 | netif_alert(eth, ifup, eth->netdev[i], | |
1998 | "Driver up/down cycle failed, closing device.\n"); | |
1999 | dev_close(eth->netdev[i]); | |
2000 | } | |
656e7052 | 2001 | } |
dce6fa42 SW |
2002 | |
2003 | dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__); | |
2004 | ||
2005 | clear_bit_unlock(MTK_RESETTING, ð->state); | |
2006 | ||
656e7052 JC |
2007 | rtnl_unlock(); |
2008 | } | |
2009 | ||
8a8a9e89 | 2010 | static int mtk_free_dev(struct mtk_eth *eth) |
656e7052 JC |
2011 | { |
2012 | int i; | |
2013 | ||
2014 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
656e7052 JC |
2015 | if (!eth->netdev[i]) |
2016 | continue; | |
8a8a9e89 SW |
2017 | free_netdev(eth->netdev[i]); |
2018 | } | |
2019 | ||
2020 | return 0; | |
2021 | } | |
656e7052 | 2022 | |
8a8a9e89 SW |
2023 | static int mtk_unreg_dev(struct mtk_eth *eth) |
2024 | { | |
2025 | int i; | |
2026 | ||
2027 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
2028 | if (!eth->netdev[i]) | |
2029 | continue; | |
656e7052 | 2030 | unregister_netdev(eth->netdev[i]); |
656e7052 | 2031 | } |
8a8a9e89 SW |
2032 | |
2033 | return 0; | |
2034 | } | |
2035 | ||
2036 | static int mtk_cleanup(struct mtk_eth *eth) | |
2037 | { | |
2038 | mtk_unreg_dev(eth); | |
2039 | mtk_free_dev(eth); | |
7c78b4ad | 2040 | cancel_work_sync(ð->pending_work); |
656e7052 JC |
2041 | |
2042 | return 0; | |
2043 | } | |
2044 | ||
2045 | static int mtk_get_settings(struct net_device *dev, | |
2046 | struct ethtool_cmd *cmd) | |
2047 | { | |
2048 | struct mtk_mac *mac = netdev_priv(dev); | |
2049 | int err; | |
2050 | ||
dce6fa42 SW |
2051 | if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) |
2052 | return -EBUSY; | |
2053 | ||
2364c5c5 | 2054 | err = phy_read_status(dev->phydev); |
656e7052 JC |
2055 | if (err) |
2056 | return -ENODEV; | |
2057 | ||
2364c5c5 | 2058 | return phy_ethtool_gset(dev->phydev, cmd); |
656e7052 JC |
2059 | } |
2060 | ||
2061 | static int mtk_set_settings(struct net_device *dev, | |
2062 | struct ethtool_cmd *cmd) | |
2063 | { | |
2064 | struct mtk_mac *mac = netdev_priv(dev); | |
2065 | ||
2364c5c5 SW |
2066 | if (cmd->phy_address != dev->phydev->mdio.addr) { |
2067 | dev->phydev = mdiobus_get_phy(mac->hw->mii_bus, | |
656e7052 | 2068 | cmd->phy_address); |
2364c5c5 | 2069 | if (!dev->phydev) |
656e7052 JC |
2070 | return -ENODEV; |
2071 | } | |
2072 | ||
2364c5c5 | 2073 | return phy_ethtool_sset(dev->phydev, cmd); |
656e7052 JC |
2074 | } |
2075 | ||
2076 | static void mtk_get_drvinfo(struct net_device *dev, | |
2077 | struct ethtool_drvinfo *info) | |
2078 | { | |
2079 | struct mtk_mac *mac = netdev_priv(dev); | |
2080 | ||
2081 | strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); | |
2082 | strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); | |
2083 | info->n_stats = ARRAY_SIZE(mtk_ethtool_stats); | |
2084 | } | |
2085 | ||
2086 | static u32 mtk_get_msglevel(struct net_device *dev) | |
2087 | { | |
2088 | struct mtk_mac *mac = netdev_priv(dev); | |
2089 | ||
2090 | return mac->hw->msg_enable; | |
2091 | } | |
2092 | ||
2093 | static void mtk_set_msglevel(struct net_device *dev, u32 value) | |
2094 | { | |
2095 | struct mtk_mac *mac = netdev_priv(dev); | |
2096 | ||
2097 | mac->hw->msg_enable = value; | |
2098 | } | |
2099 | ||
2100 | static int mtk_nway_reset(struct net_device *dev) | |
2101 | { | |
2102 | struct mtk_mac *mac = netdev_priv(dev); | |
2103 | ||
dce6fa42 SW |
2104 | if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) |
2105 | return -EBUSY; | |
2106 | ||
2364c5c5 | 2107 | return genphy_restart_aneg(dev->phydev); |
656e7052 JC |
2108 | } |
2109 | ||
2110 | static u32 mtk_get_link(struct net_device *dev) | |
2111 | { | |
2112 | struct mtk_mac *mac = netdev_priv(dev); | |
2113 | int err; | |
2114 | ||
dce6fa42 SW |
2115 | if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) |
2116 | return -EBUSY; | |
2117 | ||
2364c5c5 | 2118 | err = genphy_update_link(dev->phydev); |
656e7052 JC |
2119 | if (err) |
2120 | return ethtool_op_get_link(dev); | |
2121 | ||
2364c5c5 | 2122 | return dev->phydev->link; |
656e7052 JC |
2123 | } |
2124 | ||
2125 | static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data) | |
2126 | { | |
2127 | int i; | |
2128 | ||
2129 | switch (stringset) { | |
2130 | case ETH_SS_STATS: | |
2131 | for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) { | |
2132 | memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN); | |
2133 | data += ETH_GSTRING_LEN; | |
2134 | } | |
2135 | break; | |
2136 | } | |
2137 | } | |
2138 | ||
2139 | static int mtk_get_sset_count(struct net_device *dev, int sset) | |
2140 | { | |
2141 | switch (sset) { | |
2142 | case ETH_SS_STATS: | |
2143 | return ARRAY_SIZE(mtk_ethtool_stats); | |
2144 | default: | |
2145 | return -EOPNOTSUPP; | |
2146 | } | |
2147 | } | |
2148 | ||
2149 | static void mtk_get_ethtool_stats(struct net_device *dev, | |
2150 | struct ethtool_stats *stats, u64 *data) | |
2151 | { | |
2152 | struct mtk_mac *mac = netdev_priv(dev); | |
2153 | struct mtk_hw_stats *hwstats = mac->hw_stats; | |
2154 | u64 *data_src, *data_dst; | |
2155 | unsigned int start; | |
2156 | int i; | |
2157 | ||
dce6fa42 SW |
2158 | if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) |
2159 | return; | |
2160 | ||
656e7052 JC |
2161 | if (netif_running(dev) && netif_device_present(dev)) { |
2162 | if (spin_trylock(&hwstats->stats_lock)) { | |
2163 | mtk_stats_update_mac(mac); | |
2164 | spin_unlock(&hwstats->stats_lock); | |
2165 | } | |
2166 | } | |
2167 | ||
94d308d0 SW |
2168 | data_src = (u64 *)hwstats; |
2169 | ||
656e7052 | 2170 | do { |
656e7052 JC |
2171 | data_dst = data; |
2172 | start = u64_stats_fetch_begin_irq(&hwstats->syncp); | |
2173 | ||
2174 | for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) | |
2175 | *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset); | |
2176 | } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); | |
2177 | } | |
2178 | ||
7aab747e NC |
2179 | static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, |
2180 | u32 *rule_locs) | |
2181 | { | |
2182 | int ret = -EOPNOTSUPP; | |
2183 | ||
2184 | switch (cmd->cmd) { | |
2185 | case ETHTOOL_GRXRINGS: | |
2186 | if (dev->features & NETIF_F_LRO) { | |
2187 | cmd->data = MTK_MAX_RX_RING_NUM; | |
2188 | ret = 0; | |
2189 | } | |
2190 | break; | |
2191 | case ETHTOOL_GRXCLSRLCNT: | |
2192 | if (dev->features & NETIF_F_LRO) { | |
2193 | struct mtk_mac *mac = netdev_priv(dev); | |
2194 | ||
2195 | cmd->rule_cnt = mac->hwlro_ip_cnt; | |
2196 | ret = 0; | |
2197 | } | |
2198 | break; | |
2199 | case ETHTOOL_GRXCLSRULE: | |
2200 | if (dev->features & NETIF_F_LRO) | |
2201 | ret = mtk_hwlro_get_fdir_entry(dev, cmd); | |
2202 | break; | |
2203 | case ETHTOOL_GRXCLSRLALL: | |
2204 | if (dev->features & NETIF_F_LRO) | |
2205 | ret = mtk_hwlro_get_fdir_all(dev, cmd, | |
2206 | rule_locs); | |
2207 | break; | |
2208 | default: | |
2209 | break; | |
2210 | } | |
2211 | ||
2212 | return ret; | |
2213 | } | |
2214 | ||
2215 | static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) | |
2216 | { | |
2217 | int ret = -EOPNOTSUPP; | |
2218 | ||
2219 | switch (cmd->cmd) { | |
2220 | case ETHTOOL_SRXCLSRLINS: | |
2221 | if (dev->features & NETIF_F_LRO) | |
2222 | ret = mtk_hwlro_add_ipaddr(dev, cmd); | |
2223 | break; | |
2224 | case ETHTOOL_SRXCLSRLDEL: | |
2225 | if (dev->features & NETIF_F_LRO) | |
2226 | ret = mtk_hwlro_del_ipaddr(dev, cmd); | |
2227 | break; | |
2228 | default: | |
2229 | break; | |
2230 | } | |
2231 | ||
2232 | return ret; | |
2233 | } | |
2234 | ||
6a38cb15 | 2235 | static const struct ethtool_ops mtk_ethtool_ops = { |
656e7052 JC |
2236 | .get_settings = mtk_get_settings, |
2237 | .set_settings = mtk_set_settings, | |
2238 | .get_drvinfo = mtk_get_drvinfo, | |
2239 | .get_msglevel = mtk_get_msglevel, | |
2240 | .set_msglevel = mtk_set_msglevel, | |
2241 | .nway_reset = mtk_nway_reset, | |
2242 | .get_link = mtk_get_link, | |
2243 | .get_strings = mtk_get_strings, | |
2244 | .get_sset_count = mtk_get_sset_count, | |
2245 | .get_ethtool_stats = mtk_get_ethtool_stats, | |
7aab747e NC |
2246 | .get_rxnfc = mtk_get_rxnfc, |
2247 | .set_rxnfc = mtk_set_rxnfc, | |
656e7052 JC |
2248 | }; |
2249 | ||
2250 | static const struct net_device_ops mtk_netdev_ops = { | |
2251 | .ndo_init = mtk_init, | |
2252 | .ndo_uninit = mtk_uninit, | |
2253 | .ndo_open = mtk_open, | |
2254 | .ndo_stop = mtk_stop, | |
2255 | .ndo_start_xmit = mtk_start_xmit, | |
2256 | .ndo_set_mac_address = mtk_set_mac_address, | |
2257 | .ndo_validate_addr = eth_validate_addr, | |
2258 | .ndo_do_ioctl = mtk_do_ioctl, | |
2259 | .ndo_change_mtu = eth_change_mtu, | |
2260 | .ndo_tx_timeout = mtk_tx_timeout, | |
2261 | .ndo_get_stats64 = mtk_get_stats64, | |
7aab747e NC |
2262 | .ndo_fix_features = mtk_fix_features, |
2263 | .ndo_set_features = mtk_set_features, | |
656e7052 JC |
2264 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2265 | .ndo_poll_controller = mtk_poll_controller, | |
2266 | #endif | |
2267 | }; | |
2268 | ||
2269 | static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) | |
2270 | { | |
2271 | struct mtk_mac *mac; | |
2272 | const __be32 *_id = of_get_property(np, "reg", NULL); | |
2273 | int id, err; | |
2274 | ||
2275 | if (!_id) { | |
2276 | dev_err(eth->dev, "missing mac id\n"); | |
2277 | return -EINVAL; | |
2278 | } | |
2279 | ||
2280 | id = be32_to_cpup(_id); | |
2281 | if (id >= MTK_MAC_COUNT) { | |
2282 | dev_err(eth->dev, "%d is not a valid mac id\n", id); | |
2283 | return -EINVAL; | |
2284 | } | |
2285 | ||
2286 | if (eth->netdev[id]) { | |
2287 | dev_err(eth->dev, "duplicate mac id found: %d\n", id); | |
2288 | return -EINVAL; | |
2289 | } | |
2290 | ||
2291 | eth->netdev[id] = alloc_etherdev(sizeof(*mac)); | |
2292 | if (!eth->netdev[id]) { | |
2293 | dev_err(eth->dev, "alloc_etherdev failed\n"); | |
2294 | return -ENOMEM; | |
2295 | } | |
2296 | mac = netdev_priv(eth->netdev[id]); | |
2297 | eth->mac[id] = mac; | |
2298 | mac->id = id; | |
2299 | mac->hw = eth; | |
2300 | mac->of_node = np; | |
656e7052 | 2301 | |
ee406810 NC |
2302 | memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip)); |
2303 | mac->hwlro_ip_cnt = 0; | |
2304 | ||
656e7052 JC |
2305 | mac->hw_stats = devm_kzalloc(eth->dev, |
2306 | sizeof(*mac->hw_stats), | |
2307 | GFP_KERNEL); | |
2308 | if (!mac->hw_stats) { | |
2309 | dev_err(eth->dev, "failed to allocate counter memory\n"); | |
2310 | err = -ENOMEM; | |
2311 | goto free_netdev; | |
2312 | } | |
2313 | spin_lock_init(&mac->hw_stats->stats_lock); | |
d7005652 | 2314 | u64_stats_init(&mac->hw_stats->syncp); |
656e7052 JC |
2315 | mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; |
2316 | ||
2317 | SET_NETDEV_DEV(eth->netdev[id], eth->dev); | |
eaadf9fd | 2318 | eth->netdev[id]->watchdog_timeo = 5 * HZ; |
656e7052 JC |
2319 | eth->netdev[id]->netdev_ops = &mtk_netdev_ops; |
2320 | eth->netdev[id]->base_addr = (unsigned long)eth->base; | |
ee406810 NC |
2321 | |
2322 | eth->netdev[id]->hw_features = MTK_HW_FEATURES; | |
2323 | if (eth->hwlro) | |
2324 | eth->netdev[id]->hw_features |= NETIF_F_LRO; | |
2325 | ||
656e7052 JC |
2326 | eth->netdev[id]->vlan_features = MTK_HW_FEATURES & |
2327 | ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); | |
2328 | eth->netdev[id]->features |= MTK_HW_FEATURES; | |
2329 | eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; | |
2330 | ||
80673029 | 2331 | eth->netdev[id]->irq = eth->irq[0]; |
656e7052 JC |
2332 | return 0; |
2333 | ||
2334 | free_netdev: | |
2335 | free_netdev(eth->netdev[id]); | |
2336 | return err; | |
2337 | } | |
2338 | ||
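| /* Illustrative device-tree sketch of the resources mtk_probe() and |
| * mtk_add_mac() look up.  This is an editorial assumption for |
| * documentation purposes, not the authoritative binding; addresses, |
| * interrupt and clock specifiers are placeholders. |
| * |
| *	eth: ethernet@... { |
| *		compatible = "mediatek,mt7623-eth"; |
| *		reg = <...>;                        // MEM resource 0 |
| *		interrupts = <...>, <...>, <...>;   // three IRQ lines |
| *		clocks = <...>; |
| *		clock-names = ...;                  // names in mtk_clks_source_name[] |
| *		mediatek,ethsys = <&...>;           // ethsys syscon phandle |
| *		mediatek,pctl = <&...>;             // pin-control syscon phandle |
| *		mediatek,hwlro;                     // optional, enables HW LRO rings |
| * |
| *		mac@0 { |
| *			compatible = "mediatek,eth-mac"; |
| *			reg = <0>;                  // MAC id, must be < MTK_MAC_COUNT |
| *		}; |
| *	}; |
| */ |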
2339 | static int mtk_probe(struct platform_device *pdev) | |
2340 | { | |
2341 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
2342 | struct device_node *mac_np; | |
2343 | const struct of_device_id *match; | |
2344 | struct mtk_soc_data *soc; | |
2345 | struct mtk_eth *eth; | |
2346 | int err; | |
80673029 | 2347 | int i; |
656e7052 | 2348 | |
656e7052 JC |
2349 | match = of_match_device(of_mtk_match, &pdev->dev); |
2350 | soc = (struct mtk_soc_data *)match->data; | |
2351 | ||
2352 | eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); | |
2353 | if (!eth) | |
2354 | return -ENOMEM; | |
2355 | ||
549e5495 | 2356 | eth->dev = &pdev->dev; |
656e7052 | 2357 | eth->base = devm_ioremap_resource(&pdev->dev, res); |
621e49f6 VZ |
2358 | if (IS_ERR(eth->base)) |
2359 | return PTR_ERR(eth->base); | |
656e7052 JC |
2360 | |
2361 | spin_lock_init(ð->page_lock); | |
7bc9ccec | 2362 | spin_lock_init(ð->irq_lock); |
656e7052 JC |
2363 | |
2364 | eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, | |
2365 | "mediatek,ethsys"); | |
2366 | if (IS_ERR(eth->ethsys)) { | |
2367 | dev_err(&pdev->dev, "no ethsys regmap found\n"); | |
2368 | return PTR_ERR(eth->ethsys); | |
2369 | } | |
2370 | ||
2371 | eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, | |
2372 | "mediatek,pctl"); | |
2373 | if (IS_ERR(eth->pctl)) { | |
2374 | dev_err(&pdev->dev, "no pctl regmap found\n"); | |
2375 | return PTR_ERR(eth->pctl); | |
2376 | } | |
2377 | ||
ee406810 NC |
2378 | eth->hwlro = of_property_read_bool(pdev->dev.of_node, "mediatek,hwlro"); |
2379 | ||
80673029 JC |
2380 | for (i = 0; i < 3; i++) { |
2381 | eth->irq[i] = platform_get_irq(pdev, i); | |
2382 | if (eth->irq[i] < 0) { | |
2383 | dev_err(&pdev->dev, "no IRQ%d resource found\n", i); | |
2384 | return -ENXIO; | |
2385 | } | |
656e7052 | 2386 | } |
549e5495 SW |
2387 | for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { |
2388 | eth->clks[i] = devm_clk_get(eth->dev, | |
2389 | mtk_clks_source_name[i]); | |
2390 | if (IS_ERR(eth->clks[i])) { | |
2391 | if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) | |
2392 | return -EPROBE_DEFER; | |
2393 | return -ENODEV; | |
2394 | } | |
2395 | } | |
656e7052 | 2396 | |
656e7052 | 2397 | eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); |
7c78b4ad | 2398 | INIT_WORK(ð->pending_work, mtk_pending_work); |
656e7052 JC |
2399 | |
2400 | err = mtk_hw_init(eth); | |
2401 | if (err) | |
2402 | return err; | |
2403 | ||
2404 | for_each_child_of_node(pdev->dev.of_node, mac_np) { | |
2405 | if (!of_device_is_compatible(mac_np, | |
2406 | "mediatek,eth-mac")) | |
2407 | continue; | |
2408 | ||
2409 | if (!of_device_is_available(mac_np)) | |
2410 | continue; | |
2411 | ||
2412 | err = mtk_add_mac(eth, mac_np); | |
2413 | if (err) | |
8a8a9e89 | 2414 | goto err_deinit_hw; |
656e7052 JC |
2415 | } |
2416 | ||
85574dbf SW |
2417 | err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0, |
2418 | dev_name(eth->dev), eth); | |
2419 | if (err) | |
2420 | goto err_free_dev; | |
2421 | ||
2422 | err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0, | |
2423 | dev_name(eth->dev), eth); | |
2424 | if (err) | |
2425 | goto err_free_dev; | |
2426 | ||
2427 | err = mtk_mdio_init(eth); | |
2428 | if (err) | |
2429 | goto err_free_dev; | |
2430 | ||
2431 | for (i = 0; i < MTK_MAX_DEVS; i++) { | |
2432 | if (!eth->netdev[i]) | |
2433 | continue; | |
2434 | ||
2435 | err = register_netdev(eth->netdev[i]); | |
2436 | if (err) { | |
2437 | dev_err(eth->dev, "error bringing up device\n"); | |
8a8a9e89 | 2438 | goto err_deinit_mdio; |
85574dbf SW |
2439 | } else |
2440 | netif_info(eth, probe, eth->netdev[i], | |
2441 | "mediatek frame engine at 0x%08lx, irq %d\n", | |
2442 | eth->netdev[i]->base_addr, eth->irq[0]); | |
2443 | } | |
2444 | ||
656e7052 JC |
2445 | /* we run 2 devices on the same DMA ring so we need a dummy device |
2446 | * for NAPI to work | |
2447 | */ | |
2448 | init_dummy_netdev(ð->dummy_dev); | |
80673029 JC |
2449 | netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx, |
2450 | MTK_NAPI_WEIGHT); | |
2451 | netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx, | |
656e7052 JC |
2452 | MTK_NAPI_WEIGHT); |
2453 | ||
2454 | platform_set_drvdata(pdev, eth); | |
2455 | ||
2456 | return 0; | |
2457 | ||
8a8a9e89 SW |
2458 | err_deinit_mdio: |
2459 | mtk_mdio_cleanup(eth); | |
656e7052 | 2460 | err_free_dev: |
8a8a9e89 SW |
2461 | mtk_free_dev(eth); |
2462 | err_deinit_hw: | |
2463 | mtk_hw_deinit(eth); | |
2464 | ||
656e7052 JC |
2465 | return err; |
2466 | } | |
2467 | ||
2468 | static int mtk_remove(struct platform_device *pdev) | |
2469 | { | |
2470 | struct mtk_eth *eth = platform_get_drvdata(pdev); | |
79e9a414 SW |
2471 | int i; |
2472 | ||
2473 | /* stop all devices to make sure that dma is properly shut down */ | |
2474 | for (i = 0; i < MTK_MAC_COUNT; i++) { | |
2475 | if (!eth->netdev[i]) | |
2476 | continue; | |
2477 | mtk_stop(eth->netdev[i]); | |
2478 | } | |
656e7052 | 2479 | |
bf253fb7 | 2480 | mtk_hw_deinit(eth); |
656e7052 | 2481 | |
80673029 | 2482 | netif_napi_del(ð->tx_napi); |
656e7052 JC |
2483 | netif_napi_del(ð->rx_napi); |
2484 | mtk_cleanup(eth); | |
e82f7148 | 2485 | mtk_mdio_cleanup(eth); |
656e7052 JC |
2486 | |
2487 | return 0; | |
2488 | } | |
2489 | ||
2490 | const struct of_device_id of_mtk_match[] = { | |
2491 | { .compatible = "mediatek,mt7623-eth" }, | |
2492 | {}, | |
2493 | }; | |
2494 | ||
2495 | static struct platform_driver mtk_driver = { | |
2496 | .probe = mtk_probe, | |
2497 | .remove = mtk_remove, | |
2498 | .driver = { | |
2499 | .name = "mtk_soc_eth", | |
656e7052 JC |
2500 | .of_match_table = of_mtk_match, |
2501 | }, | |
2502 | }; | |
2503 | ||
2504 | module_platform_driver(mtk_driver); | |
2505 | ||
2506 | MODULE_LICENSE("GPL"); | |
2507 | MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); | |
2508 | MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC"); |