/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/clk.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/regmap.h>

#include "mtk_eth_soc.h"
#include "mdio.h"
#include "ethtool.h"

#define MAX_RX_LENGTH		1536
#define MTK_RX_ETH_HLEN		(VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN		(NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define DMA_DUMMY_DESC		0xffffffff
#define MTK_DEFAULT_MSG_ENABLE \
		(NETIF_MSG_DRV | \
		 NETIF_MSG_PROBE | \
		 NETIF_MSG_LINK | \
		 NETIF_MSG_TIMER | \
		 NETIF_MSG_IFDOWN | \
		 NETIF_MSG_IFUP | \
		 NETIF_MSG_RX_ERR | \
		 NETIF_MSG_TX_ERR)

#define TX_DMA_DESP2_DEF	(TX_DMA_LS0 | TX_DMA_DONE)
#define NEXT_TX_DESP_IDX(X)	(((X) + 1) & (ring->tx_ring_size - 1))
#define NEXT_RX_DESP_IDX(X)	(((X) + 1) & (ring->rx_ring_size - 1))
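
/* Note: the "& (size - 1)" mask only wraps correctly when the ring size is a
 * power of two, e.g. with a 128-entry ring, NEXT_TX_DESP_IDX(127) is
 * (128 & 127) == 0. The SoC's dma_ring_size is therefore assumed to be a
 * power of two.
 */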

#define SYSC_REG_RSTCTRL	0x34

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

static const u16 mtk_reg_table_default[MTK_REG_COUNT] = {
	[MTK_REG_PDMA_GLO_CFG] = MTK_PDMA_GLO_CFG,
	[MTK_REG_PDMA_RST_CFG] = MTK_PDMA_RST_CFG,
	[MTK_REG_DLY_INT_CFG] = MTK_DLY_INT_CFG,
	[MTK_REG_TX_BASE_PTR0] = MTK_TX_BASE_PTR0,
	[MTK_REG_TX_MAX_CNT0] = MTK_TX_MAX_CNT0,
	[MTK_REG_TX_CTX_IDX0] = MTK_TX_CTX_IDX0,
	[MTK_REG_TX_DTX_IDX0] = MTK_TX_DTX_IDX0,
	[MTK_REG_RX_BASE_PTR0] = MTK_RX_BASE_PTR0,
	[MTK_REG_RX_MAX_CNT0] = MTK_RX_MAX_CNT0,
	[MTK_REG_RX_CALC_IDX0] = MTK_RX_CALC_IDX0,
	[MTK_REG_RX_DRX_IDX0] = MTK_RX_DRX_IDX0,
	[MTK_REG_MTK_INT_ENABLE] = MTK_INT_ENABLE,
	[MTK_REG_MTK_INT_STATUS] = MTK_INT_STATUS,
	[MTK_REG_MTK_DMA_VID_BASE] = MTK_DMA_VID0,
	[MTK_REG_MTK_COUNTER_BASE] = MTK_GDMA1_TX_GBCNT,
	[MTK_REG_MTK_RST_GL] = MTK_RST_GL,
};

static const u16 *mtk_reg_table = mtk_reg_table_default;

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned int reg)
{
	return __raw_readl(eth->base + reg);
}

static void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg)
{
	mtk_w32(eth, val, mtk_reg_table[reg]);
}

static u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg)
{
	return mtk_r32(eth, mtk_reg_table[reg]);
}
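
/* The mtk_reg_* accessors go through mtk_reg_table, a per-SoC indirection
 * table mapping logical register IDs to actual offsets. This lets one driver
 * serve SoC generations whose register layouts differ; the default table
 * above covers the baseline layout.
 */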

/* these bits are also exposed via the reset-controller API. however the
 * switch and FE need to be brought out of reset at exactly the same moment,
 * and the reset-controller API does not provide this feature yet. Do the
 * reset manually until the reset-controller API is able to do this.
 */
void mtk_reset(struct mtk_eth *eth, u32 reset_bits)
{
	u32 val;

	regmap_read(eth->ethsys, SYSC_REG_RSTCTRL, &val);
	val |= reset_bits;
	regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
	usleep_range(10, 20);
	val &= ~reset_bits;
	regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
	usleep_range(10, 20);
}
EXPORT_SYMBOL(mtk_reset);

static inline void mtk_irq_ack(struct mtk_eth *eth, u32 mask)
{
	if (eth->soc->dma_type & MTK_PDMA)
		mtk_reg_w32(eth, mask, MTK_REG_MTK_INT_STATUS);
	if (eth->soc->dma_type & MTK_QDMA)
		mtk_w32(eth, mask, MTK_QMTK_INT_STATUS);
}

static inline u32 mtk_irq_pending(struct mtk_eth *eth)
{
	u32 status = 0;

	if (eth->soc->dma_type & MTK_PDMA)
		status |= mtk_reg_r32(eth, MTK_REG_MTK_INT_STATUS);
	if (eth->soc->dma_type & MTK_QDMA)
		status |= mtk_r32(eth, MTK_QMTK_INT_STATUS);

	return status;
}

static void mtk_irq_ack_status(struct mtk_eth *eth, u32 mask)
{
	u32 status_reg = MTK_REG_MTK_INT_STATUS;

	if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
		status_reg = MTK_REG_MTK_INT_STATUS2;

	mtk_reg_w32(eth, mask, status_reg);
}

static u32 mtk_irq_pending_status(struct mtk_eth *eth)
{
	u32 status_reg = MTK_REG_MTK_INT_STATUS;

	if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
		status_reg = MTK_REG_MTK_INT_STATUS2;

	return mtk_reg_r32(eth, status_reg);
}

static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
{
	u32 val;

	if (eth->soc->dma_type & MTK_PDMA) {
		val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
		mtk_reg_w32(eth, val & ~mask, MTK_REG_MTK_INT_ENABLE);
		/* flush write */
		mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
	}
	if (eth->soc->dma_type & MTK_QDMA) {
		val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
		mtk_w32(eth, val & ~mask, MTK_QMTK_INT_ENABLE);
		/* flush write */
		mtk_r32(eth, MTK_QMTK_INT_ENABLE);
	}
}

static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
{
	u32 val;

	if (eth->soc->dma_type & MTK_PDMA) {
		val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
		mtk_reg_w32(eth, val | mask, MTK_REG_MTK_INT_ENABLE);
		/* flush write */
		mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
	}
	if (eth->soc->dma_type & MTK_QDMA) {
		val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
		mtk_w32(eth, val | mask, MTK_QMTK_INT_ENABLE);
		/* flush write */
		mtk_r32(eth, MTK_QMTK_INT_ENABLE);
	}
}
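
/* The dummy read-back after each mask update ("flush write") forces the
 * posted write out to the device, so the new interrupt mask is guaranteed to
 * be in effect before these helpers return.
 */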

static inline u32 mtk_irq_enabled(struct mtk_eth *eth)
{
	u32 enabled = 0;

	if (eth->soc->dma_type & MTK_PDMA)
		enabled |= mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
	if (eth->soc->dma_type & MTK_QDMA)
		enabled |= mtk_r32(eth, MTK_QMTK_INT_ENABLE);

	return enabled;
}

static inline void mtk_hw_set_macaddr(struct mtk_mac *mac,
				      unsigned char *macaddr)
{
	unsigned long flags;

	spin_lock_irqsave(&mac->hw->page_lock, flags);
	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA1_MAC_ADRH);
	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
		(macaddr[4] << 8) | macaddr[5],
		MTK_GDMA1_MAC_ADRL);
	spin_unlock_irqrestore(&mac->hw->page_lock, flags);
}
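
/* The MAC address is split across two registers: ADRH holds the two most
 * significant bytes in its low 16 bits, ADRL holds the remaining four bytes.
 */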

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	if (ret)
		return ret;

	if (eth->soc->set_mac)
		eth->soc->set_mac(mac, dev->dev_addr);
	else
		mtk_hw_set_macaddr(mac, p);

	return 0;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MAX_RX_LENGTH)
		mtu = MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MAX_RX_LENGTH);

	return buf_size;
}
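
/* frag_size is the full allocation later handed to build_skb(): headroom
 * (NET_SKB_PAD + NET_IP_ALIGN), the DMA-able payload, and the trailing
 * struct skb_shared_info. buf_size is just the payload part, which is what
 * gets mapped for the hardware to write into.
 */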

static inline void mtk_get_rxd(struct mtk_rx_dma *rxd,
			       struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

static inline void mtk_set_txd_pdma(struct mtk_tx_dma *txd,
				    struct mtk_tx_dma *dma_txd)
{
	WRITE_ONCE(dma_txd->txd1, txd->txd1);
	WRITE_ONCE(dma_txd->txd3, txd->txd3);
	WRITE_ONCE(dma_txd->txd4, txd->txd4);
	/* clean dma done flag last */
	WRITE_ONCE(dma_txd->txd2, txd->txd2);
}
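
/* txd2 carries the TX_DMA_DONE bit that hands ownership of the descriptor
 * back to the hardware, so it is written last: the engine must never see a
 * descriptor whose other words are still half-initialized.
 */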

static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i;

	if (ring->rx_data && ring->rx_dma) {
		for (i = 0; i < ring->rx_ring_size; i++) {
			if (!ring->rx_data[i])
				continue;
			if (!ring->rx_dma[i].rxd1)
				continue;
			dma_unmap_single(eth->dev,
					 ring->rx_dma[i].rxd1,
					 ring->rx_buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->rx_data[i]);
		}
		kfree(ring->rx_data);
		ring->rx_data = NULL;
	}

	if (ring->rx_dma) {
		dma_free_coherent(eth->dev,
				  ring->rx_ring_size * sizeof(*ring->rx_dma),
				  ring->rx_dma,
				  ring->rx_phys);
		ring->rx_dma = NULL;
	}
}

static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i, pad = 0;

	ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
	ring->rx_buf_size = mtk_max_buf_size(ring->frag_size);
	ring->rx_ring_size = eth->soc->dma_ring_size;
	ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
				GFP_KERNEL);
	if (!ring->rx_data)
		goto no_rx_mem;

	for (i = 0; i < ring->rx_ring_size; i++) {
		ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->rx_data[i])
			goto no_rx_mem;
	}

	ring->rx_dma =
		dma_alloc_coherent(eth->dev,
				   ring->rx_ring_size * sizeof(*ring->rx_dma),
				   &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO);
	if (!ring->rx_dma)
		goto no_rx_mem;

	if (!eth->soc->rx_2b_offset)
		pad = NET_IP_ALIGN;

	for (i = 0; i < ring->rx_ring_size; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->rx_data[i] + NET_SKB_PAD + pad,
				ring->rx_buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			goto no_rx_mem;
		ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;

		if (eth->soc->rx_sg_dma)
			ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
		else
			ring->rx_dma[i].rxd2 = RX_DMA_LSO;
	}
	ring->rx_calc_idx = ring->rx_ring_size - 1;
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	return 0;

no_rx_mem:
	return -ENOMEM;
}
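
/* rx_calc_idx is initialized to the last ring entry; it tracks the most
 * recent descriptor the CPU has handed to the hardware, and the DMA engine
 * presumably stops refilling once its own index catches up with it.
 */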

static void mtk_txd_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
{
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
		dma_unmap_single(dev,
				 dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
		dma_unmap_page(dev,
			       dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
	}
	if (tx_buf->flags & MTK_TX_FLAGS_PAGE1)
		dma_unmap_page(dev,
			       dma_unmap_addr(tx_buf, dma_addr1),
			       dma_unmap_len(tx_buf, dma_len1),
			       DMA_TO_DEVICE);

	tx_buf->flags = 0;
	if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}

static void mtk_pdma_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->tx_buf) {
		for (i = 0; i < ring->tx_ring_size; i++)
			mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
		kfree(ring->tx_buf);
		ring->tx_buf = NULL;
	}

	if (ring->tx_dma) {
		dma_free_coherent(eth->dev,
				  ring->tx_ring_size * sizeof(*ring->tx_dma),
				  ring->tx_dma,
				  ring->tx_phys);
		ring->tx_dma = NULL;
	}
}

static void mtk_qdma_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->tx_buf) {
		for (i = 0; i < ring->tx_ring_size; i++)
			mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
		kfree(ring->tx_buf);
		ring->tx_buf = NULL;
	}

	if (ring->tx_dma) {
		dma_free_coherent(eth->dev,
				  ring->tx_ring_size * sizeof(*ring->tx_dma),
				  ring->tx_dma,
				  ring->tx_phys);
		ring->tx_dma = NULL;
	}
}

void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	if (mac->hw->soc->new_stats) {
		hw_stats->rx_bytes += mtk_r32(mac->hw, base);
		stats = mtk_r32(mac->hw, base + 0x04);
		if (stats)
			hw_stats->rx_bytes += (stats << 32);
		hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
		hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
		hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
		hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
		hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
		hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
		hw_stats->rx_flow_control_packets +=
			mtk_r32(mac->hw, base + 0x24);
		hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
		hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
		hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
		stats = mtk_r32(mac->hw, base + 0x34);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	} else {
		hw_stats->tx_bytes += mtk_r32(mac->hw, base);
		hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x04);
		hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x08);
		hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x0c);
		hw_stats->rx_bytes += mtk_r32(mac->hw, base + 0x20);
		hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x24);
		hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x28);
		hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x2c);
		hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x30);
		hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x34);
		hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x38);
		hw_stats->rx_flow_control_packets +=
			mtk_r32(mac->hw, base + 0x3c);
	}

	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
	unsigned int start;

	if (!base) {
		netdev_stats_to_stats64(storage, &dev->stats);
		return;
	}

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

static int mtk_vlan_rx_add_vid(struct net_device *dev,
			       __be16 proto, u16 vid)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 idx = (vid & 0xf);
	u32 vlan_cfg;

	if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
	      (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
		return 0;

	if (test_bit(idx, &eth->vlan_map)) {
		netdev_warn(dev, "disable tx vlan offload\n");
		dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		netdev_update_features(dev);
	} else {
		vlan_cfg = mtk_r32(eth,
				   mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
				   ((idx >> 1) << 2));
		if (idx & 0x1) {
			vlan_cfg &= 0xffff;
			vlan_cfg |= (vid << 16);
		} else {
			vlan_cfg &= 0xffff0000;
			vlan_cfg |= vid;
		}
		mtk_w32(eth,
			vlan_cfg, mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
			((idx >> 1) << 2));
		set_bit(idx, &eth->vlan_map);
	}

	return 0;
}

static int mtk_vlan_rx_kill_vid(struct net_device *dev,
				__be16 proto, u16 vid)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 idx = (vid & 0xf);

	if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
	      (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
		return 0;

	clear_bit(idx, &eth->vlan_map);

	return 0;
}

static inline u32 mtk_pdma_empty_txd(struct mtk_tx_ring *ring)
{
	barrier();
	return (u32)(ring->tx_ring_size -
		     ((ring->tx_next_idx - ring->tx_free_idx) &
		      (ring->tx_ring_size - 1)));
}
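
/* Free descriptors = ring size minus the number currently in flight. The
 * in-flight count is the masked difference of the two indices; e.g. with a
 * 128-entry ring, tx_next_idx = 10 and tx_free_idx = 5 gives
 * (10 - 5) & 127 = 5 in flight, so 123 descriptors are free. The mask also
 * handles the wrapped case where tx_next_idx < tx_free_idx.
 */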

static int mtk_skb_padto(struct sk_buff *skb, struct mtk_eth *eth)
{
	unsigned int len;
	int ret;

	if (unlikely(skb->len >= VLAN_ETH_ZLEN))
		return 0;

	if (eth->soc->padding_64b && !eth->soc->padding_bug)
		return 0;

	if (skb_vlan_tag_present(skb))
		len = ETH_ZLEN;
	else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
		len = VLAN_ETH_ZLEN;
	else if (!eth->soc->padding_64b)
		len = ETH_ZLEN;
	else
		return 0;

	if (skb->len >= len)
		return 0;

	ret = skb_pad(skb, len - skb->len);
	if (ret < 0)
		return ret;
	skb->len = len;
	skb_set_tail_pointer(skb, len);

	return ret;
}
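
/* Software padding of runt frames: judging by the padding_64b/padding_bug
 * SoC flags, some MACs pad short frames to the 64-byte minimum themselves
 * while others do it incorrectly (or not at all), so the driver pads the skb
 * to ETH_ZLEN/VLAN_ETH_ZLEN in software before mapping it for DMA.
 */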

static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev,
			   int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct skb_frag_struct *frag;
	struct mtk_tx_dma txd, *ptxd;
	struct mtk_tx_buf *tx_buf;
	int i, j, k, frag_size, frag_map_size, offset;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	u32 def_txd4;

	if (mtk_skb_padto(skb, eth)) {
		netif_warn(eth, tx_err, dev, "tx padding failed!\n");
		return -1;
	}

	tx_buf = &ring->tx_buf[ring->tx_next_idx];
	memset(tx_buf, 0, sizeof(*tx_buf));
	memset(&txd, 0, sizeof(txd));
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* init tx descriptor */
	def_txd4 = eth->soc->txd4;
	txd.txd4 = def_txd4;

	if (eth->soc->mac_count > 1)
		txd.txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;

	if (gso)
		txd.txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd.txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb)) {
		u16 tag = skb_vlan_tag_get(skb);

		txd.txd4 |= TX_DMA_INS_VLAN |
			    ((tag >> VLAN_PRIO_SHIFT) << 4) |
			    (tag & 0xF);
	}

	mapped_addr = dma_map_single(&dev->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
		return -1;

	txd.txd1 = mapped_addr;
	txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));

	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	j = ring->tx_next_idx;
	k = 0;
	for (i = 0; i < nr_frags; i++) {
		offset = 0;
		frag = &skb_shinfo(skb)->frags[i];
		frag_size = skb_frag_size(frag);

		while (frag_size > 0) {
			frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
				goto err_dma;

			if (k & 0x1) {
				j = NEXT_TX_DESP_IDX(j);
				txd.txd1 = mapped_addr;
				txd.txd2 = TX_DMA_PLEN0(frag_map_size);
				txd.txd4 = def_txd4;

				tx_buf = &ring->tx_buf[j];
				memset(tx_buf, 0, sizeof(*tx_buf));

				tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
				dma_unmap_addr_set(tx_buf, dma_addr0,
						   mapped_addr);
				dma_unmap_len_set(tx_buf, dma_len0,
						  frag_map_size);
			} else {
				txd.txd3 = mapped_addr;
				txd.txd2 |= TX_DMA_PLEN1(frag_map_size);

				tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
				tx_buf->flags |= MTK_TX_FLAGS_PAGE1;
				dma_unmap_addr_set(tx_buf, dma_addr1,
						   mapped_addr);
				dma_unmap_len_set(tx_buf, dma_len1,
						  frag_map_size);

				if (!((i == (nr_frags - 1)) &&
				      (frag_map_size == frag_size))) {
					mtk_set_txd_pdma(&txd,
							 &ring->tx_dma[j]);
					memset(&txd, 0, sizeof(txd));
				}
			}
			frag_size -= frag_map_size;
			offset += frag_map_size;
			k++;
		}
	}

	/* set last segment */
	if (k & 0x1)
		txd.txd2 |= TX_DMA_LS1;
	else
		txd.txd2 |= TX_DMA_LS0;
	mtk_set_txd_pdma(&txd, &ring->tx_dma[j]);

	/* store skb to cleanup */
	tx_buf->skb = skb;

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->tx_next_idx = NEXT_TX_DESP_IDX(j);
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();
	atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));

	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0);

	return 0;

err_dma:
	j = ring->tx_next_idx;
	for (i = 0; i < tx_num; i++) {
		ptxd = &ring->tx_dma[j];
		tx_buf = &ring->tx_buf[j];

		/* unmap dma */
		mtk_txd_unmap(&dev->dev, tx_buf);

		ptxd->txd2 = TX_DMA_DESP2_DEF;
		j = NEXT_TX_DESP_IDX(j);
	}
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();
	return -1;
}

/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t dma_addr, phy_ring_head, phy_ring_tail;
	int cnt = eth->soc->dma_ring_size;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
					       &phy_ring_head,
					       GFP_ATOMIC | __GFP_ZERO);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
	phy_ring_tail = phy_ring_head + (sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 = (dma_addr + (i * QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (phy_ring_head +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_QDMA_SDL(QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}
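
/* The "free queue" is a linked list of descriptors, each pointing at one
 * QDMA_PAGE_SIZE chunk of the scratch buffer, with head/tail/count/length
 * programmed into the FQ registers. The hardware apparently draws scratch
 * pages from this pool on its own, which is why the driver never touches
 * the ring again after setup.
 */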

static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->tx_dma;

	return ret + (desc - ring->tx_phys);
}

static struct mtk_tx_dma *mtk_tx_next_qdma(struct mtk_tx_ring *ring,
					   struct mtk_tx_dma *txd)
{
	return mtk_qdma_phys_to_virt(ring, txd->txd2);
}

static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
					     struct mtk_tx_dma *txd)
{
	int idx = txd - ring->tx_dma;

	return &ring->tx_buf[idx];
}

static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev,
			   int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_buf *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = eth->soc->txd4;

	itxd = ring->tx_next_free;
	if (itxd == ring->tx_last_free)
		return -ENOMEM;

	if (eth->soc->mac_count > 1)
		txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;

	tx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(tx_buf, 0, sizeof(*tx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN_MT7621 | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(&dev->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	txd = itxd;
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;

			txd = mtk_tx_next_qdma(ring, txd);
			if (txd == ring->tx_last_free)
				goto err_dma;

			n_desc++;
			frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (QDMA_TX_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0) |
					       mac->id);
			WRITE_ONCE(txd->txd4, 0);

			tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			memset(tx_buf, 0, sizeof(*tx_buf));

			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	tx_buf->skb = skb;

	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (QDMA_TX_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->tx_next_free = mtk_tx_next_qdma(ring, txd);
	atomic_sub(n_desc, &ring->tx_free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, txd);

		/* unmap dma */
		mtk_txd_unmap(&dev->dev, tx_buf);

		itxd->txd3 = TX_DMA_DESP2_DEF;
		itxd = mtk_tx_next_qdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}

static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	struct skb_frag_struct *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return DIV_ROUND_UP(nfrags, 2);
}
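
/* Worst-case descriptor count for one skb. A PDMA descriptor can carry two
 * buffers (PLEN0 and PLEN1, as used in mtk_pdma_tx_map), hence the final
 * division by two. For GSO skbs each fragment may additionally have to be
 * split into TX_DMA_BUF_LEN-sized pieces, so those are rounded up per
 * fragment first.
 */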

static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	int tx_num;
	int len = skb->len;
	bool gso = false;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->tx_free_count) <= tx_num)) {
		netif_stop_queue(dev);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (ring->tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	stats->tx_packets++;
	stats->tx_bytes += len;

	if (unlikely(atomic_read(&ring->tx_free_count) <= ring->tx_thresh)) {
		netif_stop_queue(dev);
		smp_mb();
		if (unlikely(atomic_read(&ring->tx_free_count) >
			     ring->tx_thresh))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

drop:
	stats->tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth, u32 rx_intr)
{
	struct mtk_soc_data *soc = eth->soc;
	struct mtk_rx_ring *ring = &eth->rx_ring[0];
	int idx = ring->rx_calc_idx;
	u32 checksum_bit;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0, pad;

	if (eth->soc->hw_features & NETIF_F_RXCSUM)
		checksum_bit = soc->checksum_bit;
	else
		checksum_bit = 0;

	if (eth->soc->rx_2b_offset)
		pad = 0;
	else
		pad = NET_IP_ALIGN;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac = 0;

		idx = NEXT_RX_DESP_IDX(idx);
		rxd = &ring->rx_dma[idx];
		data = ring->rx_data[idx];

		mtk_get_rxd(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* find out which mac the packet comes from. values start at 1 */
		if (eth->soc->mac_count > 1) {
			mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
			      RX_DMA_FPORT_MASK;
			mac--;
			if (mac < 0 || mac >= eth->soc->mac_count)
				goto release_desc;
		}

		netdev = eth->netdev[mac];

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data || !netdev)) {
			/* only count the drop if there is a netdev to
			 * account it against
			 */
			if (netdev)
				netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(&netdev->dev,
					  new_data + NET_SKB_PAD + pad,
					  ring->rx_buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
			skb_free_frag(new_data);
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			put_page(virt_to_head_page(new_data));
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(&netdev->dev, trxd.rxd1,
				 ring->rx_buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & checksum_bit)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += pktlen;

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
		    RX_DMA_VID(trxd.rxd3))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       RX_DMA_VID(trxd.rxd3));
		napi_gro_receive(napi, skb);

		ring->rx_data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		if (eth->soc->rx_sg_dma)
			rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
		else
			rxd->rxd2 = RX_DMA_LSO;

		ring->rx_calc_idx = idx;
		/* make sure that all changes to the dma ring are flushed
		 * before we continue
		 */
		wmb();
		if (eth->soc->dma_type == MTK_QDMA)
			mtk_w32(eth, ring->rx_calc_idx, MTK_QRX_CRX_IDX0);
		else
			mtk_reg_w32(eth, ring->rx_calc_idx,
				    MTK_REG_RX_CALC_IDX0);
		done++;
	}

	if (done < budget)
		mtk_irq_ack(eth, rx_intr);

	return done;
}

static int mtk_pdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
{
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	int done = 0;
	u32 idx, hwidx;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	unsigned int bytes = 0;

	idx = ring->tx_free_idx;
	hwidx = mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0);

	while ((idx != hwidx) && budget) {
		tx_buf = &ring->tx_buf[idx];
		skb = tx_buf->skb;

		if (!skb)
			break;

		if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
			bytes += skb->len;
			done++;
			budget--;
		}
		mtk_txd_unmap(eth->dev, tx_buf);
		idx = NEXT_TX_DESP_IDX(idx);
	}
	ring->tx_free_idx = idx;
	atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));

	/* read the hw index again to make sure no new tx packets arrived */
	if (idx != hwidx || idx != mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0))
		*tx_again = true;

	if (done)
		netdev_completed_queue(*eth->netdev, done, bytes);

	return done;
}

static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	int total = 0, done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	u32 cpu, dma;
	int i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac;

		desc = mtk_tx_next_qdma(ring, desc);
		if ((desc->txd3 & QDMA_TX_OWNER_CPU) == 0)
			break;

		mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
		      TX_DMA_FPORT_MASK;
		mac--;

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_txd_unmap(eth->dev, tx_buf);

		ring->tx_last_free->txd2 = next_cpu;
		ring->tx_last_free = desc;
		atomic_inc(&ring->tx_free_count);

		cpu = next_cpu;
	}

	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	/* read the hw index again to make sure no new tx packets arrived */
	if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
		*tx_again = true;

	for (i = 0; i < eth->soc->mac_count; i++) {
		if (!done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
	}

	return total;
}

static int mtk_poll_tx(struct mtk_eth *eth, int budget, u32 tx_intr,
		       bool *tx_again)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device *netdev = eth->netdev[0];
	int done;

	done = eth->tx_ring.tx_poll(eth, budget, tx_again);
	if (!*tx_again)
		mtk_irq_ack(eth, tx_intr);

	if (!done)
		return 0;

	smp_mb();
	if (unlikely(!netif_queue_stopped(netdev)))
		return done;

	if (atomic_read(&ring->tx_free_count) > ring->tx_thresh)
		netif_wake_queue(netdev);

	return done;
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < eth->soc->mac_count; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static int mtk_poll(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, mtk_status, mask, tx_intr, rx_intr, status_intr;
	int tx_done, rx_done;
	bool tx_again = false;

	status = mtk_irq_pending(eth);
	mtk_status = mtk_irq_pending_status(eth);
	tx_intr = eth->soc->tx_int;
	rx_intr = eth->soc->rx_int;
	status_intr = eth->soc->status_int;
	tx_done = 0;
	rx_done = 0;

	if (status & tx_intr)
		tx_done = mtk_poll_tx(eth, budget, tx_intr, &tx_again);

	if (status & rx_intr)
		rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);

	if (unlikely(mtk_status & status_intr)) {
		mtk_stats_update(eth);
		mtk_irq_ack_status(eth, status_intr);
	}

	if (unlikely(netif_msg_intr(eth))) {
		mask = mtk_irq_enabled(eth);
		netdev_info(eth->netdev[0],
			    "done tx %d, rx %d, intr 0x%08x/0x%x\n",
			    tx_done, rx_done, status, mask);
	}

	if (tx_again || rx_done == budget)
		return budget;

	status = mtk_irq_pending(eth);
	if (status & (tx_intr | rx_intr))
		return budget;

	napi_complete(napi);
	mtk_irq_enable(eth, tx_intr | rx_intr);

	return rx_done;
}
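
/* NAPI contract: returning the full budget keeps the poll loop scheduled;
 * only once all tx/rx work is drained does the handler call napi_complete()
 * and re-enable the interrupts that mtk_handle_irq() masked off.
 */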

static int mtk_pdma_tx_alloc(struct mtk_eth *eth)
{
	int i;
	struct mtk_tx_ring *ring = &eth->tx_ring;

	ring->tx_ring_size = eth->soc->dma_ring_size;
	ring->tx_free_idx = 0;
	ring->tx_next_idx = 0;
	ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
			      MAX_SKB_FRAGS);

	ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
			       GFP_KERNEL);
	if (!ring->tx_buf)
		goto no_tx_mem;

	ring->tx_dma =
		dma_alloc_coherent(eth->dev,
				   ring->tx_ring_size * sizeof(*ring->tx_dma),
				   &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO);
	if (!ring->tx_dma)
		goto no_tx_mem;

	for (i = 0; i < ring->tx_ring_size; i++) {
		ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
		ring->tx_dma[i].txd4 = eth->soc->txd4;
	}

	atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
	ring->tx_map = mtk_pdma_tx_map;
	ring->tx_poll = mtk_pdma_tx_poll;
	ring->tx_clean = mtk_pdma_tx_clean;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_reg_w32(eth, ring->tx_phys, MTK_REG_TX_BASE_PTR0);
	mtk_reg_w32(eth, ring->tx_ring_size, MTK_REG_TX_MAX_CNT0);
	mtk_reg_w32(eth, 0, MTK_REG_TX_CTX_IDX0);
	mtk_reg_w32(eth, MTK_PST_DTX_IDX0, MTK_REG_PDMA_RST_CFG);

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->tx_dma);

	ring->tx_ring_size = eth->soc->dma_ring_size;
	ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
			       GFP_KERNEL);
	if (!ring->tx_buf)
		goto no_tx_mem;

	ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
					  &ring->tx_phys,
					  GFP_ATOMIC | __GFP_ZERO);
	if (!ring->tx_dma)
		goto no_tx_mem;

	for (i = 0; i < ring->tx_ring_size; i++) {
		int next = (i + 1) % ring->tx_ring_size;
		u32 next_ptr = ring->tx_phys + next * sz;

		ring->tx_dma[i].txd2 = next_ptr;
		ring->tx_dma[i].txd3 = TX_DMA_DESP2_DEF;
	}

	atomic_set(&ring->tx_free_count, ring->tx_ring_size - 2);
	ring->tx_next_free = &ring->tx_dma[0];
	ring->tx_last_free = &ring->tx_dma[ring->tx_ring_size - 2];
	ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
			      MAX_SKB_FRAGS);

	ring->tx_map = mtk_qdma_tx_map;
	ring->tx_poll = mtk_qdma_tx_poll;
	ring->tx_clean = mtk_qdma_tx_clean;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->tx_phys, MTK_QTX_CTX_PTR);
	mtk_w32(eth, ring->tx_phys, MTK_QTX_DTX_PTR);
	mtk_w32(eth,
		ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
		MTK_QTX_CRX_PTR);
	mtk_w32(eth,
		ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
		MTK_QTX_DRX_PTR);

	return 0;

no_tx_mem:
	return -ENOMEM;
}
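
/* The QDMA tx ring is a circular linked list (txd2 of each descriptor points
 * at the next one). Only size - 2 entries are advertised as free: keeping a
 * gap between tx_next_free and tx_last_free appears to be how the driver
 * distinguishes a full ring from an empty one, since both ends chase each
 * other through the same list.
 */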

static int mtk_qdma_init(struct mtk_eth *eth, int ring)
{
	int err;

	err = mtk_init_fq_dma(eth);
	if (err)
		return err;

	err = mtk_qdma_tx_alloc_tx(eth);
	if (err)
		return err;

	err = mtk_dma_rx_alloc(eth, &eth->rx_ring[ring]);
	if (err)
		return err;

	mtk_w32(eth, eth->rx_ring[ring].rx_phys, MTK_QRX_BASE_PTR0);
	mtk_w32(eth, eth->rx_ring[ring].rx_ring_size, MTK_QRX_MAX_CNT0);
	mtk_w32(eth, eth->rx_ring[ring].rx_calc_idx, MTK_QRX_CRX_IDX0);
	mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
	mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));

	/* Enable random early drop and set drop threshold automatically */
	mtk_w32(eth, 0x174444, MTK_QDMA_FC_THRES);
	mtk_w32(eth, 0x0, MTK_QDMA_HRED2);

	return 0;
}

static int mtk_pdma_qdma_init(struct mtk_eth *eth)
{
	int err = mtk_qdma_init(eth, 1);

	if (err)
		return err;

	err = mtk_dma_rx_alloc(eth, &eth->rx_ring[0]);
	if (err)
		return err;

	mtk_reg_w32(eth, eth->rx_ring[0].rx_phys, MTK_REG_RX_BASE_PTR0);
	mtk_reg_w32(eth, eth->rx_ring[0].rx_ring_size, MTK_REG_RX_MAX_CNT0);
	mtk_reg_w32(eth, eth->rx_ring[0].rx_calc_idx, MTK_REG_RX_CALC_IDX0);
	mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);

	return 0;
}

static int mtk_pdma_init(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring = &eth->rx_ring[0];
	int err;

	err = mtk_pdma_tx_alloc(eth);
	if (err)
		return err;

	err = mtk_dma_rx_alloc(eth, ring);
	if (err)
		return err;

	mtk_reg_w32(eth, ring->rx_phys, MTK_REG_RX_BASE_PTR0);
	mtk_reg_w32(eth, ring->rx_ring_size, MTK_REG_RX_MAX_CNT0);
	mtk_reg_w32(eth, ring->rx_calc_idx, MTK_REG_RX_CALC_IDX0);
	mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);

	return 0;
}

static void mtk_dma_free(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < eth->soc->mac_count; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	eth->tx_ring.tx_clean(eth);
	mtk_clean_rx(eth, &eth->rx_ring[0]);
	mtk_clean_rx(eth, &eth->rx_ring[1]);
	kfree(eth->scratch_head);
}

static void mtk_tx_timeout(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	if (eth->soc->dma_type & MTK_PDMA) {
		netif_info(eth, drv, dev, "pdma_cfg:%08x\n",
			   mtk_reg_r32(eth, MTK_REG_PDMA_GLO_CFG));
		netif_info(eth, drv, dev,
			   "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
			   0, mtk_reg_r32(eth, MTK_REG_TX_BASE_PTR0),
			   mtk_reg_r32(eth, MTK_REG_TX_MAX_CNT0),
			   mtk_reg_r32(eth, MTK_REG_TX_CTX_IDX0),
			   mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0),
			   ring->tx_free_idx,
			   ring->tx_next_idx);
	}
	if (eth->soc->dma_type & MTK_QDMA) {
		netif_info(eth, drv, dev, "qdma_cfg:%08x\n",
			   mtk_r32(eth, MTK_QDMA_GLO_CFG));
		netif_info(eth, drv, dev,
			   "tx_ring=%d, ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n",
			   0, mtk_r32(eth, MTK_QTX_CTX_PTR),
			   mtk_r32(eth, MTK_QTX_DTX_PTR),
			   mtk_r32(eth, MTK_QTX_CRX_PTR),
			   mtk_r32(eth, MTK_QTX_DRX_PTR),
			   atomic_read(&ring->tx_free_count));
	}
	netif_info(eth, drv, dev,
		   "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n",
		   0, mtk_reg_r32(eth, MTK_REG_RX_BASE_PTR0),
		   mtk_reg_r32(eth, MTK_REG_RX_MAX_CNT0),
		   mtk_reg_r32(eth, MTK_REG_RX_CALC_IDX0),
		   mtk_reg_r32(eth, MTK_REG_RX_DRX_IDX0));

	schedule_work(&mac->pending_work);
}

static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;
	u32 status, int_mask;

	status = mtk_irq_pending(eth);
	if (unlikely(!status))
		return IRQ_NONE;

	int_mask = (eth->soc->rx_int | eth->soc->tx_int);
	if (likely(status & int_mask)) {
		if (likely(napi_schedule_prep(&eth->rx_napi)))
			__napi_schedule(&eth->rx_napi);
	} else {
		mtk_irq_ack(eth, status);
	}
	mtk_irq_disable(eth, int_mask);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 int_mask = eth->soc->tx_int | eth->soc->rx_int;

	mtk_irq_disable(eth, int_mask);
	mtk_handle_irq(dev->irq, dev);
	mtk_irq_enable(eth, int_mask);
}
#endif

int mtk_set_clock_cycle(struct mtk_eth *eth)
{
	unsigned long sysclk = eth->sysclk;

	sysclk /= MTK_US_CYC_CNT_DIVISOR;
	sysclk <<= MTK_US_CYC_CNT_SHIFT;

	mtk_w32(eth, (mtk_r32(eth, MTK_GLO_CFG) &
		      ~(MTK_US_CYC_CNT_MASK << MTK_US_CYC_CNT_SHIFT)) |
		     sysclk,
		     MTK_GLO_CFG);
	return 0;
}

void mtk_fwd_config(struct mtk_eth *eth)
{
	u32 fwd_cfg;

	fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);

	/* disable jumbo frame */
	if (eth->soc->jumbo_frame)
		fwd_cfg &= ~MTK_GDM1_JMB_EN;

	/* set unicast/multicast/broadcast frame to cpu */
	fwd_cfg &= ~0xffff;

	mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
}

void mtk_csum_config(struct mtk_eth *eth)
{
	if (eth->soc->hw_features & NETIF_F_RXCSUM)
		mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) |
			(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
			MTK_GDMA1_FWD_CFG);
	else
		mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) &
			~(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
			MTK_GDMA1_FWD_CFG);
	if (eth->soc->hw_features & NETIF_F_IP_CSUM)
		mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) |
			(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
			MTK_CDMA_CSG_CFG);
	else
		mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) &
			~(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
			MTK_CDMA_CSG_CFG);
}
1650 | ||
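/* Allocate and initialize the DMA rings for whichever engines this SoC
 * uses (PDMA, QDMA or both), then enable TX/RX DMA and descriptor
 * write-back under the page lock.  On failure the ring memory is freed
 * again before the error is propagated.
 */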
static int mtk_start_dma(struct mtk_eth *eth)
{
	unsigned long flags;
	u32 val;
	int err;

	if (eth->soc->dma_type == MTK_PDMA)
		err = mtk_pdma_init(eth);
	else if (eth->soc->dma_type == MTK_QDMA)
		err = mtk_qdma_init(eth, 0);
	else
		err = mtk_pdma_qdma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	spin_lock_irqsave(&eth->page_lock, flags);

	val = MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN;
	if (eth->soc->rx_2b_offset)
		val |= MTK_RX_2B_OFFSET;
	val |= eth->soc->pdma_glo_cfg;

	if (eth->soc->dma_type & MTK_PDMA)
		mtk_reg_w32(eth, val, MTK_REG_PDMA_GLO_CFG);

	if (eth->soc->dma_type & MTK_QDMA)
		mtk_w32(eth, val, MTK_QDMA_GLO_CFG);

	spin_unlock_irqrestore(&eth->page_lock, flags);

	return 0;
}

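/* ndo_open: the DMA engines, NAPI context and interrupts are shared by
 * all MACs, so they are brought up only when the first interface opens;
 * dma_refcnt tracks how many interfaces are using them.  The PHY and
 * queue state are per interface.
 */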
static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));

	if (!atomic_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		napi_enable(&eth->rx_napi);
		mtk_irq_enable(eth, eth->soc->tx_int | eth->soc->rx_int);
	}
	atomic_inc(&eth->dma_refcnt);

	if (eth->phy)
		eth->phy->start(mac);

	if (eth->soc->has_carrier && eth->soc->has_carrier(eth))
		netif_carrier_on(dev);

	netif_start_queue(dev);
	eth->soc->fwd_config(eth);

	return 0;
}

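/* Clear the enable bits in the given global config register and poll the
 * TX/RX busy flags until the engine has actually stopped, giving up after
 * ten 20 ms sleeps (roughly 200 ms).
 */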
static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	unsigned long flags;
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_irqsave(&eth->page_lock, flags);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_irqrestore(&eth->page_lock, flags);

	/* wait for the dma to stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}

static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	netif_tx_disable(dev);
	if (eth->phy)
		eth->phy->stop(mac);

	if (!atomic_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
	napi_disable(&eth->rx_napi);

	if (eth->soc->dma_type & MTK_PDMA)
		mtk_stop_dma(eth, mtk_reg_table[MTK_REG_PDMA_GLO_CFG]);

	if (eth->soc->dma_type & MTK_QDMA)
		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}

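/* One-time hardware bring-up: reset the frame engine, initialize the
 * switch core where present, request the shared interrupt, set up the
 * MDIO bus, mask all interrupts, seed the VLAN VID translation table and
 * apply the SoC specific forward/checksum configuration.
 */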
static int __init mtk_init_hw(struct mtk_eth *eth)
{
	int i, err;

	eth->soc->reset_fe(eth);

	if (eth->soc->switch_init)
		if (eth->soc->switch_init(eth)) {
			dev_err(eth->dev, "failed to initialize switch core\n");
			return -ENODEV;
		}

	err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
			       dev_name(eth->dev), eth);
	if (err)
		return err;

	err = mtk_mdio_init(eth);
	if (err)
		return err;

	/* disable delayed interrupts and mask all interrupt sources */
	mtk_reg_w32(eth, 0, MTK_REG_DLY_INT_CFG);
	if (eth->soc->dma_type & MTK_QDMA)
		mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);

	/* The frame engine inserts the VLAN tag selected by the VIDX field
	 * of the TX descriptor; populate the 16-entry VID translation
	 * table, packing two 16-bit VIDs into each 32-bit register.
	 */
	if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
		for (i = 0; i < 16; i += 2)
			mtk_w32(eth, ((i + 1) << 16) + i,
				mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
				(i * 2));

	if (eth->soc->fwd_config(eth))
		dev_err(eth->dev, "unable to get clock\n");

	if (mtk_reg_table[MTK_REG_MTK_RST_GL]) {
		mtk_reg_w32(eth, 1, MTK_REG_MTK_RST_GL);
		mtk_reg_w32(eth, 0, MTK_REG_MTK_RST_GL);
	}

	return 0;
}

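/* ndo_init: pick the MAC address (devicetree property first, falling back
 * to a random one), program it into the hardware, initialize any switch
 * ports described under the MAC node and connect the PHY.
 */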
static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct device_node *port;
	const char *mac_addr;
	int err;

	mac_addr = of_get_mac_address(mac->of_node);
	if (mac_addr)
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use a random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}
	mac->hw->soc->set_mac(mac, dev->dev_addr);

	if (eth->soc->port_init)
		for_each_child_of_node(mac->of_node, port)
			if (of_device_is_compatible(port,
						    "mediatek,eth-port") &&
			    of_device_is_available(port))
				eth->soc->port_init(eth, mac, port);

	if (eth->phy) {
		err = eth->phy->connect(mac);
		if (err)
			return err;
	}

	return 0;
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	if (eth->phy)
		eth->phy->disconnect(mac);
	mtk_mdio_cleanup(eth);

	mtk_irq_disable(eth, ~0);
	free_irq(dev->irq, dev);
}

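/* Only the standard MII ioctls are supported; they are passed straight
 * through to the attached PHY.
 */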
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (!mac->phy_dev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

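/* ndo_change_mtu: on SoCs without jumbo frame support, fall back to the
 * generic helper.  Otherwise resize the RX buffers when crossing the
 * standard MTU boundary and, for jumbo MTUs, program the maximum frame
 * length into GDM1 (the JMB_LEN field apparently counts 1 KiB units,
 * hence DIV_ROUND_UP(frag_size, 1024)).  A running interface is
 * restarted so the new buffer geometry takes effect.
 */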
static int mtk_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int frag_size, old_mtu;
	u32 fwd_cfg;

	if (!eth->soc->jumbo_frame)
		return eth_change_mtu(dev, new_mtu);

	frag_size = mtk_max_frag_size(new_mtu);
	if (new_mtu < 68 || frag_size > PAGE_SIZE)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
		return 0;

	if (new_mtu <= ETH_DATA_LEN)
		eth->rx_ring[0].frag_size = mtk_max_frag_size(ETH_DATA_LEN);
	else
		eth->rx_ring[0].frag_size = PAGE_SIZE;
	eth->rx_ring[0].rx_buf_size =
		mtk_max_buf_size(eth->rx_ring[0].frag_size);

	if (!netif_running(dev))
		return 0;

	mtk_stop(dev);
	fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
	if (new_mtu <= ETH_DATA_LEN) {
		fwd_cfg &= ~MTK_GDM1_JMB_EN;
	} else {
		fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT);
		fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
			    MTK_GDM1_JMB_LEN_SHIFT) | MTK_GDM1_JMB_EN;
	}
	mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);

	return mtk_open(dev);
}

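/* Deferred recovery, scheduled from the TX timeout path: restart the
 * interface under the RTNL lock and close it for good if the restart
 * fails.
 */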
static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
	struct mtk_eth *eth = mac->hw;
	struct net_device *dev = eth->netdev[mac->id];
	int err;

	rtnl_lock();
	mtk_stop(dev);

	err = mtk_open(dev);
	if (err) {
		netif_alert(eth, ifup, dev,
			    "Driver up/down cycle failed, closing device.\n");
		dev_close(dev);
	}
	rtnl_unlock();
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < eth->soc->mac_count; i++) {
		struct mtk_mac *mac;

		if (!eth->netdev[i])
			continue;

		/* the work item lives in the netdev private area, so it
		 * must be cancelled before the netdev is freed
		 */
		mac = netdev_priv(eth->netdev[i]);
		unregister_netdev(eth->netdev[i]);
		cancel_work_sync(&mac->pending_work);
		free_netdev(eth->netdev[i]);
	}

	return 0;
}

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init = mtk_init,
	.ndo_uninit = mtk_uninit,
	.ndo_open = mtk_open,
	.ndo_stop = mtk_stop,
	.ndo_start_xmit = mtk_start_xmit,
	.ndo_set_mac_address = mtk_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = mtk_do_ioctl,
	.ndo_change_mtu = mtk_change_mtu,
	.ndo_tx_timeout = mtk_tx_timeout,
	.ndo_get_stats64 = mtk_get_stats64,
	.ndo_vlan_rx_add_vid = mtk_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = mtk_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = mtk_poll_controller,
#endif
};

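/* Instantiate one netdev per MAC described in the devicetree.  The
 * mandatory "reg" property selects the MAC id, e.g. (hypothetical
 * fragment; node names and properties other than "reg" and the
 * compatible string may differ per board):
 *
 *	eth-mac@0 {
 *		compatible = "mediatek,eth-mac";
 *		reg = <0>;
 *	};
 *
 * Hardware counters, offload features and ethtool ops are wired up here
 * before the netdev is registered.
 */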
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	struct mtk_mac *mac;
	const __be32 *_id = of_get_property(np, "reg", NULL);
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}
	id = be32_to_cpup(_id);
	if (id >= eth->soc->mac_count || eth->netdev[id]) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;
	INIT_WORK(&mac->pending_work, mtk_pending_work);

	if (mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]) {
		mac->hw_stats = devm_kzalloc(eth->dev,
					     sizeof(*mac->hw_stats),
					     GFP_KERNEL);
		if (!mac->hw_stats) {
			err = -ENOMEM;
			goto free_netdev;
		}
		spin_lock_init(&mac->hw_stats->stats_lock);
		mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
	}

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	if (eth->soc->init_data)
		eth->soc->init_data(eth->soc, eth->netdev[id]);

	eth->netdev[id]->vlan_features = eth->soc->hw_features &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= eth->soc->hw_features;

	if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
		eth->netdev[id]->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	mtk_set_ethtool_ops(eth->netdev[id]);

	err = register_netdev(eth->netdev[id]);
	if (err) {
		dev_err(eth->dev, "error bringing up device\n");
		goto free_netdev;
	}
	eth->netdev[id]->irq = eth->irq;
	netif_info(eth, probe, eth->netdev[id],
		   "mediatek frame engine at 0x%08lx, irq %d\n",
		   eth->netdev[id]->base_addr, eth->netdev[id]->irq);

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	/* clear the stale pointer so mtk_cleanup() skips this slot */
	eth->netdev[id] = NULL;
	return err;
}

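/* Probe: map the frame engine registers and look up the ethsys syscon,
 * interrupt, system clock and optional switch phandle from the
 * devicetree, then bring up the hardware and register one netdev per
 * MAC node.
 */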
static int mtk_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	const struct of_device_id *match;
	struct device_node *mac_np;
	struct mtk_soc_data *soc;
	struct mtk_eth *eth;
	struct clk *sysclk;
	int err;

	device_reset(&pdev->dev);

	match = of_match_device(of_mtk_match, &pdev->dev);
	soc = (struct mtk_soc_data *)match->data;

	if (soc->reg_table)
		mtk_reg_table = soc->reg_table;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	spin_lock_init(&eth->page_lock);

	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "mediatek,ethsys");
	if (IS_ERR(eth->ethsys))
		return PTR_ERR(eth->ethsys);

	eth->irq = platform_get_irq(pdev, 0);
	if (eth->irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource found\n");
		return -ENXIO;
	}

	sysclk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(sysclk)) {
		dev_err(&pdev->dev,
			"the clock is not defined in the devicetree\n");
		return -ENXIO;
	}
	eth->sysclk = clk_get_rate(sysclk);

	eth->switch_np = of_parse_phandle(pdev->dev.of_node,
					  "mediatek,switch", 0);
	if (soc->has_switch && !eth->switch_np) {
		dev_err(&pdev->dev, "failed to read switch phandle\n");
		return -ENODEV;
	}

	eth->dev = &pdev->dev;
	eth->soc = soc;
	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);

	err = mtk_init_hw(eth);
	if (err)
		return err;

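	/* With multiple MACs the single RX NAPI context cannot belong to
	 * any one netdev, so it is attached to a dummy netdev; with a
	 * single MAC the netdev owns it directly.
	 */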
	if (eth->soc->mac_count > 1) {
		for_each_child_of_node(pdev->dev.of_node, mac_np) {
			if (!of_device_is_compatible(mac_np,
						     "mediatek,eth-mac"))
				continue;

			if (!of_device_is_available(mac_np))
				continue;

			err = mtk_add_mac(eth, mac_np);
			if (err)
				goto err_free_dev;
		}

		init_dummy_netdev(&eth->dummy_dev);
		netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
			       soc->napi_weight);
	} else {
		err = mtk_add_mac(eth, pdev->dev.of_node);
		if (err)
			goto err_free_dev;
		netif_napi_add(eth->netdev[0], &eth->rx_napi, mtk_poll,
			       soc->napi_weight);
	}

	platform_set_drvdata(pdev, eth);

	return 0;

err_free_dev:
	mtk_cleanup(eth);
	return err;
}

static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);

	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");