/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014-2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)				"bcmgenet: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"

/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
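/* Worked example (illustrative values only, not taken from any particular
 * hw_params table): with TOTAL_DESC = 256 descriptors, tx_queues = 4 and
 * tx_bds_per_q = 32, GENET_Q16_TX_BD_CNT evaluates to 256 - 4 * 32 = 128,
 * i.e. whatever is not reserved for the priority queues is left for the
 * default queue 16.
 */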

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
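/* Illustrative sketch of the layout, assuming TOTAL_DESC = 256 and
 * words_per_bd = 2 (the values vary per GENET revision): the descriptor
 * area then occupies 256 * 2 * sizeof(u32) = 2048 bytes, so the TDMA/RDMA
 * register blocks referenced above start 0x800 bytes past
 * tdma_offset/rdma_offset.
 */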

static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so save these expensive
	 * writes unless the platform is explicitly configured for
	 * 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_addr(priv, d, addr);
	dmadesc_set_length_status(priv, d, val);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register reads from the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so skip these expensive
	 * reads unless the platform is explicitly configured for
	 * 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}

#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)

static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These macros are defined to deal with the register map change
 * between GENET1.1 and GENET2. Only the registers currently used by
 * the driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				   priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
			     priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				   priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
			     priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
	DMA_RING0_TIMEOUT,
	DMA_RING1_TIMEOUT,
	DMA_RING2_TIMEOUT,
	DMA_RING3_TIMEOUT,
	DMA_RING4_TIMEOUT,
	DMA_RING5_TIMEOUT,
	DMA_RING6_TIMEOUT,
	DMA_RING7_TIMEOUT,
	DMA_RING8_TIMEOUT,
	DMA_RING9_TIMEOUT,
	DMA_RING10_TIMEOUT,
	DMA_RING11_TIMEOUT,
	DMA_RING12_TIMEOUT,
	DMA_RING13_TIMEOUT,
	DMA_RING14_TIMEOUT,
	DMA_RING15_TIMEOUT,
	DMA_RING16_TIMEOUT,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

/* RDMA/TDMA ring registers and accessors
 * We merge the common fields and only prefix with T/R the registers
 * whose meaning depends on the direction.
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};
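/* Note on the aliasing above (descriptive only, nothing new is defined
 * here): because e.g. RDMA_PROD_INDEX is declared equal to TDMA_CONS_INDEX,
 * the RDMA producer index lives at the same per-ring offset within the RDMA
 * block as the TDMA consumer index does within the TDMA block, so a single
 * offset table below can serve both directions.
 */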

/* GENET v4 supports 40-bit pointer addressing. The LO and HI word parts
 * are necessarily contiguous, which shifts the offsets of the remaining
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_READ_PTR_HI]		= 0x04,
	[TDMA_CONS_INDEX]		= 0x08,
	[TDMA_PROD_INDEX]		= 0x0C,
	[DMA_RING_BUF_SIZE]		= 0x10,
	[DMA_START_ADDR]		= 0x14,
	[DMA_START_ADDR_HI]		= 0x18,
	[DMA_END_ADDR]			= 0x1C,
	[DMA_END_ADDR_HI]		= 0x20,
	[DMA_MBUF_DONE_THRESH]		= 0x24,
	[TDMA_FLOW_PERIOD]		= 0x28,
	[TDMA_WRITE_PTR]		= 0x2C,
	[TDMA_WRITE_PTR_HI]		= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_CONS_INDEX]		= 0x04,
	[TDMA_PROD_INDEX]		= 0x08,
	[DMA_RING_BUF_SIZE]		= 0x0C,
	[DMA_START_ADDR]		= 0x10,
	[DMA_END_ADDR]			= 0x14,
	[DMA_MBUF_DONE_THRESH]		= 0x18,
	[TDMA_FLOW_PERIOD]		= 0x1C,
	[TDMA_WRITE_PTR]		= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;

static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}

static int bcmgenet_begin(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn on the clock */
	return clk_prepare_enable(priv->clk);
}

static void bcmgenet_complete(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn off the clock */
	clk_disable_unprepare(priv->clk);
}

static int bcmgenet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_get(priv->phydev, cmd);
}

static int bcmgenet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(priv->phydev, cmd);
}

static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}

static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable the 64-byte descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}

static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	return 0;
}
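/* Reader aid (inferred from this file, not from the datasheet): DESC_INDEX
 * above is ring 16, the default Tx/Rx ring, and the DMA_RING16_TIMEOUT
 * value is expressed in units of roughly 8.192 us, hence the
 * "* 8192 / 1000" conversion back to microseconds for ethtool.
 */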

static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* Base system clock is 125 MHz, and the DMA timeout is this reference
	 * clock divided by 1024, which yields a unit of roughly 8.192 us; the
	 * maximum value also has to fit in the DMA_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but
	 * will always generate an interrupt either after MBDONE packets have
	 * been transmitted, or when the ring is empty.
	 */
	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
		return -EOPNOTSUPP;

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		bcmgenet_rdma_ring_writel(priv, i,
					  ec->rx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);

		reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
		reg &= ~DMA_TIMEOUT_MASK;
		reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
		bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
	}

	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				  ec->rx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);

	return 0;
}
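/* Worked example of the conversion above (illustrative numbers): asking for
 * rx_coalesce_usecs = 50 programs DIV_ROUND_UP(50 * 1000, 8192) = 7 timeout
 * units, i.e. about 57.3 us, since the hardware granularity is 8.192 us per
 * unit; reading it back through bcmgenet_get_coalesce() then reports
 * 7 * 8192 / 1000 = 57 us.
 */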

/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}
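/* For illustration (an equivalent expansion, not an extra table entry): the
 * STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt) entry below expands to roughly
 *
 *	{ .stat_string = "rx_pkts",
 *	  .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->mib.rx.pkt),
 *	  .stat_offset = offsetof(struct bcmgenet_priv, mib.rx.pkt),
 *	  .type        = BCMGENET_STAT_MIB_RX, }
 *
 * so each ethtool string maps back to a field of the driver's software MIB
 * copy.
 */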

/* There is a 0xC gap between the end of the RX stats and the beginning of
 * the TX stats, and another between the end of the TX stats and the
 * beginning of the RX RUNT stats.
 */
#define BCMGENET_STAT_OFFSET	0xc

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT_V1),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
			UMAC_RBUF_ERR_CNT_V1),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)

static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
{
	u16 new_offset;
	u32 val;

	switch (offset) {
	case UMAC_RBUF_OVFL_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_OVFL_CNT_V2;
		else
			new_offset = RBUF_OVFL_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	case UMAC_RBUF_ERR_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_ERR_CNT_V2;
		else
			new_offset = RBUF_ERR_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	default:
		val = bcmgenet_umac_readl(priv, offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_umac_writel(priv, 0, offset);
		break;
	}

	return val;
}

static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_RUNT:
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_TX:
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_RX:
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			offset = 0;	/* Reset Offset */
			break;
		case BCMGENET_STAT_MISC:
			if (GENET_IS_V1(priv)) {
				val = bcmgenet_umac_readl(priv, s->reg_offset);
				/* clear if overflowed */
				if (val == ~0)
					bcmgenet_umac_writel(priv, 0,
							     s->reg_offset);
			} else {
				val = bcmgenet_update_stat_misc(priv,
								s->reg_offset);
			}
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
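/* Reader aid for the switch fall-through above (derived from the offsets,
 * not an additional code path): an RX counter is read from
 * UMAC_MIB_START + j, a TX counter from UMAC_MIB_START + j + 0xC, and a
 * RUNT counter from UMAC_MIB_START + j + 0x18, since the RUNT case falls
 * through both BCMGENET_STAT_OFFSET increments before the common read.
 */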

static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		if (sizeof(unsigned long) != sizeof(u32) &&
		    s->stat_sizeof == sizeof(unsigned long))
			data[i] = *(unsigned long *)p;
		else
			data[i] = *(u32 *)p;
	}
}

static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27 MHz clock automatically */
	reg = __raw_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	__raw_writel(reg, priv->base + off);

	/* Do the same thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}

static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(priv->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(priv->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(priv->phydev, e);
}

/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
	.begin			= bcmgenet_begin,
	.complete		= bcmgenet_complete,
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
	.get_link_ksettings	= bcmgenet_get_link_ksettings,
	.set_link_ksettings	= bcmgenet_set_link_ksettings,
};

/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	/* Propagate any error from the WoL configuration path */
	return ret;
}

static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
			 EXT_PWR_DOWN_BIAS);
		/* fallthrough */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	if (mode == GENET_POWER_PASSIVE) {
		bcmgenet_phy_power_set(priv->dev, true);
		bcmgenet_mii_reset(priv->dev);
	}
}

/* ioctl handles special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}

static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}
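/* Illustrative wrap-around (hypothetical numbers): for a ring with
 * cb_ptr = 32 and end_ptr = 63, a write_ptr of 63 hands out the control
 * block at index 63 - 32 = 31 within ring->cbs and then wraps write_ptr
 * back to 32, so the local pointer always stays inside the ring's slice of
 * the control-block array.
 */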

/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}

/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int pkts_compl = 0;
	unsigned int bytes_compl = 0;
	unsigned int c_index;
	unsigned int txbds_ready;
	unsigned int txbds_processed = 0;

	/* Compute how many buffers have been transmitted since the last
	 * xmit call
	 */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	c_index &= DMA_C_INDEX_MASK;

	if (likely(c_index >= ring->c_index))
		txbds_ready = c_index - ring->c_index;
	else
		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
		if (tx_cb_ptr->skb) {
			pkts_compl++;
			bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
			dma_unmap_single(kdev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 dma_unmap_len(tx_cb_ptr, dma_len),
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dma_unmap_page(kdev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;

	dev->stats.tx_packets += pkts_compl;
	dev->stats.tx_bytes += bytes_compl;

	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
				  pkts_compl, bytes_compl);

	return pkts_compl;
}
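/* Worked example of the consumer-index wrap above (assuming the usual
 * 16-bit DMA_C_INDEX_MASK of 0xFFFF): if the ring last saw
 * c_index = 0xFFFE and the hardware now reports 0x0003, txbds_ready is
 * computed as (0xFFFF + 1) - 0xFFFE + 0x0003 = 5 completed buffers, which
 * is exactly the modulo-65536 distance between the two indices.
 */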

static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;
	struct netdev_queue *txq;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
		netif_tx_wake_queue(txq);
	}
	spin_unlock_irqrestore(&ring->lock, flags);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring);

		return 0;
	}

	return budget;
}

static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}

/* Transmits a single SKB (either the head of a fragment list or a
 * standalone SKB); the caller must hold priv->lock.
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = skb;

	skb_len = skb_headlen(skb);

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	return 0;
}
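/* Sketch of the length_status word built above (field positions taken from
 * the shifts in this file; the concrete bit widths live in bcmgenet.h):
 *
 *	length_status = (buffer length << DMA_BUFLENGTH_SHIFT)
 *			| SOP/EOP flags passed in via dma_desc_flags
 *			| (qtag_mask << DMA_TX_QTAG_SHIFT)
 *			| DMA_TX_APPEND_CRC
 *			| DMA_TX_DO_CSUM (only for CHECKSUM_PARTIAL skbs)
 *
 * so a single 32-bit descriptor word carries both the buffer length and the
 * per-packet transmit control bits.
 */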

/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
			      skb_frag_t *frag,
			      u16 dma_desc_flags,
			      struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int frag_size;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = NULL;

	frag_size = skb_frag_size(frag);

	mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
			  __func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	return 0;
}

/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
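/* Rough picture of what the helper above produces (a sketch, assuming the
 * 64-byte status_64 layout from bcmgenet.h): for a CHECKSUM_PARTIAL TCP/IPv4
 * skb, the transmit status block (TSB) is pushed in front of the Ethernet
 * header, and tx_csum_info tells the hardware where the checksum region
 * starts and where to write the result, both measured from the start of the
 * frame that follows the TSB:
 *
 *	[ 64B TSB | Ethernet | IP | TCP | payload ]
 *	  checksum start = skb_checksum_start_offset() - 64
 *	  checksum write location = start + skb->csum_offset
 */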
1497 | ||
1498 | static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) | |
1499 | { | |
1500 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
1501 | struct bcmgenet_tx_ring *ring = NULL; | |
b2cde2cc | 1502 | struct netdev_queue *txq; |
1c1008c7 FF |
1503 | unsigned long flags = 0; |
1504 | int nr_frags, index; | |
1505 | u16 dma_desc_flags; | |
1506 | int ret; | |
1507 | int i; | |
1508 | ||
1509 | index = skb_get_queue_mapping(skb); | |
1510 | /* Mapping strategy: | |
1511 | * queue_mapping = 0, unclassified, packet transmitted through ring 16 |
1512 | * queue_mapping = 1, goes to ring 0 (highest priority queue). | |
1513 | * queue_mapping = 2, goes to ring 1. | |
1514 | * queue_mapping = 3, goes to ring 2. | |
1515 | * queue_mapping = 4, goes to ring 3. | |
1516 | */ | |
1517 | if (index == 0) | |
1518 | index = DESC_INDEX; | |
1519 | else | |
1520 | index -= 1; | |
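/* Cross-reference (sketch based on bcmgenet_init_tx_ring() below): the
 * inverse mapping is set up at ring-init time, where priority ring i
 * gets ring->queue = i + 1 and the default ring 16 (DESC_INDEX) gets
 * ring->queue = 0. So an skb with queue_mapping == 2, for example, is
 * sent on priv->tx_rings[1], and the netdev_queue used for stop/wake
 * below is queue 2 again.
 */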
1521 | ||
1c1008c7 | 1522 | ring = &priv->tx_rings[index]; |
b2cde2cc | 1523 | txq = netdev_get_tx_queue(dev, ring->queue); |
1c1008c7 | 1524 | |
f5a9ec20 PG |
1525 | nr_frags = skb_shinfo(skb)->nr_frags; |
1526 | ||
1c1008c7 | 1527 | spin_lock_irqsave(&ring->lock, flags); |
f5a9ec20 PG |
1528 | if (ring->free_bds <= (nr_frags + 1)) { |
1529 | if (!netif_tx_queue_stopped(txq)) { | |
1530 | netif_tx_stop_queue(txq); | |
1531 | netdev_err(dev, | |
1532 | "%s: tx ring %d full when queue %d awake\n", | |
1533 | __func__, index, ring->queue); | |
1534 | } | |
1c1008c7 FF |
1535 | ret = NETDEV_TX_BUSY; |
1536 | goto out; | |
1537 | } | |
1538 | ||
474ea9ca FF |
1539 | if (skb_padto(skb, ETH_ZLEN)) { |
1540 | ret = NETDEV_TX_OK; | |
1541 | goto out; | |
1542 | } | |
1543 | ||
55868120 PG |
1544 | /* Retain how many bytes will be sent on the wire, without the TSB |
1545 | * inserted by the transmit checksum offload | |
1546 | */ | |
1547 | GENET_CB(skb)->bytes_sent = skb->len; | |
1548 | ||
1c1008c7 FF |
1549 | /* set the SKB transmit checksum */ |
1550 | if (priv->desc_64b_en) { | |
bc23333b PG |
1551 | skb = bcmgenet_put_tx_csum(dev, skb); |
1552 | if (!skb) { | |
1c1008c7 FF |
1553 | ret = NETDEV_TX_OK; |
1554 | goto out; | |
1555 | } | |
1556 | } | |
1557 | ||
1558 | dma_desc_flags = DMA_SOP; | |
1559 | if (nr_frags == 0) | |
1560 | dma_desc_flags |= DMA_EOP; | |
1561 | ||
1562 | /* Transmit single SKB or head of fragment list */ | |
1563 | ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring); | |
1564 | if (ret) { | |
1565 | ret = NETDEV_TX_OK; | |
1566 | goto out; | |
1567 | } | |
1568 | ||
1569 | /* xmit fragment */ | |
1570 | for (i = 0; i < nr_frags; i++) { | |
1571 | ret = bcmgenet_xmit_frag(dev, | |
c91b7f66 FF |
1572 | &skb_shinfo(skb)->frags[i], |
1573 | (i == nr_frags - 1) ? DMA_EOP : 0, | |
1574 | ring); | |
1c1008c7 FF |
1575 | if (ret) { |
1576 | ret = NETDEV_TX_OK; | |
1577 | goto out; | |
1578 | } | |
1579 | } | |
1580 | ||
d03825fb FF |
1581 | skb_tx_timestamp(skb); |
1582 | ||
ae67bf01 FF |
1583 | /* Decrement total BD count and advance our write pointer */ |
1584 | ring->free_bds -= nr_frags + 1; | |
1585 | ring->prod_index += nr_frags + 1; | |
1586 | ring->prod_index &= DMA_P_INDEX_MASK; | |
1587 | ||
e178c8c2 PG |
1588 | netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent); |
1589 | ||
4092e6ac | 1590 | if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) |
b2cde2cc | 1591 | netif_tx_stop_queue(txq); |
1c1008c7 | 1592 | |
ddd0ca5d FF |
1593 | if (!skb->xmit_more || netif_xmit_stopped(txq)) |
1594 | /* Packets are ready, update producer index */ | |
1595 | bcmgenet_tdma_ring_writel(priv, ring->index, | |
1596 | ring->prod_index, TDMA_PROD_INDEX); | |
1c1008c7 FF |
1597 | out: |
1598 | spin_unlock_irqrestore(&ring->lock, flags); | |
1599 | ||
1600 | return ret; | |
1601 | } | |
1602 | ||
d6707bec PG |
1603 | static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, |
1604 | struct enet_cb *cb) | |
1c1008c7 FF |
1605 | { |
1606 | struct device *kdev = &priv->pdev->dev; | |
1607 | struct sk_buff *skb; | |
d6707bec | 1608 | struct sk_buff *rx_skb; |
1c1008c7 | 1609 | dma_addr_t mapping; |
1c1008c7 | 1610 | |
d6707bec | 1611 | /* Allocate a new Rx skb */ |
c91b7f66 | 1612 | skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT); |
d6707bec PG |
1613 | if (!skb) { |
1614 | priv->mib.alloc_rx_buff_failed++; | |
1615 | netif_err(priv, rx_err, priv->dev, | |
1616 | "%s: Rx skb allocation failed\n", __func__); | |
1617 | return NULL; | |
1618 | } | |
1c1008c7 | 1619 | |
d6707bec PG |
1620 | /* DMA-map the new Rx skb */ |
1621 | mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, | |
1622 | DMA_FROM_DEVICE); | |
1623 | if (dma_mapping_error(kdev, mapping)) { | |
44c8bc3c | 1624 | priv->mib.rx_dma_failed++; |
d6707bec | 1625 | dev_kfree_skb_any(skb); |
1c1008c7 | 1626 | netif_err(priv, rx_err, priv->dev, |
d6707bec PG |
1627 | "%s: Rx skb DMA mapping failed\n", __func__); |
1628 | return NULL; | |
1c1008c7 FF |
1629 | } |
1630 | ||
d6707bec PG |
1631 | /* Grab the current Rx skb from the ring and DMA-unmap it */ |
1632 | rx_skb = cb->skb; | |
1633 | if (likely(rx_skb)) | |
1634 | dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), | |
1635 | priv->rx_buf_len, DMA_FROM_DEVICE); | |
1636 | ||
1637 | /* Put the new Rx skb on the ring */ | |
1638 | cb->skb = skb; | |
1c1008c7 | 1639 | dma_unmap_addr_set(cb, dma_addr, mapping); |
8ac467e8 | 1640 | dmadesc_set_addr(priv, cb->bd_addr, mapping); |
1c1008c7 | 1641 | |
d6707bec PG |
1642 | /* Return the current Rx skb to caller */ |
1643 | return rx_skb; | |
1c1008c7 FF |
1644 | } |
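/* Note on how bcmgenet_rx_refill() is used (summary of the call sites in
 * this file): bcmgenet_desc_rx() calls it on the hot path so a freshly
 * mapped buffer is parked on the descriptor before the previous skb is
 * handed up the stack, and bcmgenet_alloc_rx_buffers() calls it once per
 * descriptor at ring-init time, when cb->skb is still NULL and the
 * returned "old" skb is therefore NULL (or immediately freed).
 */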
1645 | ||
1646 | /* bcmgenet_desc_rx - descriptor-based Rx processing. | |
1647 | * This may be called from the bottom half or from the NAPI polling method. | |
1648 | */ | |
4055eaef | 1649 | static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, |
1c1008c7 FF |
1650 | unsigned int budget) |
1651 | { | |
4055eaef | 1652 | struct bcmgenet_priv *priv = ring->priv; |
1c1008c7 FF |
1653 | struct net_device *dev = priv->dev; |
1654 | struct enet_cb *cb; | |
1655 | struct sk_buff *skb; | |
1656 | u32 dma_length_status; | |
1657 | unsigned long dma_flag; | |
d6707bec | 1658 | int len; |
1c1008c7 FF |
1659 | unsigned int rxpktprocessed = 0, rxpkttoprocess; |
1660 | unsigned int p_index; | |
d26ea6cc | 1661 | unsigned int discards; |
1c1008c7 FF |
1662 | unsigned int chksum_ok = 0; |
1663 | ||
4055eaef | 1664 | p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); |
d26ea6cc PG |
1665 | |
1666 | discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & | |
1667 | DMA_P_INDEX_DISCARD_CNT_MASK; | |
1668 | if (discards > ring->old_discards) { | |
1669 | discards = discards - ring->old_discards; | |
1670 | dev->stats.rx_missed_errors += discards; | |
1671 | dev->stats.rx_errors += discards; | |
1672 | ring->old_discards += discards; | |
1673 | ||
1674 | /* Clear HW register when we reach 75% of maximum 0xFFFF */ | |
1675 | if (ring->old_discards >= 0xC000) { | |
1676 | ring->old_discards = 0; | |
4055eaef | 1677 | bcmgenet_rdma_ring_writel(priv, ring->index, 0, |
d26ea6cc PG |
1678 | RDMA_PROD_INDEX); |
1679 | } | |
1680 | } | |
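/* Note on the 0xC000 threshold above (illustrative): the discard count
 * is a 16-bit field of RDMA_PROD_INDEX, so it wraps at 0xFFFF (65535).
 * Zeroing the accumulated software count and the hardware counter once
 * roughly 75% of that range (0xC000 = 49152) has been consumed keeps the
 * "discards - ring->old_discards" delta computed above from spanning a
 * counter wrap.
 */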
1681 | ||
1c1008c7 FF |
1682 | p_index &= DMA_P_INDEX_MASK; |
1683 | ||
8ac467e8 PG |
1684 | if (likely(p_index >= ring->c_index)) |
1685 | rxpkttoprocess = p_index - ring->c_index; | |
1c1008c7 | 1686 | else |
8ac467e8 PG |
1687 | rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index + |
1688 | p_index; | |
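/* Illustrative example of the wrap handling above (assuming the usual
 * 16-bit index mask, DMA_C_INDEX_MASK == 0xFFFF): if the hardware
 * producer index has wrapped around to p_index = 0x0003 while the
 * software consumer index is still at ring->c_index = 0xFFFE, then
 * rxpkttoprocess = (0xFFFF + 1) - 0xFFFE + 0x0003 = 5 packets.
 */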
1c1008c7 FF |
1689 | |
1690 | netif_dbg(priv, rx_status, dev, | |
c91b7f66 | 1691 | "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); |
1c1008c7 FF |
1692 | |
1693 | while ((rxpktprocessed < rxpkttoprocess) && | |
c91b7f66 | 1694 | (rxpktprocessed < budget)) { |
8ac467e8 | 1695 | cb = &priv->rx_cbs[ring->read_ptr]; |
d6707bec | 1696 | skb = bcmgenet_rx_refill(priv, cb); |
b629be5c | 1697 | |
b629be5c FF |
1698 | if (unlikely(!skb)) { |
1699 | dev->stats.rx_dropped++; | |
d6707bec | 1700 | goto next; |
b629be5c FF |
1701 | } |
1702 | ||
1c1008c7 | 1703 | if (!priv->desc_64b_en) { |
c91b7f66 | 1704 | dma_length_status = |
8ac467e8 | 1705 | dmadesc_get_length_status(priv, cb->bd_addr); |
1c1008c7 FF |
1706 | } else { |
1707 | struct status_64 *status; | |
164d4f20 | 1708 | |
1c1008c7 FF |
1709 | status = (struct status_64 *)skb->data; |
1710 | dma_length_status = status->length_status; | |
1711 | } | |
1712 | ||
1713 | /* DMA flags and length are still valid no matter how | |
1714 | * we got the Receive Status Vector (64B RSB or register) | |
1715 | */ | |
1716 | dma_flag = dma_length_status & 0xffff; | |
1717 | len = dma_length_status >> DMA_BUFLENGTH_SHIFT; | |
1718 | ||
1719 | netif_dbg(priv, rx_status, dev, | |
c91b7f66 | 1720 | "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", |
8ac467e8 PG |
1721 | __func__, p_index, ring->c_index, |
1722 | ring->read_ptr, dma_length_status); | |
1c1008c7 | 1723 | |
1c1008c7 FF |
1724 | if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { |
1725 | netif_err(priv, rx_status, dev, | |
c91b7f66 | 1726 | "dropping fragmented packet!\n"); |
1c1008c7 | 1727 | dev->stats.rx_errors++; |
d6707bec PG |
1728 | dev_kfree_skb_any(skb); |
1729 | goto next; | |
1c1008c7 | 1730 | } |
d6707bec | 1731 | |
1c1008c7 FF |
1732 | /* report errors */ |
1733 | if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | | |
1734 | DMA_RX_OV | | |
1735 | DMA_RX_NO | | |
1736 | DMA_RX_LG | | |
1737 | DMA_RX_RXER))) { | |
1738 | netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", | |
c91b7f66 | 1739 | (unsigned int)dma_flag); |
1c1008c7 FF |
1740 | if (dma_flag & DMA_RX_CRC_ERROR) |
1741 | dev->stats.rx_crc_errors++; | |
1742 | if (dma_flag & DMA_RX_OV) | |
1743 | dev->stats.rx_over_errors++; | |
1744 | if (dma_flag & DMA_RX_NO) | |
1745 | dev->stats.rx_frame_errors++; | |
1746 | if (dma_flag & DMA_RX_LG) | |
1747 | dev->stats.rx_length_errors++; | |
1c1008c7 | 1748 | dev->stats.rx_errors++; |
d6707bec PG |
1749 | dev_kfree_skb_any(skb); |
1750 | goto next; | |
1c1008c7 FF |
1751 | } /* error packet */ |
1752 | ||
1753 | chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && | |
c91b7f66 | 1754 | priv->desc_rxchk_en; |
1c1008c7 FF |
1755 | |
1756 | skb_put(skb, len); | |
1757 | if (priv->desc_64b_en) { | |
1758 | skb_pull(skb, 64); | |
1759 | len -= 64; | |
1760 | } | |
1761 | ||
1762 | if (likely(chksum_ok)) | |
1763 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1764 | ||
1765 | /* remove the 2 bytes the hardware added for IP alignment */ | |
1766 | skb_pull(skb, 2); | |
1767 | len -= 2; | |
1768 | ||
1769 | if (priv->crc_fwd_en) { | |
1770 | skb_trim(skb, len - ETH_FCS_LEN); | |
1771 | len -= ETH_FCS_LEN; | |
1772 | } | |
1773 | ||
1774 | /* Finish setting up the received SKB and send it to the kernel */ | |
1775 | skb->protocol = eth_type_trans(skb, priv->dev); | |
1776 | dev->stats.rx_packets++; | |
1777 | dev->stats.rx_bytes += len; | |
1778 | if (dma_flag & DMA_RX_MULT) | |
1779 | dev->stats.multicast++; | |
1780 | ||
1781 | /* Notify kernel */ | |
4055eaef | 1782 | napi_gro_receive(&ring->napi, skb); |
1c1008c7 FF |
1783 | netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); |
1784 | ||
d6707bec | 1785 | next: |
cf377d88 | 1786 | rxpktprocessed++; |
8ac467e8 PG |
1787 | if (likely(ring->read_ptr < ring->end_ptr)) |
1788 | ring->read_ptr++; | |
1789 | else | |
1790 | ring->read_ptr = ring->cb_ptr; | |
1791 | ||
1792 | ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; | |
4055eaef | 1793 | bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); |
1c1008c7 FF |
1794 | } |
1795 | ||
1796 | return rxpktprocessed; | |
1797 | } | |
1798 | ||
3ab11339 PG |
1799 | /* Rx NAPI polling method */ |
1800 | static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) | |
1801 | { | |
4055eaef PG |
1802 | struct bcmgenet_rx_ring *ring = container_of(napi, |
1803 | struct bcmgenet_rx_ring, napi); | |
3ab11339 PG |
1804 | unsigned int work_done; |
1805 | ||
4055eaef | 1806 | work_done = bcmgenet_desc_rx(ring, budget); |
3ab11339 PG |
1807 | |
1808 | if (work_done < budget) { | |
eb96ce01 | 1809 | napi_complete_done(napi, work_done); |
4055eaef | 1810 | ring->int_enable(ring); |
3ab11339 PG |
1811 | } |
1812 | ||
1813 | return work_done; | |
1814 | } | |
1815 | ||
1c1008c7 | 1816 | /* Assign skbs to the Rx DMA descriptors of a ring. */ |
8ac467e8 PG |
1817 | static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, |
1818 | struct bcmgenet_rx_ring *ring) | |
1c1008c7 FF |
1819 | { |
1820 | struct enet_cb *cb; | |
d6707bec | 1821 | struct sk_buff *skb; |
1c1008c7 FF |
1822 | int i; |
1823 | ||
8ac467e8 | 1824 | netif_dbg(priv, hw, priv->dev, "%s\n", __func__); |
1c1008c7 FF |
1825 | |
1826 | /* loop here for each buffer needing assign */ | |
8ac467e8 PG |
1827 | for (i = 0; i < ring->size; i++) { |
1828 | cb = ring->cbs + i; | |
d6707bec PG |
1829 | skb = bcmgenet_rx_refill(priv, cb); |
1830 | if (skb) | |
1831 | dev_kfree_skb_any(skb); | |
1832 | if (!cb->skb) | |
1833 | return -ENOMEM; | |
1c1008c7 FF |
1834 | } |
1835 | ||
d6707bec | 1836 | return 0; |
1c1008c7 FF |
1837 | } |
1838 | ||
1839 | static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) | |
1840 | { | |
8c4799ac | 1841 | struct device *kdev = &priv->pdev->dev; |
1c1008c7 FF |
1842 | struct enet_cb *cb; |
1843 | int i; | |
1844 | ||
1845 | for (i = 0; i < priv->num_rx_bds; i++) { | |
1846 | cb = &priv->rx_cbs[i]; | |
1847 | ||
1848 | if (dma_unmap_addr(cb, dma_addr)) { | |
8c4799ac | 1849 | dma_unmap_single(kdev, |
c91b7f66 FF |
1850 | dma_unmap_addr(cb, dma_addr), |
1851 | priv->rx_buf_len, DMA_FROM_DEVICE); | |
1c1008c7 FF |
1852 | dma_unmap_addr_set(cb, dma_addr, 0); |
1853 | } | |
1854 | ||
1855 | if (cb->skb) | |
1856 | bcmgenet_free_cb(cb); | |
1857 | } | |
1858 | } | |
1859 | ||
c91b7f66 | 1860 | static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) |
e29585b8 FF |
1861 | { |
1862 | u32 reg; | |
1863 | ||
1864 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
1865 | if (enable) | |
1866 | reg |= mask; | |
1867 | else | |
1868 | reg &= ~mask; | |
1869 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | |
1870 | ||
1871 | /* UniMAC stops on a packet boundary, wait for a full-size packet | |
1872 | * to be processed | |
1873 | */ | |
1874 | if (enable == 0) | |
1875 | usleep_range(1000, 2000); | |
1876 | } | |
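/* Usage note (mirroring the calls made later in this file): the helper
 * does a read-modify-write of UMAC_CMD, e.g.
 * umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true) turns the MAC Tx
 * and Rx paths on in bcmgenet_netif_start(), while
 * umac_enable_set(priv, CMD_RX_EN, false) disables receive only on the
 * bcmgenet_close() path.
 */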
1877 | ||
1c1008c7 FF |
1878 | static int reset_umac(struct bcmgenet_priv *priv) |
1879 | { | |
1880 | struct device *kdev = &priv->pdev->dev; | |
1881 | unsigned int timeout = 0; | |
1882 | u32 reg; | |
1883 | ||
1884 | /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ | |
1885 | bcmgenet_rbuf_ctrl_set(priv, 0); | |
1886 | udelay(10); | |
1887 | ||
1888 | /* disable MAC while updating its registers */ | |
1889 | bcmgenet_umac_writel(priv, 0, UMAC_CMD); | |
1890 | ||
1891 | /* issue soft reset, wait for it to complete */ | |
1892 | bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); | |
1893 | while (timeout++ < 1000) { | |
1894 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
1895 | if (!(reg & CMD_SW_RESET)) | |
1896 | return 0; | |
1897 | ||
1898 | udelay(1); | |
1899 | } | |
1900 | ||
1901 | if (timeout >= 1000) { | |
1902 | dev_err(kdev, | |
7fc527f9 | 1903 | "timeout waiting for MAC to come out of reset\n"); |
1c1008c7 FF |
1904 | return -ETIMEDOUT; |
1905 | } | |
1906 | ||
1907 | return 0; | |
1908 | } | |
1909 | ||
909ff5ef FF |
1910 | static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) |
1911 | { | |
1912 | /* Mask all interrupts. */ | |
1913 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); | |
1914 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); | |
1915 | bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); | |
1916 | bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); | |
1917 | bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); | |
1918 | bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); | |
1919 | } | |
1920 | ||
37850e37 FF |
1921 | static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) |
1922 | { | |
1923 | u32 int0_enable = 0; | |
1924 | ||
1925 | /* Monitor cable plug/unplugged event for internal PHY, external PHY | |
1926 | * and MoCA PHY | |
1927 | */ | |
1928 | if (priv->internal_phy) { | |
1929 | int0_enable |= UMAC_IRQ_LINK_EVENT; | |
1930 | } else if (priv->ext_phy) { | |
1931 | int0_enable |= UMAC_IRQ_LINK_EVENT; | |
1932 | } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { | |
1933 | if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) | |
1934 | int0_enable |= UMAC_IRQ_LINK_EVENT; | |
1935 | } | |
1936 | bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); | |
1937 | } | |
1938 | ||
1c1008c7 FF |
1939 | static int init_umac(struct bcmgenet_priv *priv) |
1940 | { | |
1941 | struct device *kdev = &priv->pdev->dev; | |
1942 | int ret; | |
b2e97eca PG |
1943 | u32 reg; |
1944 | u32 int0_enable = 0; | |
1945 | u32 int1_enable = 0; | |
1946 | int i; | |
1c1008c7 FF |
1947 | |
1948 | dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); | |
1949 | ||
1950 | ret = reset_umac(priv); | |
1951 | if (ret) | |
1952 | return ret; | |
1953 | ||
1954 | bcmgenet_umac_writel(priv, 0, UMAC_CMD); | |
1955 | /* clear tx/rx counter */ | |
1956 | bcmgenet_umac_writel(priv, | |
c91b7f66 FF |
1957 | MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, |
1958 | UMAC_MIB_CTRL); | |
1c1008c7 FF |
1959 | bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); |
1960 | ||
1961 | bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); | |
1962 | ||
1963 | /* init rx registers, enable ip header optimization */ | |
1964 | reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); | |
1965 | reg |= RBUF_ALIGN_2B; | |
1966 | bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); | |
1967 | ||
1968 | if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) | |
1969 | bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); | |
1970 | ||
909ff5ef | 1971 | bcmgenet_intr_disable(priv); |
1c1008c7 | 1972 | |
b2e97eca | 1973 | /* Enable Rx default queue 16 interrupts */ |
ee7d8c20 | 1974 | int0_enable |= UMAC_IRQ_RXDMA_DONE; |
1c1008c7 | 1975 | |
b2e97eca | 1976 | /* Enable Tx default queue 16 interrupts */ |
ee7d8c20 | 1977 | int0_enable |= UMAC_IRQ_TXDMA_DONE; |
1c1008c7 | 1978 | |
37850e37 FF |
1979 | /* Configure backpressure vectors for MoCA */ |
1980 | if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { | |
1c1008c7 FF |
1981 | reg = bcmgenet_bp_mc_get(priv); |
1982 | reg |= BIT(priv->hw_params->bp_in_en_shift); | |
1983 | ||
1984 | /* bp_mask: back pressure mask */ | |
1985 | if (netif_is_multiqueue(priv->dev)) | |
1986 | reg |= priv->hw_params->bp_in_mask; | |
1987 | else | |
1988 | reg &= ~priv->hw_params->bp_in_mask; | |
1989 | bcmgenet_bp_mc_set(priv, reg); | |
1990 | } | |
1991 | ||
1992 | /* Enable MDIO interrupts on GENET v3+ */ | |
1993 | if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) | |
b2e97eca | 1994 | int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); |
1c1008c7 | 1995 | |
4055eaef PG |
1996 | /* Enable Rx priority queue interrupts */ |
1997 | for (i = 0; i < priv->hw_params->rx_queues; ++i) | |
1998 | int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i)); | |
1999 | ||
b2e97eca PG |
2000 | /* Enable Tx priority queue interrupts */ |
2001 | for (i = 0; i < priv->hw_params->tx_queues; ++i) | |
2002 | int1_enable |= (1 << i); | |
1c1008c7 | 2003 | |
b2e97eca PG |
2004 | bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); |
2005 | bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); | |
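/* Illustrative sketch of the resulting INTRL2_1 mask: if hw_params
 * described 4 Tx and 4 Rx priority queues, int1_enable would end up as
 * 0xF | (0xF << UMAC_IRQ1_RX_INTR_SHIFT), i.e. Tx rings use bits 0..3
 * and Rx rings the bits starting at UMAC_IRQ1_RX_INTR_SHIFT. These are
 * the same per-ring bits that bcmgenet_isr1() tests to pick which
 * ring's NAPI context to schedule.
 */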
4092e6ac | 2006 | |
1c1008c7 FF |
2007 | /* Enable rx/tx engine. */ | |
2008 | dev_dbg(kdev, "done init umac\n"); | |
2009 | ||
2010 | return 0; | |
2011 | } | |
2012 | ||
4f8b2d7d | 2013 | /* Initialize a Tx ring along with corresponding hardware registers */ |
1c1008c7 FF |
2014 | static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, |
2015 | unsigned int index, unsigned int size, | |
4f8b2d7d | 2016 | unsigned int start_ptr, unsigned int end_ptr) |
1c1008c7 FF |
2017 | { |
2018 | struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; | |
2019 | u32 words_per_bd = WORDS_PER_BD(priv); | |
2020 | u32 flow_period_val = 0; | |
1c1008c7 FF |
2021 | |
2022 | spin_lock_init(&ring->lock); | |
4092e6ac | 2023 | ring->priv = priv; |
1c1008c7 FF |
2024 | ring->index = index; |
2025 | if (index == DESC_INDEX) { | |
2026 | ring->queue = 0; | |
2027 | ring->int_enable = bcmgenet_tx_ring16_int_enable; | |
2028 | ring->int_disable = bcmgenet_tx_ring16_int_disable; | |
2029 | } else { | |
2030 | ring->queue = index + 1; | |
2031 | ring->int_enable = bcmgenet_tx_ring_int_enable; | |
2032 | ring->int_disable = bcmgenet_tx_ring_int_disable; | |
2033 | } | |
4f8b2d7d | 2034 | ring->cbs = priv->tx_cbs + start_ptr; |
1c1008c7 | 2035 | ring->size = size; |
66d06757 | 2036 | ring->clean_ptr = start_ptr; |
1c1008c7 FF |
2037 | ring->c_index = 0; |
2038 | ring->free_bds = size; | |
4f8b2d7d PG |
2039 | ring->write_ptr = start_ptr; |
2040 | ring->cb_ptr = start_ptr; | |
1c1008c7 FF |
2041 | ring->end_ptr = end_ptr - 1; |
2042 | ring->prod_index = 0; | |
2043 | ||
2044 | /* Set flow period for ring != 16 */ | |
2045 | if (index != DESC_INDEX) | |
2046 | flow_period_val = ENET_MAX_MTU_SIZE << 16; | |
2047 | ||
2048 | bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); | |
2049 | bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); | |
2050 | bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); | |
2051 | /* Disable rate control for now */ | |
2052 | bcmgenet_tdma_ring_writel(priv, index, flow_period_val, | |
c91b7f66 | 2053 | TDMA_FLOW_PERIOD); |
1c1008c7 | 2054 | bcmgenet_tdma_ring_writel(priv, index, |
c91b7f66 FF |
2055 | ((size << DMA_RING_SIZE_SHIFT) | |
2056 | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); | |
1c1008c7 | 2057 | |
1c1008c7 | 2058 | /* Set start and end address, read and write pointers */ |
4f8b2d7d | 2059 | bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, |
c91b7f66 | 2060 | DMA_START_ADDR); |
4f8b2d7d | 2061 | bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, |
c91b7f66 | 2062 | TDMA_READ_PTR); |
4f8b2d7d | 2063 | bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, |
c91b7f66 | 2064 | TDMA_WRITE_PTR); |
1c1008c7 | 2065 | bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, |
c91b7f66 | 2066 | DMA_END_ADDR); |
1c1008c7 FF |
2067 | } |
2068 | ||
2069 | /* Initialize an RDMA ring */ | |
2070 | static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, | |
8ac467e8 PG |
2071 | unsigned int index, unsigned int size, |
2072 | unsigned int start_ptr, unsigned int end_ptr) | |
1c1008c7 | 2073 | { |
8ac467e8 | 2074 | struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; |
1c1008c7 FF |
2075 | u32 words_per_bd = WORDS_PER_BD(priv); |
2076 | int ret; | |
2077 | ||
4055eaef | 2078 | ring->priv = priv; |
8ac467e8 | 2079 | ring->index = index; |
4055eaef PG |
2080 | if (index == DESC_INDEX) { |
2081 | ring->int_enable = bcmgenet_rx_ring16_int_enable; | |
2082 | ring->int_disable = bcmgenet_rx_ring16_int_disable; | |
2083 | } else { | |
2084 | ring->int_enable = bcmgenet_rx_ring_int_enable; | |
2085 | ring->int_disable = bcmgenet_rx_ring_int_disable; | |
2086 | } | |
8ac467e8 PG |
2087 | ring->cbs = priv->rx_cbs + start_ptr; |
2088 | ring->size = size; | |
2089 | ring->c_index = 0; | |
2090 | ring->read_ptr = start_ptr; | |
2091 | ring->cb_ptr = start_ptr; | |
2092 | ring->end_ptr = end_ptr - 1; | |
1c1008c7 | 2093 | |
8ac467e8 PG |
2094 | ret = bcmgenet_alloc_rx_buffers(priv, ring); |
2095 | if (ret) | |
1c1008c7 | 2096 | return ret; |
1c1008c7 | 2097 | |
1c1008c7 FF |
2098 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); |
2099 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); | |
6f5a272c | 2100 | bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); |
1c1008c7 | 2101 | bcmgenet_rdma_ring_writel(priv, index, |
c91b7f66 FF |
2102 | ((size << DMA_RING_SIZE_SHIFT) | |
2103 | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); | |
1c1008c7 | 2104 | bcmgenet_rdma_ring_writel(priv, index, |
c91b7f66 FF |
2105 | (DMA_FC_THRESH_LO << |
2106 | DMA_XOFF_THRESHOLD_SHIFT) | | |
2107 | DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); | |
6f5a272c PG |
2108 | |
2109 | /* Set start and end address, read and write pointers */ | |
8ac467e8 PG |
2110 | bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, |
2111 | DMA_START_ADDR); | |
2112 | bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, | |
2113 | RDMA_READ_PTR); | |
2114 | bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, | |
2115 | RDMA_WRITE_PTR); | |
2116 | bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, | |
6f5a272c | 2117 | DMA_END_ADDR); |
1c1008c7 FF |
2118 | |
2119 | return ret; | |
2120 | } | |
2121 | ||
e2aadb4a PG |
2122 | static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv) |
2123 | { | |
2124 | unsigned int i; | |
2125 | struct bcmgenet_tx_ring *ring; | |
2126 | ||
2127 | for (i = 0; i < priv->hw_params->tx_queues; ++i) { | |
2128 | ring = &priv->tx_rings[i]; | |
d64b5e85 | 2129 | netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); |
e2aadb4a PG |
2130 | } |
2131 | ||
2132 | ring = &priv->tx_rings[DESC_INDEX]; | |
d64b5e85 | 2133 | netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); |
e2aadb4a PG |
2134 | } |
2135 | ||
2136 | static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) | |
2137 | { | |
2138 | unsigned int i; | |
2139 | struct bcmgenet_tx_ring *ring; | |
2140 | ||
2141 | for (i = 0; i < priv->hw_params->tx_queues; ++i) { | |
2142 | ring = &priv->tx_rings[i]; | |
2143 | napi_enable(&ring->napi); | |
2144 | } | |
2145 | ||
2146 | ring = &priv->tx_rings[DESC_INDEX]; | |
2147 | napi_enable(&ring->napi); | |
2148 | } | |
2149 | ||
2150 | static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) | |
2151 | { | |
2152 | unsigned int i; | |
2153 | struct bcmgenet_tx_ring *ring; | |
2154 | ||
2155 | for (i = 0; i < priv->hw_params->tx_queues; ++i) { | |
2156 | ring = &priv->tx_rings[i]; | |
2157 | napi_disable(&ring->napi); | |
2158 | } | |
2159 | ||
2160 | ring = &priv->tx_rings[DESC_INDEX]; | |
2161 | napi_disable(&ring->napi); | |
2162 | } | |
2163 | ||
2164 | static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) | |
2165 | { | |
2166 | unsigned int i; | |
2167 | struct bcmgenet_tx_ring *ring; | |
2168 | ||
2169 | for (i = 0; i < priv->hw_params->tx_queues; ++i) { | |
2170 | ring = &priv->tx_rings[i]; | |
2171 | netif_napi_del(&ring->napi); | |
2172 | } | |
2173 | ||
2174 | ring = &priv->tx_rings[DESC_INDEX]; | |
2175 | netif_napi_del(&ring->napi); | |
2176 | } | |
2177 | ||
16c6d667 | 2178 | /* Initialize Tx queues |
1c1008c7 | 2179 | * |
16c6d667 | 2180 | * Queues 0-3 are priority-based, each one has 32 descriptors, |
1c1008c7 FF |
2181 | * with queue 0 being the highest priority queue. |
2182 | * | |
16c6d667 | 2183 | * Queue 16 is the default Tx queue with |
51a966a7 | 2184 | * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. |
1c1008c7 | 2185 | * |
16c6d667 PG |
2186 | * The transmit control block pool is then partitioned as follows: |
2187 | * - Tx queue 0 uses tx_cbs[0..31] | |
2188 | * - Tx queue 1 uses tx_cbs[32..63] | |
2189 | * - Tx queue 2 uses tx_cbs[64..95] | |
2190 | * - Tx queue 3 uses tx_cbs[96..127] | |
2191 | * - Tx queue 16 uses tx_cbs[128..255] | |
1c1008c7 | 2192 | */ |
16c6d667 | 2193 | static void bcmgenet_init_tx_queues(struct net_device *dev) |
1c1008c7 FF |
2194 | { |
2195 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
16c6d667 PG |
2196 | u32 i, dma_enable; |
2197 | u32 dma_ctrl, ring_cfg; | |
37742166 | 2198 | u32 dma_priority[3] = {0, 0, 0}; |
1c1008c7 | 2199 | |
1c1008c7 FF |
2200 | dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); |
2201 | dma_enable = dma_ctrl & DMA_EN; | |
2202 | dma_ctrl &= ~DMA_EN; | |
2203 | bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); | |
2204 | ||
16c6d667 PG |
2205 | dma_ctrl = 0; |
2206 | ring_cfg = 0; | |
2207 | ||
1c1008c7 FF |
2208 | /* Enable strict priority arbiter mode */ |
2209 | bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); | |
2210 | ||
16c6d667 | 2211 | /* Initialize Tx priority queues */ |
1c1008c7 | 2212 | for (i = 0; i < priv->hw_params->tx_queues; i++) { |
51a966a7 PG |
2213 | bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, |
2214 | i * priv->hw_params->tx_bds_per_q, | |
2215 | (i + 1) * priv->hw_params->tx_bds_per_q); | |
16c6d667 PG |
2216 | ring_cfg |= (1 << i); |
2217 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | |
37742166 PG |
2218 | dma_priority[DMA_PRIO_REG_INDEX(i)] |= |
2219 | ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); | |
1c1008c7 FF |
2220 | } |
2221 | ||
16c6d667 | 2222 | /* Initialize Tx default queue 16 */ |
51a966a7 | 2223 | bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, |
16c6d667 | 2224 | priv->hw_params->tx_queues * |
51a966a7 | 2225 | priv->hw_params->tx_bds_per_q, |
16c6d667 PG |
2226 | TOTAL_DESC); |
2227 | ring_cfg |= (1 << DESC_INDEX); | |
2228 | dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); | |
37742166 PG |
2229 | dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= |
2230 | ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << | |
2231 | DMA_PRIO_REG_SHIFT(DESC_INDEX)); | |
16c6d667 PG |
2232 | |
2233 | /* Set Tx queue priorities */ | |
37742166 PG |
2234 | bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); |
2235 | bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); | |
2236 | bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); | |
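/* Worked example of the priority packing above (using the 4-queue,
 * 32-BDs-per-queue layout described before bcmgenet_init_tx_queues,
 * with GENET_Q0_PRIORITY == 0; the field position of each ring is
 * supplied by DMA_PRIO_REG_INDEX()/DMA_PRIO_REG_SHIFT()): Tx rings 0..3
 * are assigned priorities 0..3 and the default ring 16 gets priority 4,
 * so ring 0 remains the highest-priority queue under the strict-priority
 * arbiter enabled above.
 */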
2237 | ||
e2aadb4a PG |
2238 | /* Initialize Tx NAPI */ |
2239 | bcmgenet_init_tx_napi(priv); | |
2240 | ||
16c6d667 PG |
2241 | /* Enable Tx queues */ |
2242 | bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); | |
1c1008c7 | 2243 | |
16c6d667 | 2244 | /* Enable Tx DMA */ |
1c1008c7 | 2245 | if (dma_enable) |
16c6d667 PG |
2246 | dma_ctrl |= DMA_EN; |
2247 | bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); | |
1c1008c7 FF |
2248 | } |
2249 | ||
3ab11339 PG |
2250 | static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv) |
2251 | { | |
4055eaef PG |
2252 | unsigned int i; |
2253 | struct bcmgenet_rx_ring *ring; | |
2254 | ||
2255 | for (i = 0; i < priv->hw_params->rx_queues; ++i) { | |
2256 | ring = &priv->rx_rings[i]; | |
2257 | netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); | |
2258 | } | |
2259 | ||
2260 | ring = &priv->rx_rings[DESC_INDEX]; | |
2261 | netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); | |
3ab11339 PG |
2262 | } |
2263 | ||
2264 | static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) | |
2265 | { | |
4055eaef PG |
2266 | unsigned int i; |
2267 | struct bcmgenet_rx_ring *ring; | |
2268 | ||
2269 | for (i = 0; i < priv->hw_params->rx_queues; ++i) { | |
2270 | ring = &priv->rx_rings[i]; | |
2271 | napi_enable(&ring->napi); | |
2272 | } | |
2273 | ||
2274 | ring = &priv->rx_rings[DESC_INDEX]; | |
2275 | napi_enable(&ring->napi); | |
3ab11339 PG |
2276 | } |
2277 | ||
2278 | static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) | |
2279 | { | |
4055eaef PG |
2280 | unsigned int i; |
2281 | struct bcmgenet_rx_ring *ring; | |
2282 | ||
2283 | for (i = 0; i < priv->hw_params->rx_queues; ++i) { | |
2284 | ring = &priv->rx_rings[i]; | |
2285 | napi_disable(&ring->napi); | |
2286 | } | |
2287 | ||
2288 | ring = &priv->rx_rings[DESC_INDEX]; | |
2289 | napi_disable(&ring->napi); | |
3ab11339 PG |
2290 | } |
2291 | ||
2292 | static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) | |
2293 | { | |
4055eaef PG |
2294 | unsigned int i; |
2295 | struct bcmgenet_rx_ring *ring; | |
2296 | ||
2297 | for (i = 0; i < priv->hw_params->rx_queues; ++i) { | |
2298 | ring = &priv->rx_rings[i]; | |
2299 | netif_napi_del(&ring->napi); | |
2300 | } | |
2301 | ||
2302 | ring = &priv->rx_rings[DESC_INDEX]; | |
2303 | netif_napi_del(&ring->napi); | |
3ab11339 PG |
2304 | } |
2305 | ||
8ac467e8 PG |
2306 | /* Initialize Rx queues |
2307 | * | |
2308 | * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be | |
2309 | * used to direct traffic to these queues. | |
2310 | * | |
2311 | * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. | |
2312 | */ | |
2313 | static int bcmgenet_init_rx_queues(struct net_device *dev) | |
2314 | { | |
2315 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2316 | u32 i; | |
2317 | u32 dma_enable; | |
2318 | u32 dma_ctrl; | |
2319 | u32 ring_cfg; | |
2320 | int ret; | |
2321 | ||
2322 | dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2323 | dma_enable = dma_ctrl & DMA_EN; | |
2324 | dma_ctrl &= ~DMA_EN; | |
2325 | bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); | |
2326 | ||
2327 | dma_ctrl = 0; | |
2328 | ring_cfg = 0; | |
2329 | ||
2330 | /* Initialize Rx priority queues */ | |
2331 | for (i = 0; i < priv->hw_params->rx_queues; i++) { | |
2332 | ret = bcmgenet_init_rx_ring(priv, i, | |
2333 | priv->hw_params->rx_bds_per_q, | |
2334 | i * priv->hw_params->rx_bds_per_q, | |
2335 | (i + 1) * | |
2336 | priv->hw_params->rx_bds_per_q); | |
2337 | if (ret) | |
2338 | return ret; | |
2339 | ||
2340 | ring_cfg |= (1 << i); | |
2341 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | |
2342 | } | |
2343 | ||
2344 | /* Initialize Rx default queue 16 */ | |
2345 | ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, | |
2346 | priv->hw_params->rx_queues * | |
2347 | priv->hw_params->rx_bds_per_q, | |
2348 | TOTAL_DESC); | |
2349 | if (ret) | |
2350 | return ret; | |
2351 | ||
2352 | ring_cfg |= (1 << DESC_INDEX); | |
2353 | dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); | |
2354 | ||
3ab11339 PG |
2355 | /* Initialize Rx NAPI */ |
2356 | bcmgenet_init_rx_napi(priv); | |
2357 | ||
8ac467e8 PG |
2358 | /* Enable rings */ |
2359 | bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); | |
2360 | ||
2361 | /* Configure ring as descriptor ring and re-enable DMA if enabled */ | |
2362 | if (dma_enable) | |
2363 | dma_ctrl |= DMA_EN; | |
2364 | bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); | |
2365 | ||
2366 | return 0; | |
2367 | } | |
2368 | ||
4a0c081e FF |
2369 | static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) |
2370 | { | |
2371 | int ret = 0; | |
2372 | int timeout = 0; | |
2373 | u32 reg; | |
b6df7d61 JS |
2374 | u32 dma_ctrl; |
2375 | int i; | |
4a0c081e FF |
2376 | |
2377 | /* Disable TDMA to stop adding more frames to the Tx DMA */ | |
2378 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
2379 | reg &= ~DMA_EN; | |
2380 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2381 | ||
2382 | /* Check TDMA status register to confirm TDMA is disabled */ | |
2383 | while (timeout++ < DMA_TIMEOUT_VAL) { | |
2384 | reg = bcmgenet_tdma_readl(priv, DMA_STATUS); | |
2385 | if (reg & DMA_DISABLED) | |
2386 | break; | |
2387 | ||
2388 | udelay(1); | |
2389 | } | |
2390 | ||
2391 | if (timeout == DMA_TIMEOUT_VAL) { | |
2392 | netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); | |
2393 | ret = -ETIMEDOUT; | |
2394 | } | |
2395 | ||
2396 | /* Wait 10ms for packet drain in both tx and rx dma */ | |
2397 | usleep_range(10000, 20000); | |
2398 | ||
2399 | /* Disable RDMA */ | |
2400 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2401 | reg &= ~DMA_EN; | |
2402 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
2403 | ||
2404 | timeout = 0; | |
2405 | /* Check RDMA status register to confirm RDMA is disabled */ | |
2406 | while (timeout++ < DMA_TIMEOUT_VAL) { | |
2407 | reg = bcmgenet_rdma_readl(priv, DMA_STATUS); | |
2408 | if (reg & DMA_DISABLED) | |
2409 | break; | |
2410 | ||
2411 | udelay(1); | |
2412 | } | |
2413 | ||
2414 | if (timeout == DMA_TIMEOUT_VAL) { | |
2415 | netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); | |
2416 | ret = -ETIMEDOUT; | |
2417 | } | |
2418 | ||
b6df7d61 JS |
2419 | dma_ctrl = 0; |
2420 | for (i = 0; i < priv->hw_params->rx_queues; i++) | |
2421 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | |
2422 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2423 | reg &= ~dma_ctrl; | |
2424 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
2425 | ||
2426 | dma_ctrl = 0; | |
2427 | for (i = 0; i < priv->hw_params->tx_queues; i++) | |
2428 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | |
2429 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
2430 | reg &= ~dma_ctrl; | |
2431 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2432 | ||
4a0c081e FF |
2433 | return ret; |
2434 | } | |
2435 | ||
9abab96d | 2436 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) |
1c1008c7 FF |
2437 | { |
2438 | int i; | |
e178c8c2 | 2439 | struct netdev_queue *txq; |
1c1008c7 | 2440 | |
9abab96d PG |
2441 | bcmgenet_fini_rx_napi(priv); |
2442 | bcmgenet_fini_tx_napi(priv); | |
2443 | ||
1c1008c7 | 2444 | /* disable DMA */ |
4a0c081e | 2445 | bcmgenet_dma_teardown(priv); |
1c1008c7 FF |
2446 | |
2447 | for (i = 0; i < priv->num_tx_bds; i++) { | |
2448 | if (priv->tx_cbs[i].skb != NULL) { | |
2449 | dev_kfree_skb(priv->tx_cbs[i].skb); | |
2450 | priv->tx_cbs[i].skb = NULL; | |
2451 | } | |
2452 | } | |
2453 | ||
e178c8c2 PG |
2454 | for (i = 0; i < priv->hw_params->tx_queues; i++) { |
2455 | txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue); | |
2456 | netdev_tx_reset_queue(txq); | |
2457 | } | |
2458 | ||
2459 | txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue); | |
2460 | netdev_tx_reset_queue(txq); | |
2461 | ||
1c1008c7 FF |
2462 | bcmgenet_free_rx_buffers(priv); |
2463 | kfree(priv->rx_cbs); | |
2464 | kfree(priv->tx_cbs); | |
2465 | } | |
2466 | ||
2467 | /* bcmgenet_init_dma: initialize Rx/Tx rings, buffers and DMA control registers */ | |
2468 | static int bcmgenet_init_dma(struct bcmgenet_priv *priv) | |
2469 | { | |
2470 | int ret; | |
014012a4 PG |
2471 | unsigned int i; |
2472 | struct enet_cb *cb; | |
1c1008c7 | 2473 | |
6f5a272c | 2474 | netif_dbg(priv, hw, priv->dev, "%s\n", __func__); |
1c1008c7 | 2475 | |
6f5a272c PG |
2476 | /* Initialize common Rx ring structures */ |
2477 | priv->rx_bds = priv->base + priv->hw_params->rdma_offset; | |
2478 | priv->num_rx_bds = TOTAL_DESC; | |
2479 | priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), | |
2480 | GFP_KERNEL); | |
2481 | if (!priv->rx_cbs) | |
2482 | return -ENOMEM; | |
2483 | ||
2484 | for (i = 0; i < priv->num_rx_bds; i++) { | |
2485 | cb = priv->rx_cbs + i; | |
2486 | cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; | |
2487 | } | |
2488 | ||
7fc527f9 | 2489 | /* Initialize common TX ring structures */ |
1c1008c7 FF |
2490 | priv->tx_bds = priv->base + priv->hw_params->tdma_offset; |
2491 | priv->num_tx_bds = TOTAL_DESC; | |
c489be08 | 2492 | priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), |
c91b7f66 | 2493 | GFP_KERNEL); |
1c1008c7 | 2494 | if (!priv->tx_cbs) { |
ebbd96fb | 2495 | kfree(priv->rx_cbs); |
1c1008c7 FF |
2496 | return -ENOMEM; |
2497 | } | |
2498 | ||
014012a4 PG |
2499 | for (i = 0; i < priv->num_tx_bds; i++) { |
2500 | cb = priv->tx_cbs + i; | |
2501 | cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; | |
2502 | } | |
2503 | ||
ebbd96fb PG |
2504 | /* Init rDma */ |
2505 | bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); | |
2506 | ||
2507 | /* Initialize Rx queues */ | |
2508 | ret = bcmgenet_init_rx_queues(priv->dev); | |
2509 | if (ret) { | |
2510 | netdev_err(priv->dev, "failed to initialize Rx queues\n"); | |
2511 | bcmgenet_free_rx_buffers(priv); | |
2512 | kfree(priv->rx_cbs); | |
2513 | kfree(priv->tx_cbs); | |
2514 | return ret; | |
2515 | } | |
2516 | ||
2517 | /* Init tDma */ | |
2518 | bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); | |
2519 | ||
16c6d667 PG |
2520 | /* Initialize Tx queues */ |
2521 | bcmgenet_init_tx_queues(priv->dev); | |
1c1008c7 FF |
2522 | |
2523 | return 0; | |
2524 | } | |
2525 | ||
1c1008c7 FF |
2526 | /* Interrupt bottom half */ |
2527 | static void bcmgenet_irq_task(struct work_struct *work) | |
2528 | { | |
07c52d6a DB |
2529 | unsigned long flags; |
2530 | unsigned int status; | |
1c1008c7 FF |
2531 | struct bcmgenet_priv *priv = container_of( |
2532 | work, struct bcmgenet_priv, bcmgenet_irq_work); | |
2533 | ||
2534 | netif_dbg(priv, intr, priv->dev, "%s\n", __func__); | |
2535 | ||
07c52d6a DB |
2536 | spin_lock_irqsave(&priv->lock, flags); |
2537 | status = priv->irq0_stat; | |
2538 | priv->irq0_stat = 0; | |
2539 | spin_unlock_irqrestore(&priv->lock, flags); | |
2540 | ||
2541 | if (status & UMAC_IRQ_MPD_R) { | |
8fdb0e0f FF |
2542 | netif_dbg(priv, wol, priv->dev, |
2543 | "magic packet detected, waking up\n"); | |
2544 | bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); | |
2545 | } | |
2546 | ||
1c1008c7 | 2547 | /* Link UP/DOWN event */ |
07c52d6a | 2548 | if (status & UMAC_IRQ_LINK_EVENT) |
0299b6ac | 2549 | phy_mac_interrupt(priv->phydev, |
07c52d6a | 2550 | !!(status & UMAC_IRQ_LINK_UP)); |
1c1008c7 FF |
2551 | } |
2552 | ||
4055eaef | 2553 | /* bcmgenet_isr1: handle Rx and Tx priority queues */ |
1c1008c7 FF |
2554 | static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) |
2555 | { | |
2556 | struct bcmgenet_priv *priv = dev_id; | |
4055eaef PG |
2557 | struct bcmgenet_rx_ring *rx_ring; |
2558 | struct bcmgenet_tx_ring *tx_ring; | |
07c52d6a | 2559 | unsigned int index, status; |
1c1008c7 | 2560 | |
07c52d6a DB |
2561 | /* Read irq status */ |
2562 | status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & | |
4092e6ac | 2563 | ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); |
4055eaef | 2564 | |
7fc527f9 | 2565 | /* clear interrupts */ |
07c52d6a | 2566 | bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR); |
1c1008c7 FF |
2567 | |
2568 | netif_dbg(priv, intr, priv->dev, | |
07c52d6a | 2569 | "%s: IRQ=0x%x\n", __func__, status); |
4092e6ac | 2570 | |
4055eaef PG |
2571 | /* Check Rx priority queue interrupts */ |
2572 | for (index = 0; index < priv->hw_params->rx_queues; index++) { | |
07c52d6a | 2573 | if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) |
4055eaef PG |
2574 | continue; |
2575 | ||
2576 | rx_ring = &priv->rx_rings[index]; | |
2577 | ||
2578 | if (likely(napi_schedule_prep(&rx_ring->napi))) { | |
2579 | rx_ring->int_disable(rx_ring); | |
dac916f8 | 2580 | __napi_schedule_irqoff(&rx_ring->napi); |
4055eaef PG |
2581 | } |
2582 | } | |
2583 | ||
2584 | /* Check Tx priority queue interrupts */ | |
4092e6ac | 2585 | for (index = 0; index < priv->hw_params->tx_queues; index++) { |
07c52d6a | 2586 | if (!(status & BIT(index))) |
4092e6ac JS |
2587 | continue; |
2588 | ||
4055eaef | 2589 | tx_ring = &priv->tx_rings[index]; |
4092e6ac | 2590 | |
4055eaef PG |
2591 | if (likely(napi_schedule_prep(&tx_ring->napi))) { |
2592 | tx_ring->int_disable(tx_ring); | |
dac916f8 | 2593 | __napi_schedule_irqoff(&tx_ring->napi); |
1c1008c7 FF |
2594 | } |
2595 | } | |
4092e6ac | 2596 | |
1c1008c7 FF |
2597 | return IRQ_HANDLED; |
2598 | } | |
2599 | ||
4055eaef | 2600 | /* bcmgenet_isr0: handle Rx and Tx default queues + misc (link, MDIO, magic packet) events */ |
1c1008c7 FF |
2601 | static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) |
2602 | { | |
2603 | struct bcmgenet_priv *priv = dev_id; | |
4055eaef PG |
2604 | struct bcmgenet_rx_ring *rx_ring; |
2605 | struct bcmgenet_tx_ring *tx_ring; | |
07c52d6a DB |
2606 | unsigned int status; |
2607 | unsigned long flags; | |
1c1008c7 | 2608 | |
07c52d6a DB |
2609 | /* Read irq status */ |
2610 | status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & | |
1c1008c7 | 2611 | ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); |
4055eaef | 2612 | |
7fc527f9 | 2613 | /* clear interrupts */ |
07c52d6a | 2614 | bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR); |
1c1008c7 FF |
2615 | |
2616 | netif_dbg(priv, intr, priv->dev, | |
07c52d6a | 2617 | "IRQ=0x%x\n", status); |
1c1008c7 | 2618 | |
07c52d6a | 2619 | if (status & UMAC_IRQ_RXDMA_DONE) { |
4055eaef PG |
2620 | rx_ring = &priv->rx_rings[DESC_INDEX]; |
2621 | ||
2622 | if (likely(napi_schedule_prep(&rx_ring->napi))) { | |
2623 | rx_ring->int_disable(rx_ring); | |
dac916f8 | 2624 | __napi_schedule_irqoff(&rx_ring->napi); |
1c1008c7 FF |
2625 | } |
2626 | } | |
4092e6ac | 2627 | |
07c52d6a | 2628 | if (status & UMAC_IRQ_TXDMA_DONE) { |
4055eaef PG |
2629 | tx_ring = &priv->tx_rings[DESC_INDEX]; |
2630 | ||
2631 | if (likely(napi_schedule_prep(&tx_ring->napi))) { | |
2632 | tx_ring->int_disable(tx_ring); | |
dac916f8 | 2633 | __napi_schedule_irqoff(&tx_ring->napi); |
4092e6ac | 2634 | } |
1c1008c7 | 2635 | } |
4055eaef | 2636 | |
1c1008c7 | 2637 | if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && |
07c52d6a | 2638 | status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { |
1c1008c7 FF |
2639 | wake_up(&priv->wq); |
2640 | } | |
2641 | ||
07c52d6a DB |
2642 | /* all other interrupts of interest are handled in the bottom half */ | |
2643 | status &= (UMAC_IRQ_LINK_EVENT | | |
2644 | UMAC_IRQ_MPD_R); | |
2645 | if (status) { | |
2646 | /* Save irq status for bottom-half processing. */ | |
2647 | spin_lock_irqsave(&priv->lock, flags); | |
2648 | priv->irq0_stat |= status; | |
2649 | spin_unlock_irqrestore(&priv->lock, flags); | |
2650 | ||
2651 | schedule_work(&priv->bcmgenet_irq_work); | |
2652 | } | |
2653 | ||
1c1008c7 FF |
2654 | return IRQ_HANDLED; |
2655 | } | |
2656 | ||
8562056f FF |
2657 | static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) |
2658 | { | |
2659 | struct bcmgenet_priv *priv = dev_id; | |
2660 | ||
2661 | pm_wakeup_event(&priv->pdev->dev, 0); | |
2662 | ||
2663 | return IRQ_HANDLED; | |
2664 | } | |
2665 | ||
4d2e8882 FF |
2666 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2667 | static void bcmgenet_poll_controller(struct net_device *dev) | |
2668 | { | |
2669 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2670 | ||
2671 | /* Invoke the main RX/TX interrupt handler */ | |
2672 | disable_irq(priv->irq0); | |
2673 | bcmgenet_isr0(priv->irq0, priv); | |
2674 | enable_irq(priv->irq0); | |
2675 | ||
2676 | /* And the interrupt handler for RX/TX priority queues */ | |
2677 | disable_irq(priv->irq1); | |
2678 | bcmgenet_isr1(priv->irq1, priv); | |
2679 | enable_irq(priv->irq1); | |
2680 | } | |
2681 | #endif | |
2682 | ||
1c1008c7 FF |
2683 | static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) |
2684 | { | |
2685 | u32 reg; | |
2686 | ||
2687 | reg = bcmgenet_rbuf_ctrl_get(priv); | |
2688 | reg |= BIT(1); | |
2689 | bcmgenet_rbuf_ctrl_set(priv, reg); | |
2690 | udelay(10); | |
2691 | ||
2692 | reg &= ~BIT(1); | |
2693 | bcmgenet_rbuf_ctrl_set(priv, reg); | |
2694 | udelay(10); | |
2695 | } | |
2696 | ||
2697 | static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, | |
c91b7f66 | 2698 | unsigned char *addr) |
1c1008c7 FF |
2699 | { |
2700 | bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | | |
2701 | (addr[2] << 8) | addr[3], UMAC_MAC0); | |
2702 | bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); | |
2703 | } | |
2704 | ||
1c1008c7 FF |
2705 | /* Returns a reusable dma control register value */ |
2706 | static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) | |
2707 | { | |
2708 | u32 reg; | |
2709 | u32 dma_ctrl; | |
2710 | ||
2711 | /* disable DMA */ | |
2712 | dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; | |
2713 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
2714 | reg &= ~dma_ctrl; | |
2715 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2716 | ||
2717 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2718 | reg &= ~dma_ctrl; | |
2719 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
2720 | ||
2721 | bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); | |
2722 | udelay(10); | |
2723 | bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); | |
2724 | ||
2725 | return dma_ctrl; | |
2726 | } | |
2727 | ||
2728 | static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) | |
2729 | { | |
2730 | u32 reg; | |
2731 | ||
2732 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2733 | reg |= dma_ctrl; | |
2734 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
2735 | ||
2736 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
2737 | reg |= dma_ctrl; | |
2738 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2739 | } | |
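/* Usage sketch (this is how the pair is used in bcmgenet_open() below):
 *
 *	dma_ctrl = bcmgenet_dma_disable(priv);
 *	ret = bcmgenet_init_dma(priv);
 *	bcmgenet_enable_dma(priv, dma_ctrl);
 *
 * bcmgenet_dma_disable() returns the mask of DMA_CTRL bits it cleared
 * (ring 16 buffer enable plus DMA_EN) so they can simply be OR'ed back
 * in once the rings have been rebuilt.
 */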
2740 | ||
0034de41 PG |
2741 | /* bcmgenet_hfb_clear |
2742 | * | |
2743 | * Clear Hardware Filter Block and disable all filtering. | |
2744 | */ | |
2745 | static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) | |
2746 | { | |
2747 | u32 i; | |
2748 | ||
2749 | bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); | |
2750 | bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); | |
2751 | bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); | |
2752 | ||
2753 | for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++) | |
2754 | bcmgenet_rdma_writel(priv, 0x0, i); | |
2755 | ||
2756 | for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) | |
2757 | bcmgenet_hfb_reg_writel(priv, 0x0, | |
2758 | HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); | |
2759 | ||
2760 | for (i = 0; i < priv->hw_params->hfb_filter_cnt * | |
2761 | priv->hw_params->hfb_filter_size; i++) | |
2762 | bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32)); | |
2763 | } | |
2764 | ||
2765 | static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) | |
2766 | { | |
2767 | if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) | |
2768 | return; | |
2769 | ||
2770 | bcmgenet_hfb_clear(priv); | |
2771 | } | |
2772 | ||
909ff5ef FF |
2773 | static void bcmgenet_netif_start(struct net_device *dev) |
2774 | { | |
2775 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2776 | ||
2777 | /* Start the network engine */ | |
3ab11339 | 2778 | bcmgenet_enable_rx_napi(priv); |
e2aadb4a | 2779 | bcmgenet_enable_tx_napi(priv); |
909ff5ef FF |
2780 | |
2781 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); | |
2782 | ||
909ff5ef FF |
2783 | netif_tx_start_all_queues(dev); |
2784 | ||
37850e37 FF |
2785 | /* Monitor link interrupts now */ |
2786 | bcmgenet_link_intr_enable(priv); | |
2787 | ||
0299b6ac | 2788 | phy_start(priv->phydev); |
909ff5ef FF |
2789 | } |
2790 | ||
1c1008c7 FF |
2791 | static int bcmgenet_open(struct net_device *dev) |
2792 | { | |
2793 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2794 | unsigned long dma_ctrl; | |
2795 | u32 reg; | |
2796 | int ret; | |
2797 | ||
2798 | netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); | |
2799 | ||
2800 | /* Turn on the clock */ | |
7d5d3075 | 2801 | clk_prepare_enable(priv->clk); |
1c1008c7 | 2802 | |
a642c4f7 FF |
2803 | /* If this is an internal GPHY, power it back on now, before UniMAC is |
2804 | * brought out of reset as absolutely no UniMAC activity is allowed | |
2805 | */ | |
c624f891 | 2806 | if (priv->internal_phy) |
a642c4f7 FF |
2807 | bcmgenet_power_up(priv, GENET_POWER_PASSIVE); |
2808 | ||
1c1008c7 FF |
2809 | /* take MAC out of reset */ |
2810 | bcmgenet_umac_reset(priv); | |
2811 | ||
2812 | ret = init_umac(priv); | |
2813 | if (ret) | |
2814 | goto err_clk_disable; | |
2815 | ||
2816 | /* disable ethernet MAC while updating its registers */ | |
e29585b8 | 2817 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); |
1c1008c7 | 2818 | |
909ff5ef FF |
2819 | /* Make sure we reflect the value of CRC_CMD_FWD */ |
2820 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
2821 | priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); | |
2822 | ||
1c1008c7 FF |
2823 | bcmgenet_set_hw_addr(priv, dev->dev_addr); |
2824 | ||
c624f891 | 2825 | if (priv->internal_phy) { |
1c1008c7 FF |
2826 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); |
2827 | reg |= EXT_ENERGY_DET_MASK; | |
2828 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | |
2829 | } | |
2830 | ||
2831 | /* Disable RX/TX DMA and flush TX queues */ | |
2832 | dma_ctrl = bcmgenet_dma_disable(priv); | |
2833 | ||
2834 | /* Reinitialize TDMA and RDMA and SW housekeeping */ | |
2835 | ret = bcmgenet_init_dma(priv); | |
2836 | if (ret) { | |
2837 | netdev_err(dev, "failed to initialize DMA\n"); | |
fac25940 | 2838 | goto err_clk_disable; |
1c1008c7 FF |
2839 | } |
2840 | ||
2841 | /* Always enable ring 16 - descriptor ring */ | |
2842 | bcmgenet_enable_dma(priv, dma_ctrl); | |
2843 | ||
0034de41 PG |
2844 | /* HFB init */ |
2845 | bcmgenet_hfb_init(priv); | |
2846 | ||
1c1008c7 | 2847 | ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, |
c91b7f66 | 2848 | dev->name, priv); |
1c1008c7 FF |
2849 | if (ret < 0) { |
2850 | netdev_err(dev, "can't request IRQ %d\n", priv->irq0); | |
2851 | goto err_fini_dma; | |
2852 | } | |
2853 | ||
2854 | ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, | |
c91b7f66 | 2855 | dev->name, priv); |
1c1008c7 FF |
2856 | if (ret < 0) { |
2857 | netdev_err(dev, "can't request IRQ %d\n", priv->irq1); | |
2858 | goto err_irq0; | |
2859 | } | |
2860 | ||
6cc8e6d4 FF |
2861 | ret = bcmgenet_mii_probe(dev); |
2862 | if (ret) { | |
2863 | netdev_err(dev, "failed to connect to PHY\n"); | |
2864 | goto err_irq1; | |
2865 | } | |
c96e731c | 2866 | |
909ff5ef | 2867 | bcmgenet_netif_start(dev); |
1c1008c7 FF |
2868 | |
2869 | return 0; | |
2870 | ||
6cc8e6d4 FF |
2871 | err_irq1: |
2872 | free_irq(priv->irq1, priv); | |
1c1008c7 | 2873 | err_irq0: |
978ffac4 | 2874 | free_irq(priv->irq0, priv); |
1c1008c7 FF |
2875 | err_fini_dma: |
2876 | bcmgenet_fini_dma(priv); | |
2877 | err_clk_disable: | |
7627409c DB |
2878 | if (priv->internal_phy) |
2879 | bcmgenet_power_down(priv, GENET_POWER_PASSIVE); | |
7d5d3075 | 2880 | clk_disable_unprepare(priv->clk); |
1c1008c7 FF |
2881 | return ret; |
2882 | } | |
2883 | ||
909ff5ef FF |
2884 | static void bcmgenet_netif_stop(struct net_device *dev) |
2885 | { | |
2886 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2887 | ||
2888 | netif_tx_stop_all_queues(dev); | |
0299b6ac | 2889 | phy_stop(priv->phydev); |
909ff5ef | 2890 | bcmgenet_intr_disable(priv); |
3ab11339 | 2891 | bcmgenet_disable_rx_napi(priv); |
e2aadb4a | 2892 | bcmgenet_disable_tx_napi(priv); |
909ff5ef FF |
2893 | |
2894 | /* Wait for pending work items to complete. Since interrupts are | |
2895 | * disabled no new work will be scheduled. | |
2896 | */ | |
2897 | cancel_work_sync(&priv->bcmgenet_irq_work); | |
cc013fb4 | 2898 | |
cc013fb4 | 2899 | priv->old_link = -1; |
5ad6e6c5 | 2900 | priv->old_speed = -1; |
cc013fb4 | 2901 | priv->old_duplex = -1; |
5ad6e6c5 | 2902 | priv->old_pause = -1; |
909ff5ef FF |
2903 | } |
2904 | ||
1c1008c7 FF |
2905 | static int bcmgenet_close(struct net_device *dev) |
2906 | { | |
2907 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2908 | int ret; | |
1c1008c7 FF |
2909 | |
2910 | netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); | |
2911 | ||
909ff5ef | 2912 | bcmgenet_netif_stop(dev); |
1c1008c7 | 2913 | |
c96e731c | 2914 | /* Really kill the PHY state machine and disconnect from it */ |
0299b6ac | 2915 | phy_disconnect(priv->phydev); |
c96e731c | 2916 | |
1c1008c7 | 2917 | /* Disable MAC receive */ |
e29585b8 | 2918 | umac_enable_set(priv, CMD_RX_EN, false); |
1c1008c7 | 2919 | |
1c1008c7 FF |
2920 | ret = bcmgenet_dma_teardown(priv); |
2921 | if (ret) | |
2922 | return ret; | |
2923 | ||
2924 | /* Disable MAC transmit. Tx DMA must be disabled before this. */ | |
e29585b8 | 2925 | umac_enable_set(priv, CMD_TX_EN, false); |
1c1008c7 | 2926 | |
1c1008c7 FF |
2927 | /* tx reclaim */ |
2928 | bcmgenet_tx_reclaim_all(dev); | |
2929 | bcmgenet_fini_dma(priv); | |
2930 | ||
2931 | free_irq(priv->irq0, priv); | |
2932 | free_irq(priv->irq1, priv); | |
2933 | ||
c624f891 | 2934 | if (priv->internal_phy) |
ca8cf341 | 2935 | ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); |
1c1008c7 | 2936 | |
7d5d3075 | 2937 | clk_disable_unprepare(priv->clk); |
1c1008c7 | 2938 | |
ca8cf341 | 2939 | return ret; |
1c1008c7 FF |
2940 | } |
2941 | ||
13ea6578 FF |
2942 | static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) |
2943 | { | |
2944 | struct bcmgenet_priv *priv = ring->priv; | |
2945 | u32 p_index, c_index, intsts, intmsk; | |
2946 | struct netdev_queue *txq; | |
2947 | unsigned int free_bds; | |
2948 | unsigned long flags; | |
2949 | bool txq_stopped; | |
2950 | ||
2951 | if (!netif_msg_tx_err(priv)) | |
2952 | return; | |
2953 | ||
2954 | txq = netdev_get_tx_queue(priv->dev, ring->queue); | |
2955 | ||
2956 | spin_lock_irqsave(&ring->lock, flags); | |
2957 | if (ring->index == DESC_INDEX) { | |
2958 | intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); | |
2959 | intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; | |
2960 | } else { | |
2961 | intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); | |
2962 | intmsk = 1 << ring->index; | |
2963 | } | |
2964 | c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); | |
2965 | p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); | |
2966 | txq_stopped = netif_tx_queue_stopped(txq); | |
2967 | free_bds = ring->free_bds; | |
2968 | spin_unlock_irqrestore(&ring->lock, flags); | |
2969 | ||
2970 | netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" | |
2971 | "TX queue status: %s, interrupts: %s\n" | |
2972 | "(sw)free_bds: %d (sw)size: %d\n" | |
2973 | "(sw)p_index: %d (hw)p_index: %d\n" | |
2974 | "(sw)c_index: %d (hw)c_index: %d\n" | |
2975 | "(sw)clean_p: %d (sw)write_p: %d\n" | |
2976 | "(sw)cb_ptr: %d (sw)end_ptr: %d\n", | |
2977 | ring->index, ring->queue, | |
2978 | txq_stopped ? "stopped" : "active", | |
2979 | intsts & intmsk ? "enabled" : "disabled", | |
2980 | free_bds, ring->size, | |
2981 | ring->prod_index, p_index & DMA_P_INDEX_MASK, | |
2982 | ring->c_index, c_index & DMA_C_INDEX_MASK, | |
2983 | ring->clean_ptr, ring->write_ptr, | |
2984 | ring->cb_ptr, ring->end_ptr); | |
2985 | } | |
2986 | ||
1c1008c7 FF |
2987 | static void bcmgenet_timeout(struct net_device *dev) |
2988 | { | |
2989 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
13ea6578 FF |
2990 | u32 int0_enable = 0; |
2991 | u32 int1_enable = 0; | |
2992 | unsigned int q; | |
1c1008c7 FF |
2993 | |
2994 | netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); | |
2995 | ||
13ea6578 FF |
2996 | for (q = 0; q < priv->hw_params->tx_queues; q++) |
2997 | bcmgenet_dump_tx_queue(&priv->tx_rings[q]); | |
2998 | bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); | |
2999 | ||
3000 | bcmgenet_tx_reclaim_all(dev); | |
3001 | ||
3002 | for (q = 0; q < priv->hw_params->tx_queues; q++) | |
3003 | int1_enable |= (1 << q); | |
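	/* Example: with the 4 TX queues used by GENET_V2 and later, this loop
	 * sets bits 0-3, i.e. int1_enable == 0xf, one DONE bit per priority
	 * ring.
	 */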
3004 | ||
3005 | int0_enable = UMAC_IRQ_TXDMA_DONE; | |
3006 | ||
3007 | /* Re-enable TX interrupts if disabled */ | |
3008 | bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); | |
3009 | bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); | |
3010 | ||
860e9538 | 3011 | netif_trans_update(dev); |
1c1008c7 FF |
3012 | |
3013 | dev->stats.tx_errors++; | |
3014 | ||
3015 | netif_tx_wake_all_queues(dev); | |
3016 | } | |
3017 | ||
3018 | #define MAX_MC_COUNT 16 | |
3019 | ||
3020 | static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, | |
3021 | unsigned char *addr, | |
3022 | int *i, | |
3023 | int *mc) | |
3024 | { | |
3025 | u32 reg; | |
3026 | ||
c91b7f66 FF |
3027 | bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], |
3028 | UMAC_MDF_ADDR + (*i * 4)); | |
3029 | bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | | |
3030 | addr[4] << 8 | addr[5], | |
3031 | UMAC_MDF_ADDR + ((*i + 1) * 4)); | |
1c1008c7 FF |
3032 | reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL); |
3033 | reg |= (1 << (MAX_MC_COUNT - *mc)); | |
3034 | bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); | |
3035 | *i += 2; | |
3036 | (*mc)++; | |
3037 | } | |
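As a concrete illustration of the register layout used above, the following minimal sketch (a hypothetical helper, not part of the driver) packs one 6-byte address into the two 32-bit words that bcmgenet_set_mdf_addr() writes to consecutive UMAC_MDF_ADDR slots:

/* Sketch only: mirrors the packing done by bcmgenet_set_mdf_addr().
 * For the address 00:10:18:aa:bb:cc this yields hi = 0x00000010 and
 * lo = 0x18aabbcc; hi is written to UMAC_MDF_ADDR + (i * 4) and lo to
 * the following word.
 */
static void example_pack_mdf_addr(const unsigned char *addr, u32 *hi, u32 *lo)
{
	*hi = addr[0] << 8 | addr[1];
	*lo = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
}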
3038 | ||
3039 | static void bcmgenet_set_rx_mode(struct net_device *dev) | |
3040 | { | |
3041 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
3042 | struct netdev_hw_addr *ha; | |
3043 | int i, mc; | |
3044 | u32 reg; | |
3045 | ||
3046 | netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); | |
3047 | ||
7fc527f9 | 3048 | /* Promiscuous mode */ |
1c1008c7 FF |
3049 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); |
3050 | if (dev->flags & IFF_PROMISC) { | |
3051 | reg |= CMD_PROMISC; | |
3052 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | |
3053 | bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); | |
3054 | return; | |
3055 | } else { | |
3056 | reg &= ~CMD_PROMISC; | |
3057 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | |
3058 | } | |
3059 | ||
3060 | /* UniMAC doesn't support ALLMULTI */ | |
3061 | if (dev->flags & IFF_ALLMULTI) { | |
3062 | netdev_warn(dev, "ALLMULTI is not supported\n"); | |
3063 | return; | |
3064 | } | |
3065 | ||
3066 | /* update MDF filter */ | |
3067 | i = 0; | |
3068 | mc = 0; | |
3069 | /* Broadcast */ | |
3070 | bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc); | |
3071 | /* my own address */ | |
3072 | bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc); | |
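	/* At this point the broadcast and device addresses occupy two of the
	 * MAX_MC_COUNT (16) filter slots, leaving 14 for the unicast and
	 * multicast lists handled below.
	 */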
3073 | /* Unicast list */ | |
3074 | if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc)) | |
3075 | return; | |
3076 | ||
3077 | if (!netdev_uc_empty(dev)) | |
3078 | netdev_for_each_uc_addr(ha, dev) | |
3079 | bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); | |
3080 | /* Multicast */ | |
3081 | if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc)) | |
3082 | return; | |
3083 | ||
3084 | netdev_for_each_mc_addr(ha, dev) | |
3085 | bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); | |
3086 | } | |
3087 | ||
3088 | /* Set the hardware MAC address. */ | |
3089 | static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) | |
3090 | { | |
3091 | struct sockaddr *addr = p; | |
3092 | ||
3093 | /* Setting the MAC address at the hardware level is not possible | |
3094 | * without disabling the UniMAC RX/TX enable bits. | |
3095 | */ | |
3096 | if (netif_running(dev)) | |
3097 | return -EBUSY; | |
3098 | ||
3099 | ether_addr_copy(dev->dev_addr, addr->sa_data); | |
3100 | ||
3101 | return 0; | |
3102 | } | |
3103 | ||
1c1008c7 FF |
3104 | static const struct net_device_ops bcmgenet_netdev_ops = { |
3105 | .ndo_open = bcmgenet_open, | |
3106 | .ndo_stop = bcmgenet_close, | |
3107 | .ndo_start_xmit = bcmgenet_xmit, | |
1c1008c7 FF |
3108 | .ndo_tx_timeout = bcmgenet_timeout, |
3109 | .ndo_set_rx_mode = bcmgenet_set_rx_mode, | |
3110 | .ndo_set_mac_address = bcmgenet_set_mac_addr, | |
3111 | .ndo_do_ioctl = bcmgenet_ioctl, | |
3112 | .ndo_set_features = bcmgenet_set_features, | |
4d2e8882 FF |
3113 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3114 | .ndo_poll_controller = bcmgenet_poll_controller, | |
3115 | #endif | |
1c1008c7 FF |
3116 | }; |
3117 | ||
3118 | /* Array of GENET hardware parameters/characteristics */ | |
3119 | static struct bcmgenet_hw_params bcmgenet_hw_params[] = { | |
3120 | [GENET_V1] = { | |
3121 | .tx_queues = 0, | |
51a966a7 | 3122 | .tx_bds_per_q = 0, |
1c1008c7 | 3123 | .rx_queues = 0, |
3feafa02 | 3124 | .rx_bds_per_q = 0, |
1c1008c7 FF |
3125 | .bp_in_en_shift = 16, |
3126 | .bp_in_mask = 0xffff, | |
3127 | .hfb_filter_cnt = 16, | |
3128 | .qtag_mask = 0x1F, | |
3129 | .hfb_offset = 0x1000, | |
3130 | .rdma_offset = 0x2000, | |
3131 | .tdma_offset = 0x3000, | |
3132 | .words_per_bd = 2, | |
3133 | }, | |
3134 | [GENET_V2] = { | |
3135 | .tx_queues = 4, | |
51a966a7 | 3136 | .tx_bds_per_q = 32, |
7e906e02 | 3137 | .rx_queues = 0, |
3feafa02 | 3138 | .rx_bds_per_q = 0, |
1c1008c7 FF |
3139 | .bp_in_en_shift = 16, |
3140 | .bp_in_mask = 0xffff, | |
3141 | .hfb_filter_cnt = 16, | |
3142 | .qtag_mask = 0x1F, | |
3143 | .tbuf_offset = 0x0600, | |
3144 | .hfb_offset = 0x1000, | |
3145 | .hfb_reg_offset = 0x2000, | |
3146 | .rdma_offset = 0x3000, | |
3147 | .tdma_offset = 0x4000, | |
3148 | .words_per_bd = 2, | |
3149 | .flags = GENET_HAS_EXT, | |
3150 | }, | |
3151 | [GENET_V3] = { | |
3152 | .tx_queues = 4, | |
51a966a7 | 3153 | .tx_bds_per_q = 32, |
7e906e02 | 3154 | .rx_queues = 0, |
3feafa02 | 3155 | .rx_bds_per_q = 0, |
1c1008c7 FF |
3156 | .bp_in_en_shift = 17, |
3157 | .bp_in_mask = 0x1ffff, | |
3158 | .hfb_filter_cnt = 48, | |
0034de41 | 3159 | .hfb_filter_size = 128, |
1c1008c7 FF |
3160 | .qtag_mask = 0x3F, |
3161 | .tbuf_offset = 0x0600, | |
3162 | .hfb_offset = 0x8000, | |
3163 | .hfb_reg_offset = 0xfc00, | |
3164 | .rdma_offset = 0x10000, | |
3165 | .tdma_offset = 0x11000, | |
3166 | .words_per_bd = 2, | |
8d88c6eb PG |
3167 | .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR | |
3168 | GENET_HAS_MOCA_LINK_DET, | |
1c1008c7 FF |
3169 | }, |
3170 | [GENET_V4] = { | |
3171 | .tx_queues = 4, | |
51a966a7 | 3172 | .tx_bds_per_q = 32, |
7e906e02 | 3173 | .rx_queues = 0, |
3feafa02 | 3174 | .rx_bds_per_q = 0, |
1c1008c7 FF |
3175 | .bp_in_en_shift = 17, |
3176 | .bp_in_mask = 0x1ffff, | |
3177 | .hfb_filter_cnt = 48, | |
0034de41 | 3178 | .hfb_filter_size = 128, |
1c1008c7 FF |
3179 | .qtag_mask = 0x3F, |
3180 | .tbuf_offset = 0x0600, | |
3181 | .hfb_offset = 0x8000, | |
3182 | .hfb_reg_offset = 0xfc00, | |
3183 | .rdma_offset = 0x2000, | |
3184 | .tdma_offset = 0x4000, | |
3185 | .words_per_bd = 3, | |
8d88c6eb PG |
3186 | .flags = GENET_HAS_40BITS | GENET_HAS_EXT | |
3187 | GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, | |
1c1008c7 FF |
3188 | }, |
3189 | }; | |
3190 | ||
3191 | /* Infer hardware parameters from the detected GENET version */ | |
3192 | static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) | |
3193 | { | |
3194 | struct bcmgenet_hw_params *params; | |
3195 | u32 reg; | |
3196 | u8 major; | |
b04a2f5b | 3197 | u16 gphy_rev; |
1c1008c7 FF |
3198 | |
3199 | if (GENET_IS_V4(priv)) { | |
3200 | bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; | |
3201 | genet_dma_ring_regs = genet_dma_ring_regs_v4; | |
3202 | priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; | |
3203 | priv->version = GENET_V4; | |
3204 | } else if (GENET_IS_V3(priv)) { | |
3205 | bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; | |
3206 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | |
3207 | priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; | |
3208 | priv->version = GENET_V3; | |
3209 | } else if (GENET_IS_V2(priv)) { | |
3210 | bcmgenet_dma_regs = bcmgenet_dma_regs_v2; | |
3211 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | |
3212 | priv->dma_rx_chk_bit = DMA_RX_CHK_V12; | |
3213 | priv->version = GENET_V2; | |
3214 | } else if (GENET_IS_V1(priv)) { | |
3215 | bcmgenet_dma_regs = bcmgenet_dma_regs_v1; | |
3216 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | |
3217 | priv->dma_rx_chk_bit = DMA_RX_CHK_V12; | |
3218 | priv->version = GENET_V1; | |
3219 | } | |
3220 | ||
3221 | /* enum genet_version starts at 1 */ | |
3222 | priv->hw_params = &bcmgenet_hw_params[priv->version]; | |
3223 | params = priv->hw_params; | |
3224 | ||
3225 | /* Read GENET HW version */ | |
3226 | reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); | |
3227 | major = (reg >> 24 & 0x0f); | |
3228 | if (major == 5) | |
3229 | major = 4; | |
3230 | else if (major == 0) | |
3231 | major = 1; | |
3232 | if (major != priv->version) { | |
3233 | dev_err(&priv->pdev->dev, | |
3234 | "GENET version mismatch, got: %d, configured for: %d\n", | |
3235 | major, priv->version); | |
3236 | } | |
3237 | ||
3238 | /* Print the GENET core version */ | |
3239 | dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, | |
c91b7f66 | 3240 | major, (reg >> 16) & 0x0f, reg & 0xffff); |
1c1008c7 | 3241 | |
487320c5 FF |
3242 | /* Store the integrated PHY revision for the MDIO probing function |
3243 | * to pass this information to the PHY driver. The PHY driver expects | |
3244 | * to find the PHY major revision in bits 15:8 while the GENET register | |
3245 | * stores that information in bits 7:0; account for that. | |
b04a2f5b FF |
3246 | * |
3247 | * On newer chips, starting with PHY revision G0, a new scheme is | |
3248 | * deployed similar to the Starfighter 2 switch with GPHY major | |
3249 | * revision in bits 15:8 and patch level in bits 7:0. Major revision 0 | |
3250 | * is reserved, as is the special value 0x01ff; we use a small | |
3251 | * heuristic to check for the new GPHY revision and re-arrange things | |
3252 | * so the GPHY driver is happy. | |
487320c5 | 3253 | */ |
b04a2f5b FF |
3254 | gphy_rev = reg & 0xffff; |
3255 | ||
eca4bad7 DB |
3256 | /* These values are reserved and require special treatment */ | |
3257 | if (gphy_rev == 0 || gphy_rev == 0x01ff) { | |
3258 | pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); | |
3259 | return; | |
3260 | } | |
3261 | ||
b04a2f5b FF |
3262 | /* This is the good old scheme, just GPHY major, no minor nor patch */ |
3263 | if ((gphy_rev & 0xf0) != 0) | |
3264 | priv->gphy_rev = gphy_rev << 8; | |
3265 | ||
3266 | /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */ | |
3267 | else if ((gphy_rev & 0xff00) != 0) | |
3268 | priv->gphy_rev = gphy_rev; | |
3269 | ||
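	/* Worked example (illustrative values) for the heuristic above: an
	 * old-scheme reading of 0x00a0 carries the GPHY major in bits 7:0 and
	 * is shifted to 0xa000 so the PHY driver finds it in bits 15:8,
	 * whereas a new-scheme reading such as 0x1002 (major 0x10 = rev G0,
	 * patch level 0x02) already has that layout and is kept as-is.
	 */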
1c1008c7 FF |
3270 | #ifdef CONFIG_PHYS_ADDR_T_64BIT |
3271 | if (!(params->flags & GENET_HAS_40BITS)) | |
3272 | pr_warn("GENET does not support 40-bits PA\n"); | |
3273 | #endif | |
3274 | ||
3275 | pr_debug("Configuration for version: %d\n" | |
3feafa02 | 3276 | "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n" |
1c1008c7 FF |
3277 | "BP << en: %2d, BP msk: 0x%05x\n" |
3278 | "HFB count: %2d, QTAQ msk: 0x%05x\n" | |
3279 | "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" | |
3280 | "RDMA: 0x%05x, TDMA: 0x%05x\n" | |
3281 | "Words/BD: %d\n", | |
3282 | priv->version, | |
51a966a7 | 3283 | params->tx_queues, params->tx_bds_per_q, |
3feafa02 | 3284 | params->rx_queues, params->rx_bds_per_q, |
1c1008c7 FF |
3285 | params->bp_in_en_shift, params->bp_in_mask, |
3286 | params->hfb_filter_cnt, params->qtag_mask, | |
3287 | params->tbuf_offset, params->hfb_offset, | |
3288 | params->hfb_reg_offset, | |
3289 | params->rdma_offset, params->tdma_offset, | |
3290 | params->words_per_bd); | |
3291 | } | |
3292 | ||
3293 | static const struct of_device_id bcmgenet_match[] = { | |
3294 | { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 }, | |
3295 | { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 }, | |
3296 | { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 }, | |
3297 | { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, | |
3298 | { }, | |
3299 | }; | |
e8048e55 | 3300 | MODULE_DEVICE_TABLE(of, bcmgenet_match); |
1c1008c7 FF |
3301 | |
3302 | static int bcmgenet_probe(struct platform_device *pdev) | |
3303 | { | |
b0ba512e | 3304 | struct bcmgenet_platform_data *pd = pdev->dev.platform_data; |
1c1008c7 | 3305 | struct device_node *dn = pdev->dev.of_node; |
b0ba512e | 3306 | const struct of_device_id *of_id = NULL; |
1c1008c7 FF |
3307 | struct bcmgenet_priv *priv; |
3308 | struct net_device *dev; | |
3309 | const void *macaddr; | |
3310 | struct resource *r; | |
3311 | int err = -EIO; | |
6be371b0 | 3312 | const char *phy_mode_str; |
1c1008c7 | 3313 | |
3feafeed PG |
3314 | /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ |
3315 | dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, | |
3316 | GENET_MAX_MQ_CNT + 1); | |
1c1008c7 FF |
3317 | if (!dev) { |
3318 | dev_err(&pdev->dev, "can't allocate net device\n"); | |
3319 | return -ENOMEM; | |
3320 | } | |
3321 | ||
b0ba512e PG |
3322 | if (dn) { |
3323 | of_id = of_match_node(bcmgenet_match, dn); | |
3324 | if (!of_id) | |
3325 | return -EINVAL; | |
3326 | } | |
1c1008c7 FF |
3327 | |
3328 | priv = netdev_priv(dev); | |
3329 | priv->irq0 = platform_get_irq(pdev, 0); | |
3330 | priv->irq1 = platform_get_irq(pdev, 1); | |
8562056f | 3331 | priv->wol_irq = platform_get_irq(pdev, 2); |
1c1008c7 FF |
3332 | if (!priv->irq0 || !priv->irq1) { |
3333 | dev_err(&pdev->dev, "can't find IRQs\n"); | |
3334 | err = -EINVAL; | |
3335 | goto err; | |
3336 | } | |
3337 | ||
b0ba512e PG |
3338 | if (dn) { |
3339 | macaddr = of_get_mac_address(dn); | |
3340 | if (!macaddr) { | |
3341 | dev_err(&pdev->dev, "can't find MAC address\n"); | |
3342 | err = -EINVAL; | |
3343 | goto err; | |
3344 | } | |
3345 | } else { | |
3346 | macaddr = pd->mac_address; | |
1c1008c7 FF |
3347 | } |
3348 | ||
3349 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
5343a10d FE |
3350 | priv->base = devm_ioremap_resource(&pdev->dev, r); |
3351 | if (IS_ERR(priv->base)) { | |
3352 | err = PTR_ERR(priv->base); | |
1c1008c7 FF |
3353 | goto err; |
3354 | } | |
3355 | ||
07c52d6a DB |
3356 | spin_lock_init(&priv->lock); |
3357 | ||
1c1008c7 FF |
3358 | SET_NETDEV_DEV(dev, &pdev->dev); |
3359 | dev_set_drvdata(&pdev->dev, dev); | |
3360 | ether_addr_copy(dev->dev_addr, macaddr); | |
3361 | dev->watchdog_timeo = 2 * HZ; | |
7ad24ea4 | 3362 | dev->ethtool_ops = &bcmgenet_ethtool_ops; |
1c1008c7 | 3363 | dev->netdev_ops = &bcmgenet_netdev_ops; |
1c1008c7 FF |
3364 | |
3365 | priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); | |
3366 | ||
3367 | /* Set hardware features */ | |
3368 | dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | | |
3369 | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; | |
3370 | ||
8562056f FF |
3371 | /* Request the WOL interrupt and advertise suspend if available */ |
3372 | priv->wol_irq_disabled = true; | |
3373 | err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0, | |
3374 | dev->name, priv); | |
3375 | if (!err) | |
3376 | device_set_wakeup_capable(&pdev->dev, 1); | |
3377 | ||
1c1008c7 FF |
3378 | /* Set the needed headroom to account for any possible |
3379 | * features being enabled or disabled at runtime | |
3380 | */ | |
3381 | dev->needed_headroom += 64; | |
3382 | ||
3383 | netdev_boot_setup_check(dev); | |
3384 | ||
3385 | priv->dev = dev; | |
3386 | priv->pdev = pdev; | |
b0ba512e PG |
3387 | if (of_id) |
3388 | priv->version = (enum bcmgenet_version)of_id->data; | |
3389 | else | |
3390 | priv->version = pd->genet_version; | |
1c1008c7 | 3391 | |
e4a60a93 | 3392 | priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); |
7d5d3075 | 3393 | if (IS_ERR(priv->clk)) { |
e4a60a93 | 3394 | dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); |
7d5d3075 FF |
3395 | priv->clk = NULL; |
3396 | } | |
e4a60a93 | 3397 | |
7d5d3075 | 3398 | clk_prepare_enable(priv->clk); |
e4a60a93 | 3399 | |
1c1008c7 FF |
3400 | bcmgenet_set_hw_params(priv); |
3401 | ||
1c1008c7 FF |
3402 | /* Mii wait queue */ |
3403 | init_waitqueue_head(&priv->wq); | |
3404 | /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ | |
3405 | priv->rx_buf_len = RX_BUF_LENGTH; | |
3406 | INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); | |
3407 | ||
1c1008c7 | 3408 | priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); |
7d5d3075 | 3409 | if (IS_ERR(priv->clk_wol)) { |
1c1008c7 | 3410 | dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); |
7d5d3075 FF |
3411 | priv->clk_wol = NULL; |
3412 | } | |
1c1008c7 | 3413 | |
6ef398ea FF |
3414 | priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee"); |
3415 | if (IS_ERR(priv->clk_eee)) { | |
3416 | dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n"); | |
3417 | priv->clk_eee = NULL; | |
3418 | } | |
3419 | ||
6be371b0 DB |
3420 | /* If this is an internal GPHY, power it on now, before UniMAC is |
3421 | * brought out of reset as absolutely no UniMAC activity is allowed | |
3422 | */ | |
3423 | if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) && | |
3424 | !strcasecmp(phy_mode_str, "internal")) | |
3425 | bcmgenet_power_up(priv, GENET_POWER_PASSIVE); | |
3426 | ||
1c1008c7 FF |
3427 | err = reset_umac(priv); |
3428 | if (err) | |
3429 | goto err_clk_disable; | |
3430 | ||
3431 | err = bcmgenet_mii_init(dev); | |
3432 | if (err) | |
3433 | goto err_clk_disable; | |
3434 | ||
3435 | /* Set up the number of real queues + 1 (GENET_V1 has 0 hardware queues, | |
3436 | * just the ring 16 descriptor-based TX queue) | |
3437 | */ | |
3438 | netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); | |
3439 | netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); | |
3440 | ||
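	/* Example: on GENET_V2 and later (tx_queues = 4, rx_queues = 0 in the
	 * tables above) this registers five TX queues, i.e. priority rings
	 * 0-3 plus the default ring 16, and a single RX queue.
	 */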
219575eb FF |
3441 | /* libphy will determine the link state */ |
3442 | netif_carrier_off(dev); | |
3443 | ||
1c1008c7 | 3444 | /* Turn off the main clock; the WOL clock is handled separately */ |
7d5d3075 | 3445 | clk_disable_unprepare(priv->clk); |
1c1008c7 | 3446 | |
0f50ce96 FF |
3447 | err = register_netdev(dev); |
3448 | if (err) | |
3449 | goto err; | |
3450 | ||
1c1008c7 FF |
3451 | return err; |
3452 | ||
3453 | err_clk_disable: | |
7d5d3075 | 3454 | clk_disable_unprepare(priv->clk); |
1c1008c7 FF |
3455 | err: |
3456 | free_netdev(dev); | |
3457 | return err; | |
3458 | } | |
3459 | ||
3460 | static int bcmgenet_remove(struct platform_device *pdev) | |
3461 | { | |
3462 | struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); | |
3463 | ||
3464 | dev_set_drvdata(&pdev->dev, NULL); | |
3465 | unregister_netdev(priv->dev); | |
3466 | bcmgenet_mii_exit(priv->dev); | |
3467 | free_netdev(priv->dev); | |
3468 | ||
3469 | return 0; | |
3470 | } | |
3471 | ||
b6e978e5 FF |
3472 | #ifdef CONFIG_PM_SLEEP |
3473 | static int bcmgenet_suspend(struct device *d) | |
3474 | { | |
3475 | struct net_device *dev = dev_get_drvdata(d); | |
3476 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
3477 | int ret; | |
3478 | ||
3479 | if (!netif_running(dev)) | |
3480 | return 0; | |
3481 | ||
3482 | bcmgenet_netif_stop(dev); | |
3483 | ||
5371bbf4 FF |
3484 | if (!device_may_wakeup(d)) |
3485 | phy_suspend(priv->phydev); | |
cc013fb4 | 3486 | |
b6e978e5 FF |
3487 | netif_device_detach(dev); |
3488 | ||
3489 | /* Disable MAC receive */ | |
3490 | umac_enable_set(priv, CMD_RX_EN, false); | |
3491 | ||
3492 | ret = bcmgenet_dma_teardown(priv); | |
3493 | if (ret) | |
3494 | return ret; | |
3495 | ||
3496 | /* Disable MAC transmit. TX DMA must be disabled before this */ | |
3497 | umac_enable_set(priv, CMD_TX_EN, false); | |
3498 | ||
3499 | /* tx reclaim */ | |
3500 | bcmgenet_tx_reclaim_all(dev); | |
3501 | bcmgenet_fini_dma(priv); | |
3502 | ||
8c90db72 FF |
3503 | /* Prepare the device for Wake-on-LAN and switch to the slow clock */ |
3504 | if (device_may_wakeup(d) && priv->wolopts) { | |
ca8cf341 | 3505 | ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); |
8c90db72 | 3506 | clk_prepare_enable(priv->clk_wol); |
c624f891 | 3507 | } else if (priv->internal_phy) { |
a6f31f5e | 3508 | ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); |
8c90db72 FF |
3509 | } |
3510 | ||
b6e978e5 FF |
3511 | /* Turn off the clocks */ |
3512 | clk_disable_unprepare(priv->clk); | |
3513 | ||
ca8cf341 | 3514 | return ret; |
b6e978e5 FF |
3515 | } |
3516 | ||
3517 | static int bcmgenet_resume(struct device *d) | |
3518 | { | |
3519 | struct net_device *dev = dev_get_drvdata(d); | |
3520 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
3521 | unsigned long dma_ctrl; | |
3522 | int ret; | |
3523 | u32 reg; | |
3524 | ||
3525 | if (!netif_running(dev)) | |
3526 | return 0; | |
3527 | ||
3528 | /* Turn on the clock */ | |
3529 | ret = clk_prepare_enable(priv->clk); | |
3530 | if (ret) | |
3531 | return ret; | |
3532 | ||
a6f31f5e FF |
3533 | /* If this is an internal GPHY, power it back on now, before UniMAC is |
3534 | * brought out of reset as absolutely no UniMAC activity is allowed | |
3535 | */ | |
c624f891 | 3536 | if (priv->internal_phy) |
a6f31f5e FF |
3537 | bcmgenet_power_up(priv, GENET_POWER_PASSIVE); |
3538 | ||
b6e978e5 FF |
3539 | bcmgenet_umac_reset(priv); |
3540 | ||
3541 | ret = init_umac(priv); | |
3542 | if (ret) | |
3543 | goto out_clk_disable; | |
3544 | ||
0a29b3da TK |
3545 | /* From WOL-enabled suspend, switch to regular clock */ |
3546 | if (priv->wolopts) | |
3547 | clk_disable_unprepare(priv->clk_wol); | |
3548 | ||
0299b6ac | 3549 | phy_init_hw(priv->phydev); |
0a29b3da | 3550 | /* Speed settings must be restored */ |
28b45910 | 3551 | bcmgenet_mii_config(priv->dev); |
8c90db72 | 3552 | |
b6e978e5 FF |
3553 | /* Disable the Ethernet MAC while updating its registers */ | |
3554 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); | |
3555 | ||
3556 | bcmgenet_set_hw_addr(priv, dev->dev_addr); | |
3557 | ||
c624f891 | 3558 | if (priv->internal_phy) { |
b6e978e5 FF |
3559 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); |
3560 | reg |= EXT_ENERGY_DET_MASK; | |
3561 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | |
3562 | } | |
3563 | ||
98bb7399 FF |
3564 | if (priv->wolopts) |
3565 | bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); | |
3566 | ||
b6e978e5 FF |
3567 | /* Disable RX/TX DMA and flush TX queues */ |
3568 | dma_ctrl = bcmgenet_dma_disable(priv); | |
3569 | ||
3570 | /* Reinitialize TDMA and RDMA and SW housekeeping */ | |
3571 | ret = bcmgenet_init_dma(priv); | |
3572 | if (ret) { | |
3573 | netdev_err(dev, "failed to initialize DMA\n"); | |
3574 | goto out_clk_disable; | |
3575 | } | |
3576 | ||
3577 | /* Always enable ring 16, the default descriptor ring */ | |
3578 | bcmgenet_enable_dma(priv, dma_ctrl); | |
3579 | ||
3580 | netif_device_attach(dev); | |
3581 | ||
5371bbf4 FF |
3582 | if (!device_may_wakeup(d)) |
3583 | phy_resume(priv->phydev); | |
cc013fb4 | 3584 | |
6ef398ea FF |
3585 | if (priv->eee.eee_enabled) |
3586 | bcmgenet_eee_enable_set(dev, true); | |
3587 | ||
b6e978e5 FF |
3588 | bcmgenet_netif_start(dev); |
3589 | ||
3590 | return 0; | |
3591 | ||
3592 | out_clk_disable: | |
7627409c DB |
3593 | if (priv->internal_phy) |
3594 | bcmgenet_power_down(priv, GENET_POWER_PASSIVE); | |
b6e978e5 FF |
3595 | clk_disable_unprepare(priv->clk); |
3596 | return ret; | |
3597 | } | |
3598 | #endif /* CONFIG_PM_SLEEP */ | |
3599 | ||
3600 | static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume); | |
3601 | ||
1c1008c7 FF |
3602 | static struct platform_driver bcmgenet_driver = { |
3603 | .probe = bcmgenet_probe, | |
3604 | .remove = bcmgenet_remove, | |
3605 | .driver = { | |
3606 | .name = "bcmgenet", | |
1c1008c7 | 3607 | .of_match_table = bcmgenet_match, |
b6e978e5 | 3608 | .pm = &bcmgenet_pm_ops, |
1c1008c7 FF |
3609 | }, |
3610 | }; | |
3611 | module_platform_driver(bcmgenet_driver); | |
3612 | ||
3613 | MODULE_AUTHOR("Broadcom Corporation"); | |
3614 | MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver"); | |
3615 | MODULE_ALIAS("platform:bcmgenet"); | |
3616 | MODULE_LICENSE("GPL"); |