/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)				"bcmgenet: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"

/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
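/* Example with illustrative numbers (assumptions, not hardware defaults):
 * with TOTAL_DESC = 256 descriptors and four priority queues of 32 BDs
 * each, the default ring 16 is left with 256 - 4 * 32 = 128 descriptors.
 */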

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet; save these expensive
	 * writes unless the platform is explicitly configured for
	 * 64-bit/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_addr(priv, d, addr);
	dmadesc_set_length_status(priv, d, val);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register reads from the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet; save these expensive
	 * reads unless the platform is explicitly configured for
	 * 64-bit/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}

#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)

static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These macros are defined to deal with the register map change
 * between GENET1.1 and GENET2. Only the ones currently being used by
 * the driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
	DMA_RING0_TIMEOUT,
	DMA_RING1_TIMEOUT,
	DMA_RING2_TIMEOUT,
	DMA_RING3_TIMEOUT,
	DMA_RING4_TIMEOUT,
	DMA_RING5_TIMEOUT,
	DMA_RING6_TIMEOUT,
	DMA_RING7_TIMEOUT,
	DMA_RING8_TIMEOUT,
	DMA_RING9_TIMEOUT,
	DMA_RING10_TIMEOUT,
	DMA_RING11_TIMEOUT,
	DMA_RING12_TIMEOUT,
	DMA_RING13_TIMEOUT,
	DMA_RING14_TIMEOUT,
	DMA_RING15_TIMEOUT,
	DMA_RING16_TIMEOUT,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

/* RDMA/TDMA ring registers and accessors: we merge the common fields
 * and just prefix with T/R the registers having a different meaning
 * depending on the direction.
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};

/* GENET v4 supports 40-bit pointer addressing; for obvious reasons the
 * LO and HI word parts are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_READ_PTR_HI]		= 0x04,
	[TDMA_CONS_INDEX]		= 0x08,
	[TDMA_PROD_INDEX]		= 0x0C,
	[DMA_RING_BUF_SIZE]		= 0x10,
	[DMA_START_ADDR]		= 0x14,
	[DMA_START_ADDR_HI]		= 0x18,
	[DMA_END_ADDR]			= 0x1C,
	[DMA_END_ADDR_HI]		= 0x20,
	[DMA_MBUF_DONE_THRESH]		= 0x24,
	[TDMA_FLOW_PERIOD]		= 0x28,
	[TDMA_WRITE_PTR]		= 0x2C,
	[TDMA_WRITE_PTR_HI]		= 0x30,
};

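/* Pre-v4 layouts are 32-bit only: the *_HI words do not exist, so every
 * register after TDMA_READ_PTR sits 4 bytes earlier than in the v4 map
 * above.
 */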
static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_CONS_INDEX]		= 0x04,
	[TDMA_PROD_INDEX]		= 0x08,
	[DMA_RING_BUF_SIZE]		= 0x0C,
	[DMA_START_ADDR]		= 0x10,
	[DMA_END_ADDR]			= 0x14,
	[DMA_MBUF_DONE_THRESH]		= 0x18,
	[TDMA_FLOW_PERIOD]		= 0x1C,
	[TDMA_WRITE_PTR]		= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;

static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static int bcmgenet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcmgenet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}

static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}
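
/* Note: when desc_64b_en is set, bcmgenet_xmit() goes through
 * bcmgenet_put_tx_csum() below to prepend the 64-byte transmit status
 * block that carries the checksum offload parameters for each packet.
 */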

static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	return 0;
}

static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* The base system clock is 125 MHz and the DMA timeout is this
	 * reference clock divided by 1024, which yields roughly 8.192 us;
	 * the maximum value has to fit in DMA_TIMEOUT_MASK (16 bits).
	 */
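	/* Example: requesting rx_coalesce_usecs = 50 programs
	 * DIV_ROUND_UP(50 * 1000, 8192) = 7 timeout units below, i.e. an
	 * effective timeout of 7 * 8.192 us, roughly 57.3 us.
	 */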
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but
	 * will always generate an interrupt either after MBDONE packets have
	 * been transmitted, or when the ring is empty.
	 */
	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
		return -EOPNOTSUPP;

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		bcmgenet_rdma_ring_writel(priv, i,
					  ec->rx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);

		reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
		reg &= ~DMA_TIMEOUT_MASK;
		reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
		bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
	}

	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				  ec->rx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);

	return 0;
}

/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

/* There is a 0xC gap between the end of RX and beginning of TX stats and
 * then between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc
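/* For example, a TX (TSV) counter at structure offset j is read from
 * UMAC_MIB_START + j + BCMGENET_STAT_OFFSET in
 * bcmgenet_update_mib_counters() below.
 */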

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)

static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_MIB_RX:
		case BCMGENET_STAT_MIB_TX:
		case BCMGENET_STAT_RUNT:
			if (s->type != BCMGENET_STAT_MIB_RX)
				offset = BCMGENET_STAT_OFFSET;
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			break;
		case BCMGENET_STAT_MISC:
			val = bcmgenet_umac_readl(priv, s->reg_offset);
			/* clear if overflowed */
			if (val == ~0)
				bcmgenet_umac_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}

static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		if (sizeof(unsigned long) != sizeof(u32) &&
		    s->stat_sizeof == sizeof(unsigned long))
			data[i] = *(unsigned long *)p;
		else
			data[i] = *(u32 *)p;
	}
}

static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27 MHz clock automatically */
	reg = __raw_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	__raw_writel(reg, priv->base + off);

	/* Do the same thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}

static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(priv->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(priv->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(priv->phydev, e);
}

static int bcmgenet_nway_reset(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return genphy_restart_aneg(priv->phydev);
}

/* standard ethtool support functions. */
static struct ethtool_ops bcmgenet_ethtool_ops = {
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_settings		= bcmgenet_get_settings,
	.set_settings		= bcmgenet_set_settings,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= bcmgenet_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
};

/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	/* Propagate any configuration error (e.g. from the WoL path) */
	return ret;
}

static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
			 EXT_PWR_DOWN_BIAS);
		/* fallthrough */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	if (mode == GENET_POWER_PASSIVE) {
		bcmgenet_phy_power_set(priv->dev, true);
		bcmgenet_mii_reset(priv->dev);
	}
}

/* ioctl handles special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}

static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}

/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int pkts_compl = 0;
	unsigned int bytes_compl = 0;
	unsigned int c_index;
	unsigned int txbds_ready;
	unsigned int txbds_processed = 0;

	/* Compute how many buffers have been transmitted since the last
	 * call to this function
	 */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	c_index &= DMA_C_INDEX_MASK;
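
	/* The consumer index is a free-running hardware counter truncated
	 * to DMA_C_INDEX_MASK, so the subtraction below must handle the
	 * wrap: e.g. with a 16-bit index, ring->c_index == 0xfff0 and
	 * c_index == 0x000f means 0x10000 - 0xfff0 + 0xf = 31 completed
	 * descriptors.
	 */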
	if (likely(c_index >= ring->c_index))
		txbds_ready = c_index - ring->c_index;
	else
		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
		if (tx_cb_ptr->skb) {
			pkts_compl++;
			bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
			dma_unmap_single(&dev->dev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 dma_unmap_len(tx_cb_ptr, dma_len),
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dma_unmap_page(&dev->dev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;

	dev->stats.tx_packets += pkts_compl;
	dev->stats.tx_bytes += bytes_compl;

	txq = netdev_get_tx_queue(dev, ring->queue);
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}

	return pkts_compl;
}

static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring);

		return 0;
	}

	return budget;
}

static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}

/* Transmits a single SKB (either the head of a fragmented packet or a
 * standalone SKB); the caller must hold priv->lock
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = skb;

	skb_len = skb_headlen(skb);

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	return 0;
}

/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
			      skb_frag_t *frag,
			      u16 dma_desc_flags,
			      struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int frag_size;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = NULL;

	frag_size = skb_frag_size(frag);

	mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
			  __func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	return 0;
}

/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
1434 | ||
1435 | static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) | |
1436 | { | |
1437 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
1438 | struct bcmgenet_tx_ring *ring = NULL; | |
b2cde2cc | 1439 | struct netdev_queue *txq; |
1c1008c7 FF |
1440 | unsigned long flags = 0; |
1441 | int nr_frags, index; | |
1442 | u16 dma_desc_flags; | |
1443 | int ret; | |
1444 | int i; | |
1445 | ||
1446 | index = skb_get_queue_mapping(skb); | |
1447 | /* Mapping strategy: | |
1448 | * queue_mapping = 0, unclassified, packet xmited through ring16 | |
1449 | * queue_mapping = 1, goes to ring 0. (highest priority queue | |
1450 | * queue_mapping = 2, goes to ring 1. | |
1451 | * queue_mapping = 3, goes to ring 2. | |
1452 | * queue_mapping = 4, goes to ring 3. | |
1453 | */ | |
1454 | if (index == 0) | |
1455 | index = DESC_INDEX; | |
1456 | else | |
1457 | index -= 1; | |
1458 | ||
1c1008c7 | 1459 | ring = &priv->tx_rings[index]; |
b2cde2cc | 1460 | txq = netdev_get_tx_queue(dev, ring->queue); |
1c1008c7 | 1461 | |
f5a9ec20 PG |
1462 | nr_frags = skb_shinfo(skb)->nr_frags; |
1463 | ||
1c1008c7 | 1464 | spin_lock_irqsave(&ring->lock, flags); |
f5a9ec20 PG |
1465 | if (ring->free_bds <= (nr_frags + 1)) { |
1466 | if (!netif_tx_queue_stopped(txq)) { | |
1467 | netif_tx_stop_queue(txq); | |
1468 | netdev_err(dev, | |
1469 | "%s: tx ring %d full when queue %d awake\n", | |
1470 | __func__, index, ring->queue); | |
1471 | } | |
1c1008c7 FF |
1472 | ret = NETDEV_TX_BUSY; |
1473 | goto out; | |
1474 | } | |
1475 | ||
474ea9ca FF |
1476 | if (skb_padto(skb, ETH_ZLEN)) { |
1477 | ret = NETDEV_TX_OK; | |
1478 | goto out; | |
1479 | } | |
1480 | ||
55868120 PG |
1481 | /* Retain how many bytes will be sent on the wire, without TSB inserted |
1482 | * by transmit checksum offload | |
1483 | */ | |
1484 | GENET_CB(skb)->bytes_sent = skb->len; | |
1485 | ||
1c1008c7 FF |
1486 | /* set the SKB transmit checksum */ |
1487 | if (priv->desc_64b_en) { | |
bc23333b PG |
1488 | skb = bcmgenet_put_tx_csum(dev, skb); |
1489 | if (!skb) { | |
1c1008c7 FF |
1490 | ret = NETDEV_TX_OK; |
1491 | goto out; | |
1492 | } | |
1493 | } | |
1494 | ||
1495 | dma_desc_flags = DMA_SOP; | |
1496 | if (nr_frags == 0) | |
1497 | dma_desc_flags |= DMA_EOP; | |
1498 | ||
1499 | /* Transmit single SKB or head of fragment list */ | |
1500 | ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring); | |
1501 | if (ret) { | |
1502 | ret = NETDEV_TX_OK; | |
1503 | goto out; | |
1504 | } | |
1505 | ||
1506 | /* xmit fragment */ | |
1507 | for (i = 0; i < nr_frags; i++) { | |
1508 | ret = bcmgenet_xmit_frag(dev, | |
c91b7f66 FF |
1509 | &skb_shinfo(skb)->frags[i], |
1510 | (i == nr_frags - 1) ? DMA_EOP : 0, | |
1511 | ring); | |
1c1008c7 FF |
1512 | if (ret) { |
1513 | ret = NETDEV_TX_OK; | |
1514 | goto out; | |
1515 | } | |
1516 | } | |
1517 | ||
d03825fb FF |
1518 | skb_tx_timestamp(skb); |
1519 | ||
ae67bf01 FF |
1520 | /* Decrement the free BD count and advance the producer index */ | |
1521 | ring->free_bds -= nr_frags + 1; | |
1522 | ring->prod_index += nr_frags + 1; | |
1523 | ring->prod_index &= DMA_P_INDEX_MASK; | |
1524 | ||
e178c8c2 PG |
1525 | netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent); |
1526 | ||
4092e6ac | 1527 | if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) |
b2cde2cc | 1528 | netif_tx_stop_queue(txq); |
1c1008c7 | 1529 | |
ddd0ca5d FF |
1530 | if (!skb->xmit_more || netif_xmit_stopped(txq)) |
1531 | /* Packets are ready, update producer index */ | |
1532 | bcmgenet_tdma_ring_writel(priv, ring->index, | |
1533 | ring->prod_index, TDMA_PROD_INDEX); | |
1c1008c7 FF |
1534 | out: |
1535 | spin_unlock_irqrestore(&ring->lock, flags); | |
1536 | ||
1537 | return ret; | |
1538 | } | |
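/* Design note (added for clarity): the TDMA_PROD_INDEX doorbell above is
 * only written once the stack signals it has no further frames queued
 * (xmit_more unset) or the queue has just stopped, batching several
 * packets into a single producer-index register write.
 */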
1539 | ||
d6707bec PG |
1540 | static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, |
1541 | struct enet_cb *cb) | |
1c1008c7 FF |
1542 | { |
1543 | struct device *kdev = &priv->pdev->dev; | |
1544 | struct sk_buff *skb; | |
d6707bec | 1545 | struct sk_buff *rx_skb; |
1c1008c7 | 1546 | dma_addr_t mapping; |
1c1008c7 | 1547 | |
d6707bec | 1548 | /* Allocate a new Rx skb */ |
c91b7f66 | 1549 | skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT); |
d6707bec PG |
1550 | if (!skb) { |
1551 | priv->mib.alloc_rx_buff_failed++; | |
1552 | netif_err(priv, rx_err, priv->dev, | |
1553 | "%s: Rx skb allocation failed\n", __func__); | |
1554 | return NULL; | |
1555 | } | |
1c1008c7 | 1556 | |
d6707bec PG |
1557 | /* DMA-map the new Rx skb */ |
1558 | mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, | |
1559 | DMA_FROM_DEVICE); | |
1560 | if (dma_mapping_error(kdev, mapping)) { | |
44c8bc3c | 1561 | priv->mib.rx_dma_failed++; |
d6707bec | 1562 | dev_kfree_skb_any(skb); |
1c1008c7 | 1563 | netif_err(priv, rx_err, priv->dev, |
d6707bec PG |
1564 | "%s: Rx skb DMA mapping failed\n", __func__); |
1565 | return NULL; | |
1c1008c7 FF |
1566 | } |
1567 | ||
d6707bec PG |
1568 | /* Grab the current Rx skb from the ring and DMA-unmap it */ |
1569 | rx_skb = cb->skb; | |
1570 | if (likely(rx_skb)) | |
1571 | dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), | |
1572 | priv->rx_buf_len, DMA_FROM_DEVICE); | |
1573 | ||
1574 | /* Put the new Rx skb on the ring */ | |
1575 | cb->skb = skb; | |
1c1008c7 | 1576 | dma_unmap_addr_set(cb, dma_addr, mapping); |
8ac467e8 | 1577 | dmadesc_set_addr(priv, cb->bd_addr, mapping); |
1c1008c7 | 1578 | |
d6707bec PG |
1579 | /* Return the current Rx skb to caller */ |
1580 | return rx_skb; | |
1c1008c7 FF |
1581 | } |
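/* Design note (added for clarity): the refill above swaps a freshly
 * mapped skb into the ring *before* the old one is handed to the stack.
 * On allocation or mapping failure it returns NULL and leaves the old
 * skb in place, so a descriptor never points at freed memory; the caller
 * simply counts the packet as dropped and the buffer is reused.
 */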
1582 | ||
1583 | /* bcmgenet_desc_rx - descriptor-based Rx processing. | |
1584 | * This can be called from the bottom half or from the NAPI polling method. | |
1585 | */ | |
4055eaef | 1586 | static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, |
1c1008c7 FF |
1587 | unsigned int budget) |
1588 | { | |
4055eaef | 1589 | struct bcmgenet_priv *priv = ring->priv; |
1c1008c7 FF |
1590 | struct net_device *dev = priv->dev; |
1591 | struct enet_cb *cb; | |
1592 | struct sk_buff *skb; | |
1593 | u32 dma_length_status; | |
1594 | unsigned long dma_flag; | |
d6707bec | 1595 | int len; |
1c1008c7 FF |
1596 | unsigned int rxpktprocessed = 0, rxpkttoprocess; |
1597 | unsigned int p_index; | |
d26ea6cc | 1598 | unsigned int discards; |
1c1008c7 FF |
1599 | unsigned int chksum_ok = 0; |
1600 | ||
4055eaef | 1601 | p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); |
d26ea6cc PG |
1602 | |
1603 | discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & | |
1604 | DMA_P_INDEX_DISCARD_CNT_MASK; | |
1605 | if (discards > ring->old_discards) { | |
1606 | discards = discards - ring->old_discards; | |
1607 | dev->stats.rx_missed_errors += discards; | |
1608 | dev->stats.rx_errors += discards; | |
1609 | ring->old_discards += discards; | |
1610 | ||
1611 | /* Clear HW register when we reach 75% of maximum 0xFFFF */ | |
1612 | if (ring->old_discards >= 0xC000) { | |
1613 | ring->old_discards = 0; | |
4055eaef | 1614 | bcmgenet_rdma_ring_writel(priv, ring->index, 0, |
d26ea6cc PG |
1615 | RDMA_PROD_INDEX); |
1616 | } | |
1617 | } | |
1618 | ||
1c1008c7 FF |
1619 | p_index &= DMA_P_INDEX_MASK; |
1620 | ||
8ac467e8 PG |
1621 | if (likely(p_index >= ring->c_index)) |
1622 | rxpkttoprocess = p_index - ring->c_index; | |
1c1008c7 | 1623 | else |
8ac467e8 PG |
1624 | rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index + |
1625 | p_index; | |
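/* Worked example (illustrative, assuming DMA_C_INDEX_MASK is 0xffff):
 * the producer/consumer indices are free-running 16-bit counters, so
 * p_index = 0x0002 with c_index = 0xfffe yields
 * rxpkttoprocess = (0xffff + 1) - 0xfffe + 0x0002 = 4 packets.
 */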
1c1008c7 FF |
1626 | |
1627 | netif_dbg(priv, rx_status, dev, | |
c91b7f66 | 1628 | "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); |
1c1008c7 FF |
1629 | |
1630 | while ((rxpktprocessed < rxpkttoprocess) && | |
c91b7f66 | 1631 | (rxpktprocessed < budget)) { |
8ac467e8 | 1632 | cb = &priv->rx_cbs[ring->read_ptr]; |
d6707bec | 1633 | skb = bcmgenet_rx_refill(priv, cb); |
b629be5c | 1634 | |
b629be5c FF |
1635 | if (unlikely(!skb)) { |
1636 | dev->stats.rx_dropped++; | |
d6707bec | 1637 | goto next; |
b629be5c FF |
1638 | } |
1639 | ||
1c1008c7 | 1640 | if (!priv->desc_64b_en) { |
c91b7f66 | 1641 | dma_length_status = |
8ac467e8 | 1642 | dmadesc_get_length_status(priv, cb->bd_addr); |
1c1008c7 FF |
1643 | } else { |
1644 | struct status_64 *status; | |
164d4f20 | 1645 | |
1c1008c7 FF |
1646 | status = (struct status_64 *)skb->data; |
1647 | dma_length_status = status->length_status; | |
1648 | } | |
1649 | ||
1650 | /* DMA flags and length are still valid no matter how | |
1651 | * we got the Receive Status Vector (64B RSB or register) | |
1652 | */ | |
1653 | dma_flag = dma_length_status & 0xffff; | |
1654 | len = dma_length_status >> DMA_BUFLENGTH_SHIFT; | |
1655 | ||
1656 | netif_dbg(priv, rx_status, dev, | |
c91b7f66 | 1657 | "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", |
8ac467e8 PG |
1658 | __func__, p_index, ring->c_index, |
1659 | ring->read_ptr, dma_length_status); | |
1c1008c7 | 1660 | |
1c1008c7 FF |
1661 | if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { |
1662 | netif_err(priv, rx_status, dev, | |
c91b7f66 | 1663 | "dropping fragmented packet!\n"); |
1c1008c7 | 1664 | dev->stats.rx_errors++; |
d6707bec PG |
1665 | dev_kfree_skb_any(skb); |
1666 | goto next; | |
1c1008c7 | 1667 | } |
d6707bec | 1668 | |
1c1008c7 FF |
1669 | /* report errors */ |
1670 | if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | | |
1671 | DMA_RX_OV | | |
1672 | DMA_RX_NO | | |
1673 | DMA_RX_LG | | |
1674 | DMA_RX_RXER))) { | |
1675 | netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", | |
c91b7f66 | 1676 | (unsigned int)dma_flag); |
1c1008c7 FF |
1677 | if (dma_flag & DMA_RX_CRC_ERROR) |
1678 | dev->stats.rx_crc_errors++; | |
1679 | if (dma_flag & DMA_RX_OV) | |
1680 | dev->stats.rx_over_errors++; | |
1681 | if (dma_flag & DMA_RX_NO) | |
1682 | dev->stats.rx_frame_errors++; | |
1683 | if (dma_flag & DMA_RX_LG) | |
1684 | dev->stats.rx_length_errors++; | |
1c1008c7 | 1685 | dev->stats.rx_errors++; |
d6707bec PG |
1686 | dev_kfree_skb_any(skb); |
1687 | goto next; | |
1c1008c7 FF |
1688 | } /* error packet */ |
1689 | ||
1690 | chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && | |
c91b7f66 | 1691 | priv->desc_rxchk_en; |
1c1008c7 FF |
1692 | |
1693 | skb_put(skb, len); | |
1694 | if (priv->desc_64b_en) { | |
1695 | skb_pull(skb, 64); | |
1696 | len -= 64; | |
1697 | } | |
1698 | ||
1699 | if (likely(chksum_ok)) | |
1700 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1701 | ||
1702 | /* remove the 2 bytes the hardware added for IP alignment */ | |
1703 | skb_pull(skb, 2); | |
1704 | len -= 2; | |
1705 | ||
1706 | if (priv->crc_fwd_en) { | |
1707 | skb_trim(skb, len - ETH_FCS_LEN); | |
1708 | len -= ETH_FCS_LEN; | |
1709 | } | |
1710 | ||
1711 | /* Finish setting up the received SKB and send it to the kernel */ | |
1712 | skb->protocol = eth_type_trans(skb, priv->dev); | |
1713 | dev->stats.rx_packets++; | |
1714 | dev->stats.rx_bytes += len; | |
1715 | if (dma_flag & DMA_RX_MULT) | |
1716 | dev->stats.multicast++; | |
1717 | ||
1718 | /* Notify kernel */ | |
4055eaef | 1719 | napi_gro_receive(&ring->napi, skb); |
1c1008c7 FF |
1720 | netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); |
1721 | ||
d6707bec | 1722 | next: |
cf377d88 | 1723 | rxpktprocessed++; |
8ac467e8 PG |
1724 | if (likely(ring->read_ptr < ring->end_ptr)) |
1725 | ring->read_ptr++; | |
1726 | else | |
1727 | ring->read_ptr = ring->cb_ptr; | |
1728 | ||
1729 | ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; | |
4055eaef | 1730 | bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); |
1c1008c7 FF |
1731 | } |
1732 | ||
1733 | return rxpktprocessed; | |
1734 | } | |
1735 | ||
3ab11339 PG |
1736 | /* Rx NAPI polling method */ |
1737 | static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) | |
1738 | { | |
4055eaef PG |
1739 | struct bcmgenet_rx_ring *ring = container_of(napi, |
1740 | struct bcmgenet_rx_ring, napi); | |
3ab11339 PG |
1741 | unsigned int work_done; |
1742 | ||
4055eaef | 1743 | work_done = bcmgenet_desc_rx(ring, budget); |
3ab11339 PG |
1744 | |
1745 | if (work_done < budget) { | |
eb96ce01 | 1746 | napi_complete_done(napi, work_done); |
4055eaef | 1747 | ring->int_enable(ring); |
3ab11339 PG |
1748 | } |
1749 | ||
1750 | return work_done; | |
1751 | } | |
1752 | ||
1c1008c7 | 1753 | /* Assign skb to RX DMA descriptor. */ |
8ac467e8 PG |
1754 | static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, |
1755 | struct bcmgenet_rx_ring *ring) | |
1c1008c7 FF |
1756 | { |
1757 | struct enet_cb *cb; | |
d6707bec | 1758 | struct sk_buff *skb; |
1c1008c7 FF |
1759 | int i; |
1760 | ||
8ac467e8 | 1761 | netif_dbg(priv, hw, priv->dev, "%s\n", __func__); |
1c1008c7 FF |
1762 | |
1763 | /* loop here for each buffer needing assignment */ | |
8ac467e8 PG |
1764 | for (i = 0; i < ring->size; i++) { |
1765 | cb = ring->cbs + i; | |
d6707bec PG |
1766 | skb = bcmgenet_rx_refill(priv, cb); |
1767 | if (skb) | |
1768 | dev_kfree_skb_any(skb); | |
1769 | if (!cb->skb) | |
1770 | return -ENOMEM; | |
1c1008c7 FF |
1771 | } |
1772 | ||
d6707bec | 1773 | return 0; |
1c1008c7 FF |
1774 | } |
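/* Note (added for clarity): on the initial fill the control blocks hold
 * no skb yet, so bcmgenet_rx_refill() returns NULL and the free above is
 * only a guard; it matters only if this path re-runs on a populated ring.
 */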
1775 | ||
1776 | static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) | |
1777 | { | |
1778 | struct enet_cb *cb; | |
1779 | int i; | |
1780 | ||
1781 | for (i = 0; i < priv->num_rx_bds; i++) { | |
1782 | cb = &priv->rx_cbs[i]; | |
1783 | ||
1784 | if (dma_unmap_addr(cb, dma_addr)) { | |
1785 | dma_unmap_single(&priv->dev->dev, | |
c91b7f66 FF |
1786 | dma_unmap_addr(cb, dma_addr), |
1787 | priv->rx_buf_len, DMA_FROM_DEVICE); | |
1c1008c7 FF |
1788 | dma_unmap_addr_set(cb, dma_addr, 0); |
1789 | } | |
1790 | ||
1791 | if (cb->skb) | |
1792 | bcmgenet_free_cb(cb); | |
1793 | } | |
1794 | } | |
1795 | ||
c91b7f66 | 1796 | static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) |
e29585b8 FF |
1797 | { |
1798 | u32 reg; | |
1799 | ||
1800 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
1801 | if (enable) | |
1802 | reg |= mask; | |
1803 | else | |
1804 | reg &= ~mask; | |
1805 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | |
1806 | ||
1807 | /* UniMAC stops on a packet boundary; wait for a full-size packet | |
1808 | * to be processed | |
1809 | */ | |
1810 | if (enable == 0) | |
1811 | usleep_range(1000, 2000); | |
1812 | } | |
1813 | ||
1c1008c7 FF |
1814 | static int reset_umac(struct bcmgenet_priv *priv) |
1815 | { | |
1816 | struct device *kdev = &priv->pdev->dev; | |
1817 | unsigned int timeout = 0; | |
1818 | u32 reg; | |
1819 | ||
1820 | /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ | |
1821 | bcmgenet_rbuf_ctrl_set(priv, 0); | |
1822 | udelay(10); | |
1823 | ||
1824 | /* disable MAC while updating its registers */ | |
1825 | bcmgenet_umac_writel(priv, 0, UMAC_CMD); | |
1826 | ||
1827 | /* issue soft reset, wait for it to complete */ | |
1828 | bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); | |
1829 | while (timeout++ < 1000) { | |
1830 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
1831 | if (!(reg & CMD_SW_RESET)) | |
1832 | return 0; | |
1833 | ||
1834 | udelay(1); | |
1835 | } | |
1836 | ||
1837 | if (timeout >= 1000) { | |
1838 | dev_err(kdev, | |
7fc527f9 | 1839 | "timeout waiting for MAC to come out of reset\n"); |
1c1008c7 FF |
1840 | return -ETIMEDOUT; |
1841 | } | |
1842 | ||
1843 | return 0; | |
1844 | } | |
1845 | ||
909ff5ef FF |
1846 | static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) |
1847 | { | |
1848 | /* Mask all interrupts. */ | |
1849 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); | |
1850 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); | |
1851 | bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); | |
1852 | bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); | |
1853 | bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); | |
1854 | bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); | |
1855 | } | |
1856 | ||
37850e37 FF |
1857 | static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) |
1858 | { | |
1859 | u32 int0_enable = 0; | |
1860 | ||
1861 | /* Monitor cable plug/unplugged event for internal PHY, external PHY | |
1862 | * and MoCA PHY | |
1863 | */ | |
1864 | if (priv->internal_phy) { | |
1865 | int0_enable |= UMAC_IRQ_LINK_EVENT; | |
1866 | } else if (priv->ext_phy) { | |
1867 | int0_enable |= UMAC_IRQ_LINK_EVENT; | |
1868 | } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { | |
1869 | if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) | |
1870 | int0_enable |= UMAC_IRQ_LINK_EVENT; | |
1871 | } | |
1872 | bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); | |
1873 | } | |
1874 | ||
1c1008c7 FF |
1875 | static int init_umac(struct bcmgenet_priv *priv) |
1876 | { | |
1877 | struct device *kdev = &priv->pdev->dev; | |
1878 | int ret; | |
b2e97eca PG |
1879 | u32 reg; |
1880 | u32 int0_enable = 0; | |
1881 | u32 int1_enable = 0; | |
1882 | int i; | |
1c1008c7 FF |
1883 | |
1884 | dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); | |
1885 | ||
1886 | ret = reset_umac(priv); | |
1887 | if (ret) | |
1888 | return ret; | |
1889 | ||
1890 | bcmgenet_umac_writel(priv, 0, UMAC_CMD); | |
1891 | /* clear tx/rx counter */ | |
1892 | bcmgenet_umac_writel(priv, | |
c91b7f66 FF |
1893 | MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, |
1894 | UMAC_MIB_CTRL); | |
1c1008c7 FF |
1895 | bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); |
1896 | ||
1897 | bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); | |
1898 | ||
1899 | /* init rx registers, enable ip header optimization */ | |
1900 | reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); | |
1901 | reg |= RBUF_ALIGN_2B; | |
1902 | bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); | |
1903 | ||
1904 | if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) | |
1905 | bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); | |
1906 | ||
909ff5ef | 1907 | bcmgenet_intr_disable(priv); |
1c1008c7 | 1908 | |
b2e97eca | 1909 | /* Enable Rx default queue 16 interrupts */ |
ee7d8c20 | 1910 | int0_enable |= UMAC_IRQ_RXDMA_DONE; |
1c1008c7 | 1911 | |
b2e97eca | 1912 | /* Enable Tx default queue 16 interrupts */ |
ee7d8c20 | 1913 | int0_enable |= UMAC_IRQ_TXDMA_DONE; |
1c1008c7 | 1914 | |
37850e37 FF |
1915 | /* Configure backpressure vectors for MoCA */ |
1916 | if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { | |
1c1008c7 FF |
1917 | reg = bcmgenet_bp_mc_get(priv); |
1918 | reg |= BIT(priv->hw_params->bp_in_en_shift); | |
1919 | ||
1920 | /* bp_mask: back pressure mask */ | |
1921 | if (netif_is_multiqueue(priv->dev)) | |
1922 | reg |= priv->hw_params->bp_in_mask; | |
1923 | else | |
1924 | reg &= ~priv->hw_params->bp_in_mask; | |
1925 | bcmgenet_bp_mc_set(priv, reg); | |
1926 | } | |
1927 | ||
1928 | /* Enable MDIO interrupts on GENET v3+ */ | |
1929 | if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) | |
b2e97eca | 1930 | int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); |
1c1008c7 | 1931 | |
4055eaef PG |
1932 | /* Enable Rx priority queue interrupts */ |
1933 | for (i = 0; i < priv->hw_params->rx_queues; ++i) | |
1934 | int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i)); | |
1935 | ||
b2e97eca PG |
1936 | /* Enable Tx priority queue interrupts */ |
1937 | for (i = 0; i < priv->hw_params->tx_queues; ++i) | |
1938 | int1_enable |= (1 << i); | |
1c1008c7 | 1939 | |
b2e97eca PG |
1940 | bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); |
1941 | bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); | |
4092e6ac | 1942 | |
1c1008c7 FF |
1943 | /* Enable rx/tx engine. */ | |
1944 | dev_dbg(kdev, "done init umac\n"); | |
1945 | ||
1946 | return 0; | |
1947 | } | |
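/* Interrupt fan-out, summarizing the enables above (added for clarity):
 *   INTRL2_0: UMAC_IRQ_RXDMA_DONE / UMAC_IRQ_TXDMA_DONE
 *             -> default queue 16, serviced by bcmgenet_isr0()
 *   INTRL2_1: bit i                             -> Tx priority queue i
 *             bit (UMAC_IRQ1_RX_INTR_SHIFT + i) -> Rx priority queue i
 *             both serviced by bcmgenet_isr1()
 */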
1948 | ||
4f8b2d7d | 1949 | /* Initialize a Tx ring along with corresponding hardware registers */ |
1c1008c7 FF |
1950 | static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, |
1951 | unsigned int index, unsigned int size, | |
4f8b2d7d | 1952 | unsigned int start_ptr, unsigned int end_ptr) |
1c1008c7 FF |
1953 | { |
1954 | struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; | |
1955 | u32 words_per_bd = WORDS_PER_BD(priv); | |
1956 | u32 flow_period_val = 0; | |
1c1008c7 FF |
1957 | |
1958 | spin_lock_init(&ring->lock); | |
4092e6ac | 1959 | ring->priv = priv; |
1c1008c7 FF |
1960 | ring->index = index; |
1961 | if (index == DESC_INDEX) { | |
1962 | ring->queue = 0; | |
1963 | ring->int_enable = bcmgenet_tx_ring16_int_enable; | |
1964 | ring->int_disable = bcmgenet_tx_ring16_int_disable; | |
1965 | } else { | |
1966 | ring->queue = index + 1; | |
1967 | ring->int_enable = bcmgenet_tx_ring_int_enable; | |
1968 | ring->int_disable = bcmgenet_tx_ring_int_disable; | |
1969 | } | |
4f8b2d7d | 1970 | ring->cbs = priv->tx_cbs + start_ptr; |
1c1008c7 | 1971 | ring->size = size; |
66d06757 | 1972 | ring->clean_ptr = start_ptr; |
1c1008c7 FF |
1973 | ring->c_index = 0; |
1974 | ring->free_bds = size; | |
4f8b2d7d PG |
1975 | ring->write_ptr = start_ptr; |
1976 | ring->cb_ptr = start_ptr; | |
1c1008c7 FF |
1977 | ring->end_ptr = end_ptr - 1; |
1978 | ring->prod_index = 0; | |
1979 | ||
1980 | /* Set flow period for ring != 16 */ | |
1981 | if (index != DESC_INDEX) | |
1982 | flow_period_val = ENET_MAX_MTU_SIZE << 16; | |
1983 | ||
1984 | bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); | |
1985 | bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); | |
1986 | bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); | |
1987 | /* Disable rate control for now */ | |
1988 | bcmgenet_tdma_ring_writel(priv, index, flow_period_val, | |
c91b7f66 | 1989 | TDMA_FLOW_PERIOD); |
1c1008c7 | 1990 | bcmgenet_tdma_ring_writel(priv, index, |
c91b7f66 FF |
1991 | ((size << DMA_RING_SIZE_SHIFT) | |
1992 | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); | |
1c1008c7 | 1993 | |
1c1008c7 | 1994 | /* Set start and end address, read and write pointers */ |
4f8b2d7d | 1995 | bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, |
c91b7f66 | 1996 | DMA_START_ADDR); |
4f8b2d7d | 1997 | bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, |
c91b7f66 | 1998 | TDMA_READ_PTR); |
4f8b2d7d | 1999 | bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, |
c91b7f66 | 2000 | TDMA_WRITE_PTR); |
1c1008c7 | 2001 | bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, |
c91b7f66 | 2002 | DMA_END_ADDR); |
1c1008c7 FF |
2003 | } |
2004 | ||
2005 | /* Initialize a RDMA ring */ | |
2006 | static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, | |
8ac467e8 PG |
2007 | unsigned int index, unsigned int size, |
2008 | unsigned int start_ptr, unsigned int end_ptr) | |
1c1008c7 | 2009 | { |
8ac467e8 | 2010 | struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; |
1c1008c7 FF |
2011 | u32 words_per_bd = WORDS_PER_BD(priv); |
2012 | int ret; | |
2013 | ||
4055eaef | 2014 | ring->priv = priv; |
8ac467e8 | 2015 | ring->index = index; |
4055eaef PG |
2016 | if (index == DESC_INDEX) { |
2017 | ring->int_enable = bcmgenet_rx_ring16_int_enable; | |
2018 | ring->int_disable = bcmgenet_rx_ring16_int_disable; | |
2019 | } else { | |
2020 | ring->int_enable = bcmgenet_rx_ring_int_enable; | |
2021 | ring->int_disable = bcmgenet_rx_ring_int_disable; | |
2022 | } | |
8ac467e8 PG |
2023 | ring->cbs = priv->rx_cbs + start_ptr; |
2024 | ring->size = size; | |
2025 | ring->c_index = 0; | |
2026 | ring->read_ptr = start_ptr; | |
2027 | ring->cb_ptr = start_ptr; | |
2028 | ring->end_ptr = end_ptr - 1; | |
1c1008c7 | 2029 | |
8ac467e8 PG |
2030 | ret = bcmgenet_alloc_rx_buffers(priv, ring); |
2031 | if (ret) | |
1c1008c7 | 2032 | return ret; |
1c1008c7 | 2033 | |
1c1008c7 FF |
2034 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); |
2035 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); | |
6f5a272c | 2036 | bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); |
1c1008c7 | 2037 | bcmgenet_rdma_ring_writel(priv, index, |
c91b7f66 FF |
2038 | ((size << DMA_RING_SIZE_SHIFT) | |
2039 | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); | |
1c1008c7 | 2040 | bcmgenet_rdma_ring_writel(priv, index, |
c91b7f66 FF |
2041 | (DMA_FC_THRESH_LO << |
2042 | DMA_XOFF_THRESHOLD_SHIFT) | | |
2043 | DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); | |
6f5a272c PG |
2044 | |
2045 | /* Set start and end address, read and write pointers */ | |
8ac467e8 PG |
2046 | bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, |
2047 | DMA_START_ADDR); | |
2048 | bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, | |
2049 | RDMA_READ_PTR); | |
2050 | bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, | |
2051 | RDMA_WRITE_PTR); | |
2052 | bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, | |
6f5a272c | 2053 | DMA_END_ADDR); |
1c1008c7 FF |
2054 | |
2055 | return ret; | |
2056 | } | |
2057 | ||
e2aadb4a PG |
2058 | static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv) |
2059 | { | |
2060 | unsigned int i; | |
2061 | struct bcmgenet_tx_ring *ring; | |
2062 | ||
2063 | for (i = 0; i < priv->hw_params->tx_queues; ++i) { | |
2064 | ring = &priv->tx_rings[i]; | |
d64b5e85 | 2065 | netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); |
e2aadb4a PG |
2066 | } |
2067 | ||
2068 | ring = &priv->tx_rings[DESC_INDEX]; | |
d64b5e85 | 2069 | netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); |
e2aadb4a PG |
2070 | } |
2071 | ||
2072 | static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) | |
2073 | { | |
2074 | unsigned int i; | |
2075 | struct bcmgenet_tx_ring *ring; | |
2076 | ||
2077 | for (i = 0; i < priv->hw_params->tx_queues; ++i) { | |
2078 | ring = &priv->tx_rings[i]; | |
2079 | napi_enable(&ring->napi); | |
2080 | } | |
2081 | ||
2082 | ring = &priv->tx_rings[DESC_INDEX]; | |
2083 | napi_enable(&ring->napi); | |
2084 | } | |
2085 | ||
2086 | static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) | |
2087 | { | |
2088 | unsigned int i; | |
2089 | struct bcmgenet_tx_ring *ring; | |
2090 | ||
2091 | for (i = 0; i < priv->hw_params->tx_queues; ++i) { | |
2092 | ring = &priv->tx_rings[i]; | |
2093 | napi_disable(&ring->napi); | |
2094 | } | |
2095 | ||
2096 | ring = &priv->tx_rings[DESC_INDEX]; | |
2097 | napi_disable(&ring->napi); | |
2098 | } | |
2099 | ||
2100 | static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) | |
2101 | { | |
2102 | unsigned int i; | |
2103 | struct bcmgenet_tx_ring *ring; | |
2104 | ||
2105 | for (i = 0; i < priv->hw_params->tx_queues; ++i) { | |
2106 | ring = &priv->tx_rings[i]; | |
2107 | netif_napi_del(&ring->napi); | |
2108 | } | |
2109 | ||
2110 | ring = &priv->tx_rings[DESC_INDEX]; | |
2111 | netif_napi_del(&ring->napi); | |
2112 | } | |
2113 | ||
16c6d667 | 2114 | /* Initialize Tx queues |
1c1008c7 | 2115 | * |
16c6d667 | 2116 | * Queues 0-3 are priority-based, each one has 32 descriptors, |
1c1008c7 FF |
2117 | * with queue 0 being the highest priority queue. |
2118 | * | |
16c6d667 | 2119 | * Queue 16 is the default Tx queue with |
51a966a7 | 2120 | * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. |
1c1008c7 | 2121 | * |
16c6d667 PG |
2122 | * The transmit control block pool is then partitioned as follows: |
2123 | * - Tx queue 0 uses tx_cbs[0..31] | |
2124 | * - Tx queue 1 uses tx_cbs[32..63] | |
2125 | * - Tx queue 2 uses tx_cbs[64..95] | |
2126 | * - Tx queue 3 uses tx_cbs[96..127] | |
2127 | * - Tx queue 16 uses tx_cbs[128..255] | |
1c1008c7 | 2128 | */ |
16c6d667 | 2129 | static void bcmgenet_init_tx_queues(struct net_device *dev) |
1c1008c7 FF |
2130 | { |
2131 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
16c6d667 PG |
2132 | u32 i, dma_enable; |
2133 | u32 dma_ctrl, ring_cfg; | |
37742166 | 2134 | u32 dma_priority[3] = {0, 0, 0}; |
1c1008c7 | 2135 | |
1c1008c7 FF |
2136 | dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); |
2137 | dma_enable = dma_ctrl & DMA_EN; | |
2138 | dma_ctrl &= ~DMA_EN; | |
2139 | bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); | |
2140 | ||
16c6d667 PG |
2141 | dma_ctrl = 0; |
2142 | ring_cfg = 0; | |
2143 | ||
1c1008c7 FF |
2144 | /* Enable strict priority arbiter mode */ |
2145 | bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); | |
2146 | ||
16c6d667 | 2147 | /* Initialize Tx priority queues */ |
1c1008c7 | 2148 | for (i = 0; i < priv->hw_params->tx_queues; i++) { |
51a966a7 PG |
2149 | bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, |
2150 | i * priv->hw_params->tx_bds_per_q, | |
2151 | (i + 1) * priv->hw_params->tx_bds_per_q); | |
16c6d667 PG |
2152 | ring_cfg |= (1 << i); |
2153 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | |
37742166 PG |
2154 | dma_priority[DMA_PRIO_REG_INDEX(i)] |= |
2155 | ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); | |
1c1008c7 FF |
2156 | } |
2157 | ||
16c6d667 | 2158 | /* Initialize Tx default queue 16 */ |
51a966a7 | 2159 | bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, |
16c6d667 | 2160 | priv->hw_params->tx_queues * |
51a966a7 | 2161 | priv->hw_params->tx_bds_per_q, |
16c6d667 PG |
2162 | TOTAL_DESC); |
2163 | ring_cfg |= (1 << DESC_INDEX); | |
2164 | dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); | |
37742166 PG |
2165 | dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= |
2166 | ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << | |
2167 | DMA_PRIO_REG_SHIFT(DESC_INDEX)); | |
16c6d667 PG |
2168 | |
2169 | /* Set Tx queue priorities */ | |
37742166 PG |
2170 | bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); |
2171 | bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); | |
2172 | bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); | |
2173 | ||
e2aadb4a PG |
2174 | /* Initialize Tx NAPI */ |
2175 | bcmgenet_init_tx_napi(priv); | |
2176 | ||
16c6d667 PG |
2177 | /* Enable Tx queues */ |
2178 | bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); | |
1c1008c7 | 2179 | |
16c6d667 | 2180 | /* Enable Tx DMA */ |
1c1008c7 | 2181 | if (dma_enable) |
16c6d667 PG |
2182 | dma_ctrl |= DMA_EN; |
2183 | bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); | |
1c1008c7 FF |
2184 | } |
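/* Worked example (illustrative): with four Tx queues and GENET_Q0_PRIORITY
 * == 0, the loops above assign arbitration priorities 0..3 to queues 0..3
 * and priority 4 to default queue 16, so (assuming lower values win under
 * the strict-priority arbiter) the default queue always yields to the
 * priority queues.
 */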
2185 | ||
3ab11339 PG |
2186 | static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv) |
2187 | { | |
4055eaef PG |
2188 | unsigned int i; |
2189 | struct bcmgenet_rx_ring *ring; | |
2190 | ||
2191 | for (i = 0; i < priv->hw_params->rx_queues; ++i) { | |
2192 | ring = &priv->rx_rings[i]; | |
2193 | netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); | |
2194 | } | |
2195 | ||
2196 | ring = &priv->rx_rings[DESC_INDEX]; | |
2197 | netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); | |
3ab11339 PG |
2198 | } |
2199 | ||
2200 | static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) | |
2201 | { | |
4055eaef PG |
2202 | unsigned int i; |
2203 | struct bcmgenet_rx_ring *ring; | |
2204 | ||
2205 | for (i = 0; i < priv->hw_params->rx_queues; ++i) { | |
2206 | ring = &priv->rx_rings[i]; | |
2207 | napi_enable(&ring->napi); | |
2208 | } | |
2209 | ||
2210 | ring = &priv->rx_rings[DESC_INDEX]; | |
2211 | napi_enable(&ring->napi); | |
3ab11339 PG |
2212 | } |
2213 | ||
2214 | static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) | |
2215 | { | |
4055eaef PG |
2216 | unsigned int i; |
2217 | struct bcmgenet_rx_ring *ring; | |
2218 | ||
2219 | for (i = 0; i < priv->hw_params->rx_queues; ++i) { | |
2220 | ring = &priv->rx_rings[i]; | |
2221 | napi_disable(&ring->napi); | |
2222 | } | |
2223 | ||
2224 | ring = &priv->rx_rings[DESC_INDEX]; | |
2225 | napi_disable(&ring->napi); | |
3ab11339 PG |
2226 | } |
2227 | ||
2228 | static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) | |
2229 | { | |
4055eaef PG |
2230 | unsigned int i; |
2231 | struct bcmgenet_rx_ring *ring; | |
2232 | ||
2233 | for (i = 0; i < priv->hw_params->rx_queues; ++i) { | |
2234 | ring = &priv->rx_rings[i]; | |
2235 | netif_napi_del(&ring->napi); | |
2236 | } | |
2237 | ||
2238 | ring = &priv->rx_rings[DESC_INDEX]; | |
2239 | netif_napi_del(&ring->napi); | |
3ab11339 PG |
2240 | } |
2241 | ||
8ac467e8 PG |
2242 | /* Initialize Rx queues |
2243 | * | |
2244 | * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be | |
2245 | * used to direct traffic to these queues. | |
2246 | * | |
2247 | * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. | |
2248 | */ | |
2249 | static int bcmgenet_init_rx_queues(struct net_device *dev) | |
2250 | { | |
2251 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2252 | u32 i; | |
2253 | u32 dma_enable; | |
2254 | u32 dma_ctrl; | |
2255 | u32 ring_cfg; | |
2256 | int ret; | |
2257 | ||
2258 | dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2259 | dma_enable = dma_ctrl & DMA_EN; | |
2260 | dma_ctrl &= ~DMA_EN; | |
2261 | bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); | |
2262 | ||
2263 | dma_ctrl = 0; | |
2264 | ring_cfg = 0; | |
2265 | ||
2266 | /* Initialize Rx priority queues */ | |
2267 | for (i = 0; i < priv->hw_params->rx_queues; i++) { | |
2268 | ret = bcmgenet_init_rx_ring(priv, i, | |
2269 | priv->hw_params->rx_bds_per_q, | |
2270 | i * priv->hw_params->rx_bds_per_q, | |
2271 | (i + 1) * | |
2272 | priv->hw_params->rx_bds_per_q); | |
2273 | if (ret) | |
2274 | return ret; | |
2275 | ||
2276 | ring_cfg |= (1 << i); | |
2277 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | |
2278 | } | |
2279 | ||
2280 | /* Initialize Rx default queue 16 */ | |
2281 | ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, | |
2282 | priv->hw_params->rx_queues * | |
2283 | priv->hw_params->rx_bds_per_q, | |
2284 | TOTAL_DESC); | |
2285 | if (ret) | |
2286 | return ret; | |
2287 | ||
2288 | ring_cfg |= (1 << DESC_INDEX); | |
2289 | dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); | |
2290 | ||
3ab11339 PG |
2291 | /* Initialize Rx NAPI */ |
2292 | bcmgenet_init_rx_napi(priv); | |
2293 | ||
8ac467e8 PG |
2294 | /* Enable rings */ |
2295 | bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); | |
2296 | ||
2297 | /* Configure ring as descriptor ring and re-enable DMA if enabled */ | |
2298 | if (dma_enable) | |
2299 | dma_ctrl |= DMA_EN; | |
2300 | bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); | |
2301 | ||
2302 | return 0; | |
2303 | } | |
2304 | ||
4a0c081e FF |
2305 | static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) |
2306 | { | |
2307 | int ret = 0; | |
2308 | int timeout = 0; | |
2309 | u32 reg; | |
b6df7d61 JS |
2310 | u32 dma_ctrl; |
2311 | int i; | |
4a0c081e FF |
2312 | |
2313 | /* Disable TDMA to stop more frames from entering the Tx DMA */ | |
2314 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
2315 | reg &= ~DMA_EN; | |
2316 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2317 | ||
2318 | /* Check TDMA status register to confirm TDMA is disabled */ | |
2319 | while (timeout++ < DMA_TIMEOUT_VAL) { | |
2320 | reg = bcmgenet_tdma_readl(priv, DMA_STATUS); | |
2321 | if (reg & DMA_DISABLED) | |
2322 | break; | |
2323 | ||
2324 | udelay(1); | |
2325 | } | |
2326 | ||
2327 | if (timeout == DMA_TIMEOUT_VAL) { | |
2328 | netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); | |
2329 | ret = -ETIMEDOUT; | |
2330 | } | |
2331 | ||
2332 | /* Wait 10ms for packets to drain from both the Tx and Rx DMA */ | |
2333 | usleep_range(10000, 20000); | |
2334 | ||
2335 | /* Disable RDMA */ | |
2336 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2337 | reg &= ~DMA_EN; | |
2338 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
2339 | ||
2340 | timeout = 0; | |
2341 | /* Check RDMA status register to confirm RDMA is disabled */ | |
2342 | while (timeout++ < DMA_TIMEOUT_VAL) { | |
2343 | reg = bcmgenet_rdma_readl(priv, DMA_STATUS); | |
2344 | if (reg & DMA_DISABLED) | |
2345 | break; | |
2346 | ||
2347 | udelay(1); | |
2348 | } | |
2349 | ||
2350 | if (timeout == DMA_TIMEOUT_VAL) { | |
2351 | netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); | |
2352 | ret = -ETIMEDOUT; | |
2353 | } | |
2354 | ||
b6df7d61 JS |
2355 | dma_ctrl = 0; |
2356 | for (i = 0; i < priv->hw_params->rx_queues; i++) | |
2357 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | |
2358 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2359 | reg &= ~dma_ctrl; | |
2360 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
2361 | ||
2362 | dma_ctrl = 0; | |
2363 | for (i = 0; i < priv->hw_params->tx_queues; i++) | |
2364 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | |
2365 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
2366 | reg &= ~dma_ctrl; | |
2367 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2368 | ||
4a0c081e FF |
2369 | return ret; |
2370 | } | |
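/* Teardown ordering, summarizing the above (added for clarity): stop the
 * Tx engine first so no new frames enter the pipeline, give in-flight
 * packets ~10ms to drain, stop the Rx engine, then clear the per-ring
 * buffer-enable bits on both the Rx and Tx sides.
 */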
2371 | ||
9abab96d | 2372 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) |
1c1008c7 FF |
2373 | { |
2374 | int i; | |
e178c8c2 | 2375 | struct netdev_queue *txq; |
1c1008c7 | 2376 | |
9abab96d PG |
2377 | bcmgenet_fini_rx_napi(priv); |
2378 | bcmgenet_fini_tx_napi(priv); | |
2379 | ||
1c1008c7 | 2380 | /* disable DMA */ |
4a0c081e | 2381 | bcmgenet_dma_teardown(priv); |
1c1008c7 FF |
2382 | |
2383 | for (i = 0; i < priv->num_tx_bds; i++) { | |
2384 | if (priv->tx_cbs[i].skb != NULL) { | |
2385 | dev_kfree_skb(priv->tx_cbs[i].skb); | |
2386 | priv->tx_cbs[i].skb = NULL; | |
2387 | } | |
2388 | } | |
2389 | ||
e178c8c2 PG |
2390 | for (i = 0; i < priv->hw_params->tx_queues; i++) { |
2391 | txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue); | |
2392 | netdev_tx_reset_queue(txq); | |
2393 | } | |
2394 | ||
2395 | txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue); | |
2396 | netdev_tx_reset_queue(txq); | |
2397 | ||
1c1008c7 FF |
2398 | bcmgenet_free_rx_buffers(priv); |
2399 | kfree(priv->rx_cbs); | |
2400 | kfree(priv->tx_cbs); | |
2401 | } | |
2402 | ||
2403 | /* init_edma: Initialize DMA control register */ | |
2404 | static int bcmgenet_init_dma(struct bcmgenet_priv *priv) | |
2405 | { | |
2406 | int ret; | |
014012a4 PG |
2407 | unsigned int i; |
2408 | struct enet_cb *cb; | |
1c1008c7 | 2409 | |
6f5a272c | 2410 | netif_dbg(priv, hw, priv->dev, "%s\n", __func__); |
1c1008c7 | 2411 | |
6f5a272c PG |
2412 | /* Initialize common Rx ring structures */ |
2413 | priv->rx_bds = priv->base + priv->hw_params->rdma_offset; | |
2414 | priv->num_rx_bds = TOTAL_DESC; | |
2415 | priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), | |
2416 | GFP_KERNEL); | |
2417 | if (!priv->rx_cbs) | |
2418 | return -ENOMEM; | |
2419 | ||
2420 | for (i = 0; i < priv->num_rx_bds; i++) { | |
2421 | cb = priv->rx_cbs + i; | |
2422 | cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; | |
2423 | } | |
2424 | ||
7fc527f9 | 2425 | /* Initialize common Tx ring structures */ |
1c1008c7 FF |
2426 | priv->tx_bds = priv->base + priv->hw_params->tdma_offset; |
2427 | priv->num_tx_bds = TOTAL_DESC; | |
c489be08 | 2428 | priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), |
c91b7f66 | 2429 | GFP_KERNEL); |
1c1008c7 | 2430 | if (!priv->tx_cbs) { |
ebbd96fb | 2431 | kfree(priv->rx_cbs); |
1c1008c7 FF |
2432 | return -ENOMEM; |
2433 | } | |
2434 | ||
014012a4 PG |
2435 | for (i = 0; i < priv->num_tx_bds; i++) { |
2436 | cb = priv->tx_cbs + i; | |
2437 | cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; | |
2438 | } | |
2439 | ||
ebbd96fb PG |
2440 | /* Init rDma */ |
2441 | bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); | |
2442 | ||
2443 | /* Initialize Rx queues */ | |
2444 | ret = bcmgenet_init_rx_queues(priv->dev); | |
2445 | if (ret) { | |
2446 | netdev_err(priv->dev, "failed to initialize Rx queues\n"); | |
2447 | bcmgenet_free_rx_buffers(priv); | |
2448 | kfree(priv->rx_cbs); | |
2449 | kfree(priv->tx_cbs); | |
2450 | return ret; | |
2451 | } | |
2452 | ||
2453 | /* Init tDma */ | |
2454 | bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); | |
2455 | ||
16c6d667 PG |
2456 | /* Initialize Tx queues */ |
2457 | bcmgenet_init_tx_queues(priv->dev); | |
1c1008c7 FF |
2458 | |
2459 | return 0; | |
2460 | } | |
2461 | ||
1c1008c7 FF |
2462 | /* Interrupt bottom half */ |
2463 | static void bcmgenet_irq_task(struct work_struct *work) | |
2464 | { | |
2465 | struct bcmgenet_priv *priv = container_of( | |
2466 | work, struct bcmgenet_priv, bcmgenet_irq_work); | |
2467 | ||
2468 | netif_dbg(priv, intr, priv->dev, "%s\n", __func__); | |
2469 | ||
8fdb0e0f FF |
2470 | if (priv->irq0_stat & UMAC_IRQ_MPD_R) { |
2471 | priv->irq0_stat &= ~UMAC_IRQ_MPD_R; | |
2472 | netif_dbg(priv, wol, priv->dev, | |
2473 | "magic packet detected, waking up\n"); | |
2474 | bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); | |
2475 | } | |
2476 | ||
1c1008c7 | 2477 | /* Link UP/DOWN event */ |
d07c0278 | 2478 | if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) { |
80d8e96d | 2479 | phy_mac_interrupt(priv->phydev, |
451e1ca2 | 2480 | !!(priv->irq0_stat & UMAC_IRQ_LINK_UP)); |
e122966d | 2481 | priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT; |
1c1008c7 FF |
2482 | } |
2483 | } | |
2484 | ||
4055eaef | 2485 | /* bcmgenet_isr1: handle Rx and Tx priority queues */ |
1c1008c7 FF |
2486 | static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) |
2487 | { | |
2488 | struct bcmgenet_priv *priv = dev_id; | |
4055eaef PG |
2489 | struct bcmgenet_rx_ring *rx_ring; |
2490 | struct bcmgenet_tx_ring *tx_ring; | |
1c1008c7 FF |
2491 | unsigned int index; |
2492 | ||
2493 | /* Save irq status for bottom-half processing. */ | |
2494 | priv->irq1_stat = | |
2495 | bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & | |
4092e6ac | 2496 | ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); |
4055eaef | 2497 | |
7fc527f9 | 2498 | /* clear interrupts */ |
1c1008c7 FF |
2499 | bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); |
2500 | ||
2501 | netif_dbg(priv, intr, priv->dev, | |
c91b7f66 | 2502 | "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); |
4092e6ac | 2503 | |
4055eaef PG |
2504 | /* Check Rx priority queue interrupts */ |
2505 | for (index = 0; index < priv->hw_params->rx_queues; index++) { | |
2506 | if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) | |
2507 | continue; | |
2508 | ||
2509 | rx_ring = &priv->rx_rings[index]; | |
2510 | ||
2511 | if (likely(napi_schedule_prep(&rx_ring->napi))) { | |
2512 | rx_ring->int_disable(rx_ring); | |
dac916f8 | 2513 | __napi_schedule_irqoff(&rx_ring->napi); |
4055eaef PG |
2514 | } |
2515 | } | |
2516 | ||
2517 | /* Check Tx priority queue interrupts */ | |
4092e6ac JS |
2518 | for (index = 0; index < priv->hw_params->tx_queues; index++) { |
2519 | if (!(priv->irq1_stat & BIT(index))) | |
2520 | continue; | |
2521 | ||
4055eaef | 2522 | tx_ring = &priv->tx_rings[index]; |
4092e6ac | 2523 | |
4055eaef PG |
2524 | if (likely(napi_schedule_prep(&tx_ring->napi))) { |
2525 | tx_ring->int_disable(tx_ring); | |
dac916f8 | 2526 | __napi_schedule_irqoff(&tx_ring->napi); |
1c1008c7 FF |
2527 | } |
2528 | } | |
4092e6ac | 2529 | |
1c1008c7 FF |
2530 | return IRQ_HANDLED; |
2531 | } | |
2532 | ||
4055eaef | 2533 | /* bcmgenet_isr0: handle Rx and Tx default queues + other interrupts */ |
1c1008c7 FF |
2534 | static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) |
2535 | { | |
2536 | struct bcmgenet_priv *priv = dev_id; | |
4055eaef PG |
2537 | struct bcmgenet_rx_ring *rx_ring; |
2538 | struct bcmgenet_tx_ring *tx_ring; | |
1c1008c7 FF |
2539 | |
2540 | /* Save irq status for bottom-half processing. */ | |
2541 | priv->irq0_stat = | |
2542 | bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & | |
2543 | ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); | |
4055eaef | 2544 | |
7fc527f9 | 2545 | /* clear interrupts */ |
1c1008c7 FF |
2546 | bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); |
2547 | ||
2548 | netif_dbg(priv, intr, priv->dev, | |
c91b7f66 | 2549 | "IRQ=0x%x\n", priv->irq0_stat); |
1c1008c7 | 2550 | |
ee7d8c20 | 2551 | if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) { |
4055eaef PG |
2552 | rx_ring = &priv->rx_rings[DESC_INDEX]; |
2553 | ||
2554 | if (likely(napi_schedule_prep(&rx_ring->napi))) { | |
2555 | rx_ring->int_disable(rx_ring); | |
dac916f8 | 2556 | __napi_schedule_irqoff(&rx_ring->napi); |
1c1008c7 FF |
2557 | } |
2558 | } | |
4092e6ac | 2559 | |
ee7d8c20 | 2560 | if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) { |
4055eaef PG |
2561 | tx_ring = &priv->tx_rings[DESC_INDEX]; |
2562 | ||
2563 | if (likely(napi_schedule_prep(&tx_ring->napi))) { | |
2564 | tx_ring->int_disable(tx_ring); | |
dac916f8 | 2565 | __napi_schedule_irqoff(&tx_ring->napi); |
4092e6ac | 2566 | } |
1c1008c7 | 2567 | } |
4055eaef | 2568 | |
1c1008c7 FF |
2569 | if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | |
2570 | UMAC_IRQ_PHY_DET_F | | |
e122966d | 2571 | UMAC_IRQ_LINK_EVENT | |
1c1008c7 FF |
2572 | UMAC_IRQ_HFB_SM | |
2573 | UMAC_IRQ_HFB_MM | | |
2574 | UMAC_IRQ_MPD_R)) { | |
2575 | /* all other interrupts of interest are handled in the bottom half */ | |
2576 | schedule_work(&priv->bcmgenet_irq_work); | |
2577 | } | |
2578 | ||
2579 | if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && | |
c91b7f66 | 2580 | priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { |
1c1008c7 FF |
2581 | priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); |
2582 | wake_up(&priv->wq); | |
2583 | } | |
2584 | ||
2585 | return IRQ_HANDLED; | |
2586 | } | |
2587 | ||
8562056f FF |
2588 | static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) |
2589 | { | |
2590 | struct bcmgenet_priv *priv = dev_id; | |
2591 | ||
2592 | pm_wakeup_event(&priv->pdev->dev, 0); | |
2593 | ||
2594 | return IRQ_HANDLED; | |
2595 | } | |
2596 | ||
4d2e8882 FF |
2597 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2598 | static void bcmgenet_poll_controller(struct net_device *dev) | |
2599 | { | |
2600 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2601 | ||
2602 | /* Invoke the main RX/TX interrupt handler */ | |
2603 | disable_irq(priv->irq0); | |
2604 | bcmgenet_isr0(priv->irq0, priv); | |
2605 | enable_irq(priv->irq0); | |
2606 | ||
2607 | /* And the interrupt handler for RX/TX priority queues */ | |
2608 | disable_irq(priv->irq1); | |
2609 | bcmgenet_isr1(priv->irq1, priv); | |
2610 | enable_irq(priv->irq1); | |
2611 | } | |
2612 | #endif | |
2613 | ||
1c1008c7 FF |
2614 | static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) |
2615 | { | |
2616 | u32 reg; | |
2617 | ||
2618 | reg = bcmgenet_rbuf_ctrl_get(priv); | |
2619 | reg |= BIT(1); | |
2620 | bcmgenet_rbuf_ctrl_set(priv, reg); | |
2621 | udelay(10); | |
2622 | ||
2623 | reg &= ~BIT(1); | |
2624 | bcmgenet_rbuf_ctrl_set(priv, reg); | |
2625 | udelay(10); | |
2626 | } | |
2627 | ||
2628 | static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, | |
c91b7f66 | 2629 | unsigned char *addr) |
1c1008c7 FF |
2630 | { |
2631 | bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | | |
2632 | (addr[2] << 8) | addr[3], UMAC_MAC0); | |
2633 | bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); | |
2634 | } | |
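/* Worked example (illustrative): for dev_addr 00:10:18:aa:bb:cc the two
 * writes above produce
 *   UMAC_MAC0 = 0x001018aa	(bytes 0-3, packed big-endian)
 *   UMAC_MAC1 = 0x0000bbcc	(bytes 4-5)
 */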
2635 | ||
1c1008c7 FF |
2636 | /* Returns a reusable dma control register value */ |
2637 | static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) | |
2638 | { | |
2639 | u32 reg; | |
2640 | u32 dma_ctrl; | |
2641 | ||
2642 | /* disable DMA */ | |
2643 | dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; | |
2644 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
2645 | reg &= ~dma_ctrl; | |
2646 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2647 | ||
2648 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2649 | reg &= ~dma_ctrl; | |
2650 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
2651 | ||
2652 | bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); | |
2653 | udelay(10); | |
2654 | bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); | |
2655 | ||
2656 | return dma_ctrl; | |
2657 | } | |
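/* Usage note (added for clarity): the returned mask (ring 16 buffer
 * enable | DMA_EN) is what bcmgenet_open() later hands back to
 * bcmgenet_enable_dma() once the rings have been reinitialized.
 */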
2658 | ||
2659 | static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) | |
2660 | { | |
2661 | u32 reg; | |
2662 | ||
2663 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2664 | reg |= dma_ctrl; | |
2665 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
2666 | ||
2667 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
2668 | reg |= dma_ctrl; | |
2669 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2670 | } | |
2671 | ||
0034de41 PG |
2672 | static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv, |
2673 | u32 f_index) | |
2674 | { | |
2675 | u32 offset; | |
2676 | u32 reg; | |
2677 | ||
2678 | offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); | |
2679 | reg = bcmgenet_hfb_reg_readl(priv, offset); | |
2680 | return !!(reg & (1 << (f_index % 32))); | |
2681 | } | |
2682 | ||
2683 | static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) | |
2684 | { | |
2685 | u32 offset; | |
2686 | u32 reg; | |
2687 | ||
2688 | offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); | |
2689 | reg = bcmgenet_hfb_reg_readl(priv, offset); | |
2690 | reg |= (1 << (f_index % 32)); | |
2691 | bcmgenet_hfb_reg_writel(priv, reg, offset); | |
2692 | } | |
2693 | ||
2694 | static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv, | |
2695 | u32 f_index, u32 rx_queue) | |
2696 | { | |
2697 | u32 offset; | |
2698 | u32 reg; | |
2699 | ||
2700 | offset = f_index / 8; | |
2701 | reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset); | |
2702 | reg &= ~(0xF << (4 * (f_index % 8))); | |
2703 | reg |= ((rx_queue & 0xF) << (4 * (f_index % 8))); | |
2704 | bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset); | |
2705 | } | |
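/* Worked example (illustrative): steering filter 10 to Rx queue 3:
 *   offset = 10 / 8       = 1	-> register DMA_INDEX2RING_0 + 1
 *   shift  = 4 * (10 % 8) = 8	-> bits 11:8 of that register
 * so the register is read-modify-written with 0x3 << 8.
 */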
2706 | ||
2707 | static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, | |
2708 | u32 f_index, u32 f_length) | |
2709 | { | |
2710 | u32 offset; | |
2711 | u32 reg; | |
2712 | ||
2713 | offset = HFB_FLT_LEN_V3PLUS + | |
2714 | ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) * | |
2715 | sizeof(u32); | |
2716 | reg = bcmgenet_hfb_reg_readl(priv, offset); | |
2717 | reg &= ~(0xFF << (8 * (f_index % 4))); | |
2718 | reg |= ((f_length & 0xFF) << (8 * (f_index % 4))); | |
2719 | bcmgenet_hfb_reg_writel(priv, reg, offset); | |
2720 | } | |
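/* Worked example (illustrative, assuming hfb_filter_cnt == 48): for
 * f_index = 0 the length byte lives in the last HFB_FLT_LEN word:
 *   (48 - 1 - 0) / 4 = 11	-> HFB_FLT_LEN_V3PLUS + 11 * sizeof(u32)
 * occupying bits 7:0 of that word (8 * (0 % 4) = 0).
 */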
2721 | ||
2722 | static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv) | |
2723 | { | |
2724 | u32 f_index; | |
2725 | ||
2726 | for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++) | |
2727 | if (!bcmgenet_hfb_is_filter_enabled(priv, f_index)) | |
2728 | return f_index; | |
2729 | ||
2730 | return -ENOMEM; | |
2731 | } | |
2732 | ||
2733 | /* bcmgenet_hfb_add_filter | |
2734 | * | |
2735 | * Add new filter to Hardware Filter Block to match and direct Rx traffic to | |
2736 | * desired Rx queue. | |
2737 | * | |
2738 | * f_data is an array of unsigned 32-bit integers where each 32-bit integer | |
2739 | * provides filter data for 2 bytes (4 nibbles) of Rx frame: | |
2740 | * | |
2741 | * bits 31:20 - unused | |
2742 | * bit 19 - nibble 0 match enable | |
2743 | * bit 18 - nibble 1 match enable | |
2744 | * bit 17 - nibble 2 match enable | |
2745 | * bit 16 - nibble 3 match enable | |
2746 | * bits 15:12 - nibble 0 data | |
2747 | * bits 11:8 - nibble 1 data | |
2748 | * bits 7:4 - nibble 2 data | |
2749 | * bits 3:0 - nibble 3 data | |
2750 | * | |
2751 | * Example: | |
2752 | * In order to match: | |
2753 | * - Ethernet frame type = 0x0800 (IP) | |
2754 | * - IP version field = 4 | |
2755 | * - IP protocol field = 0x11 (UDP) | |
2756 | * | |
2757 | * The following filter is needed: | |
2758 | * u32 hfb_filter_ipv4_udp[] = { | |
2759 | * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000, | |
2760 | * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000, | |
2761 | * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011, | |
2762 | * }; | |
2763 | * | |
2764 | * To add the filter to HFB and direct the traffic to Rx queue 0, call: | |
2765 | * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp, | |
2766 | * ARRAY_SIZE(hfb_filter_ipv4_udp), 0); | |
2767 | */ | |
2768 | int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data, | |
2769 | u32 f_length, u32 rx_queue) | |
2770 | { | |
2771 | int f_index; | |
2772 | u32 i; | |
2773 | ||
2774 | f_index = bcmgenet_hfb_find_unused_filter(priv); | |
2775 | if (f_index < 0) | |
2776 | return -ENOMEM; | |
2777 | ||
2778 | if (f_length > priv->hw_params->hfb_filter_size) | |
2779 | return -EINVAL; | |
2780 | ||
2781 | for (i = 0; i < f_length; i++) | |
2782 | bcmgenet_hfb_writel(priv, f_data[i], | |
2783 | (f_index * priv->hw_params->hfb_filter_size + i) * | |
2784 | sizeof(u32)); | |
2785 | ||
2786 | bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length); | |
2787 | bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue); | |
2788 | bcmgenet_hfb_enable_filter(priv, f_index); | |
2789 | bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL); | |
2790 | ||
2791 | return 0; | |
2792 | } | |
2793 | ||
2794 | /* bcmgenet_hfb_clear | |
2795 | * | |
2796 | * Clear Hardware Filter Block and disable all filtering. | |
2797 | */ | |
2798 | static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) | |
2799 | { | |
2800 | u32 i; | |
2801 | ||
2802 | bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); | |
2803 | bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); | |
2804 | bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); | |
2805 | ||
2806 | for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++) | |
2807 | bcmgenet_rdma_writel(priv, 0x0, i); | |
2808 | ||
2809 | for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) | |
2810 | bcmgenet_hfb_reg_writel(priv, 0x0, | |
2811 | HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); | |
2812 | ||
2813 | for (i = 0; i < priv->hw_params->hfb_filter_cnt * | |
2814 | priv->hw_params->hfb_filter_size; i++) | |
2815 | bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32)); | |
2816 | } | |
2817 | ||
2818 | static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) | |
2819 | { | |
2820 | if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) | |
2821 | return; | |
2822 | ||
2823 | bcmgenet_hfb_clear(priv); | |
2824 | } | |
2825 | ||
909ff5ef FF |
2826 | static void bcmgenet_netif_start(struct net_device *dev) |
2827 | { | |
2828 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2829 | ||
2830 | /* Start the network engine */ | |
3ab11339 | 2831 | bcmgenet_enable_rx_napi(priv); |
e2aadb4a | 2832 | bcmgenet_enable_tx_napi(priv); |
909ff5ef FF |
2833 | |
2834 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); | |
2835 | ||
909ff5ef FF |
2836 | netif_tx_start_all_queues(dev); |
2837 | ||
37850e37 FF |
2838 | /* Monitor link interrupts now */ |
2839 | bcmgenet_link_intr_enable(priv); | |
2840 | ||
909ff5ef FF |
2841 | phy_start(priv->phydev); |
2842 | } | |
2843 | ||
1c1008c7 FF |
2844 | static int bcmgenet_open(struct net_device *dev) |
2845 | { | |
2846 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2847 | unsigned long dma_ctrl; | |
2848 | u32 reg; | |
2849 | int ret; | |
2850 | ||
2851 | netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); | |
2852 | ||
2853 | /* Turn on the clock */ | |
7d5d3075 | 2854 | clk_prepare_enable(priv->clk); |
1c1008c7 | 2855 | |
a642c4f7 FF |
2856 | /* If this is an internal GPHY, power it back on now, before UniMAC is |
2857 | * brought out of reset as absolutely no UniMAC activity is allowed | |
2858 | */ | |
c624f891 | 2859 | if (priv->internal_phy) |
a642c4f7 FF |
2860 | bcmgenet_power_up(priv, GENET_POWER_PASSIVE); |
2861 | ||
1c1008c7 FF |
2862 | /* take MAC out of reset */ |
2863 | bcmgenet_umac_reset(priv); | |
2864 | ||
2865 | ret = init_umac(priv); | |
2866 | if (ret) | |
2867 | goto err_clk_disable; | |
2868 | ||
2869 | /* disable ethernet MAC while updating its registers */ | |
e29585b8 | 2870 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); |
1c1008c7 | 2871 | |
909ff5ef FF |
2872 | /* Make sure we reflect the value of CRC_CMD_FWD */ |
2873 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
2874 | priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); | |
2875 | ||
1c1008c7 FF |
2876 | bcmgenet_set_hw_addr(priv, dev->dev_addr); |
2877 | ||
c624f891 | 2878 | if (priv->internal_phy) { |
1c1008c7 FF |
2879 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); |
2880 | reg |= EXT_ENERGY_DET_MASK; | |
2881 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | |
2882 | } | |
2883 | ||
2884 | /* Disable RX/TX DMA and flush TX queues */ | |
2885 | dma_ctrl = bcmgenet_dma_disable(priv); | |
2886 | ||
2887 | /* Reinitialize TDMA and RDMA and SW housekeeping */ | |
2888 | ret = bcmgenet_init_dma(priv); | |
2889 | if (ret) { | |
2890 | netdev_err(dev, "failed to initialize DMA\n"); | |
fac25940 | 2891 | goto err_clk_disable; |
1c1008c7 FF |
2892 | } |
2893 | ||
2894 | /* Always enable ring 16 - descriptor ring */ | |
2895 | bcmgenet_enable_dma(priv, dma_ctrl); | |
2896 | ||
0034de41 PG |
2897 | /* HFB init */ |
2898 | bcmgenet_hfb_init(priv); | |
2899 | ||
1c1008c7 | 2900 | ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, |
c91b7f66 | 2901 | dev->name, priv); |
1c1008c7 FF |
2902 | if (ret < 0) { |
2903 | netdev_err(dev, "can't request IRQ %d\n", priv->irq0); | |
2904 | goto err_fini_dma; | |
2905 | } | |
2906 | ||
2907 | ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, | |
c91b7f66 | 2908 | dev->name, priv); |
1c1008c7 FF |
2909 | if (ret < 0) { |
2910 | netdev_err(dev, "can't request IRQ %d\n", priv->irq1); | |
2911 | goto err_irq0; | |
2912 | } | |
2913 | ||
6cc8e6d4 FF |
2914 | ret = bcmgenet_mii_probe(dev); |
2915 | if (ret) { | |
2916 | netdev_err(dev, "failed to connect to PHY\n"); | |
2917 | goto err_irq1; | |
2918 | } | |
c96e731c | 2919 | |
909ff5ef | 2920 | bcmgenet_netif_start(dev); |
1c1008c7 FF |
2921 | |
2922 | return 0; | |
2923 | ||
6cc8e6d4 FF |
2924 | err_irq1: |
2925 | free_irq(priv->irq1, priv); | |
1c1008c7 | 2926 | err_irq0: |
978ffac4 | 2927 | free_irq(priv->irq0, priv); |
1c1008c7 FF |
2928 | err_fini_dma: |
2929 | bcmgenet_fini_dma(priv); | |
2930 | err_clk_disable: | |
7d5d3075 | 2931 | clk_disable_unprepare(priv->clk); |
1c1008c7 FF |
2932 | return ret; |
2933 | } | |
2934 | ||
909ff5ef FF |
2935 | static void bcmgenet_netif_stop(struct net_device *dev) |
2936 | { | |
2937 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2938 | ||
2939 | netif_tx_stop_all_queues(dev); | |
909ff5ef | 2940 | phy_stop(priv->phydev); |
909ff5ef | 2941 | bcmgenet_intr_disable(priv); |
3ab11339 | 2942 | bcmgenet_disable_rx_napi(priv); |
e2aadb4a | 2943 | bcmgenet_disable_tx_napi(priv); |
909ff5ef FF |
2944 | |
2945 | /* Wait for pending work items to complete. Since interrupts are | |
2946 | * disabled no new work will be scheduled. | |
2947 | */ | |
2948 | cancel_work_sync(&priv->bcmgenet_irq_work); | |
cc013fb4 | 2949 | |
cc013fb4 | 2950 | priv->old_link = -1; |
5ad6e6c5 | 2951 | priv->old_speed = -1; |
cc013fb4 | 2952 | priv->old_duplex = -1; |
5ad6e6c5 | 2953 | priv->old_pause = -1; |
909ff5ef FF |
2954 | } |
2955 | ||
1c1008c7 FF |
2956 | static int bcmgenet_close(struct net_device *dev) |
2957 | { | |
2958 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2959 | int ret; | |
1c1008c7 FF |
2960 | |
2961 | netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); | |
2962 | ||
909ff5ef | 2963 | bcmgenet_netif_stop(dev); |
1c1008c7 | 2964 | |
c96e731c FF |
2965 | /* Really kill the PHY state machine and disconnect from it */ |
2966 | phy_disconnect(priv->phydev); | |
2967 | ||
1c1008c7 | 2968 | /* Disable MAC receive */ |
e29585b8 | 2969 | umac_enable_set(priv, CMD_RX_EN, false); |
1c1008c7 | 2970 | |
1c1008c7 FF |
2971 | ret = bcmgenet_dma_teardown(priv); |
2972 | if (ret) | |
2973 | return ret; | |
2974 | ||
2975 | /* Disable MAC transmit. TX DMA must be disabled before this */ | |
e29585b8 | 2976 | umac_enable_set(priv, CMD_TX_EN, false); |
1c1008c7 | 2977 | |
1c1008c7 FF |
2978 | /* tx reclaim */ |
2979 | bcmgenet_tx_reclaim_all(dev); | |
2980 | bcmgenet_fini_dma(priv); | |
2981 | ||
2982 | free_irq(priv->irq0, priv); | |
2983 | free_irq(priv->irq1, priv); | |
2984 | ||
c624f891 | 2985 | if (priv->internal_phy) |
ca8cf341 | 2986 | ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); |
1c1008c7 | 2987 | |
7d5d3075 | 2988 | clk_disable_unprepare(priv->clk); |
1c1008c7 | 2989 | |
ca8cf341 | 2990 | return ret; |
1c1008c7 FF |
2991 | } |
2992 | ||
13ea6578 FF |
2993 | static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) |
2994 | { | |
2995 | struct bcmgenet_priv *priv = ring->priv; | |
2996 | u32 p_index, c_index, intsts, intmsk; | |
2997 | struct netdev_queue *txq; | |
2998 | unsigned int free_bds; | |
2999 | unsigned long flags; | |
3000 | bool txq_stopped; | |
3001 | ||
3002 | if (!netif_msg_tx_err(priv)) | |
3003 | return; | |
3004 | ||
3005 | txq = netdev_get_tx_queue(priv->dev, ring->queue); | |
3006 | ||
3007 | spin_lock_irqsave(&ring->lock, flags); | |
3008 | if (ring->index == DESC_INDEX) { | |
3009 | intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); | |
3010 | intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; | |
3011 | } else { | |
3012 | intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); | |
3013 | intmsk = 1 << ring->index; | |
3014 | } | |
3015 | c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); | |
3016 | p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); | |
3017 | txq_stopped = netif_tx_queue_stopped(txq); | |
3018 | free_bds = ring->free_bds; | |
3019 | spin_unlock_irqrestore(&ring->lock, flags); | |
3020 | ||
3021 | netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" | |
3022 | "TX queue status: %s, interrupts: %s\n" | |
3023 | "(sw)free_bds: %d (sw)size: %d\n" | |
3024 | "(sw)p_index: %d (hw)p_index: %d\n" | |
3025 | "(sw)c_index: %d (hw)c_index: %d\n" | |
3026 | "(sw)clean_p: %d (sw)write_p: %d\n" | |
3027 | "(sw)cb_ptr: %d (sw)end_ptr: %d\n", | |
3028 | ring->index, ring->queue, | |
3029 | txq_stopped ? "stopped" : "active", | |
3030 | intsts & intmsk ? "enabled" : "disabled", | |
3031 | free_bds, ring->size, | |
3032 | ring->prod_index, p_index & DMA_P_INDEX_MASK, | |
3033 | ring->c_index, c_index & DMA_C_INDEX_MASK, | |
3034 | ring->clean_ptr, ring->write_ptr, | |
3035 | ring->cb_ptr, ring->end_ptr); | |
3036 | } | |
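/* Editor's sketch of the interrupt decoding above: INTRL2_CPU_MASK_STATUS
 * reports the sources that are currently masked, so its complement is
 * the enabled set.  Ring 16 is served by intrl2_0 via the TXDMA_DONE/
 * TXDMA_MBDONE bits, while each priority ring q owns bit (1 << q) on
 * intrl2_1, e.g. (hypothetical helper, for illustration only):
 *
 *	static bool bcmgenet_tx_irq_enabled(struct bcmgenet_priv *priv,
 *					    unsigned int q)
 *	{
 *		u32 en = ~bcmgenet_intrl2_1_readl(priv,
 *						  INTRL2_CPU_MASK_STATUS);
 *
 *		return en & (1 << q);
 *	}
 */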
3037 | ||
1c1008c7 FF |
3038 | static void bcmgenet_timeout(struct net_device *dev) |
3039 | { | |
3040 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
13ea6578 FF |
3041 | u32 int0_enable = 0; |
3042 | u32 int1_enable = 0; | |
3043 | unsigned int q; | |
1c1008c7 FF |
3044 | |
3045 | netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); | |
3046 | ||
13ea6578 FF |
3047 | for (q = 0; q < priv->hw_params->tx_queues; q++) |
3048 | bcmgenet_dump_tx_queue(&priv->tx_rings[q]); | |
3049 | bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); | |
3050 | ||
3051 | bcmgenet_tx_reclaim_all(dev); | |
3052 | ||
3053 | for (q = 0; q < priv->hw_params->tx_queues; q++) | |
3054 | int1_enable |= (1 << q); | |
3055 | ||
3056 | int0_enable = UMAC_IRQ_TXDMA_DONE; | |
3057 | ||
3058 | /* Re-enable TX interrupts if disabled */ | |
3059 | bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); | |
3060 | bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); | |
3061 | ||
1c1008c7 FF |
3062 | dev->trans_start = jiffies; |
3063 | ||
3064 | dev->stats.tx_errors++; | |
3065 | ||
3066 | netif_tx_wake_all_queues(dev); | |
3067 | } | |
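/* Editor's worked example for the masks above: with tx_queues = 4 the
 * loop yields int1_enable = 0b1111 = 0xf (one bit per priority ring),
 * and int0_enable = UMAC_IRQ_TXDMA_DONE covers ring 16; writing these
 * to INTRL2_CPU_MASK_CLEAR unmasks exactly those TX completion sources.
 */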
3068 | ||
3069 | #define MAX_MC_COUNT 16 | |
3070 | ||
3071 | static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, | |
3072 | unsigned char *addr, | |
3073 | int *i, | |
3074 | int *mc) | |
3075 | { | |
3076 | u32 reg; | |
3077 | ||
c91b7f66 FF |
3078 | bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], |
3079 | UMAC_MDF_ADDR + (*i * 4)); | |
3080 | bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | | |
3081 | addr[4] << 8 | addr[5], | |
3082 | UMAC_MDF_ADDR + ((*i + 1) * 4)); | |
1c1008c7 FF |
3083 | reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL); |
3084 | reg |= (1 << (MAX_MC_COUNT - *mc)); | |
3085 | bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); | |
3086 | *i += 2; | |
3087 | (*mc)++; | |
3088 | } | |
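/* Editor's worked example of the MDF packing above: for the address
 * 00:11:22:33:44:55, the first MDF_ADDR word receives 0x0011 (bytes
 * 0-1 in its low 16 bits) and the next word 0x22334455 (bytes 2-5), so
 * each filter entry consumes two register words and *i advances by 2.
 * The enable bits count down from bit 16: entry 0 sets bit 16, entry 1
 * bit 15, and so on.
 */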
3089 | ||
3090 | static void bcmgenet_set_rx_mode(struct net_device *dev) | |
3091 | { | |
3092 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
3093 | struct netdev_hw_addr *ha; | |
3094 | int i, mc; | |
3095 | u32 reg; | |
3096 | ||
3097 | netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); | |
3098 | ||
7fc527f9 | 3099 | /* Promiscuous mode */ |
1c1008c7 FF |
3100 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); |
3101 | if (dev->flags & IFF_PROMISC) { | |
3102 | reg |= CMD_PROMISC; | |
3103 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | |
3104 | bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); | |
3105 | return; | |
3106 | } else { | |
3107 | reg &= ~CMD_PROMISC; | |
3108 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | |
3109 | } | |
3110 | ||
3111 | /* UniMac doesn't support ALLMULTI */ | |
3112 | if (dev->flags & IFF_ALLMULTI) { | |
3113 | netdev_warn(dev, "ALLMULTI is not supported\n"); | |
3114 | return; | |
3115 | } | |
3116 | ||
3117 | /* update MDF filter */ | |
3118 | i = 0; | |
3119 | mc = 0; | |
3120 | /* Broadcast */ | |
3121 | bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc); | |
3122 | /* Our own address */ | |
3123 | bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc); | |
3124 | /* Unicast list */ | |
3125 | if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc)) | |
3126 | return; | |
3127 | ||
3128 | if (!netdev_uc_empty(dev)) | |
3129 | netdev_for_each_uc_addr(ha, dev) | |
3130 | bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); | |
3131 | /* Multicast */ | |
3132 | if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc)) | |
3133 | return; | |
3134 | ||
3135 | netdev_for_each_mc_addr(ha, dev) | |
3136 | bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); | |
3137 | } | |
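/* Editor's note on the filter budget above: broadcast and the device's
 * own address always occupy the first two of the MAX_MC_COUNT (16)
 * entries, leaving 14 slots shared by the unicast and multicast lists;
 * either list is silently skipped when it would overflow the remaining
 * space.
 */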
3138 | ||
3139 | /* Set the hardware MAC address. */ | |
3140 | static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) | |
3141 | { | |
3142 | struct sockaddr *addr = p; | |
3143 | ||
3144 | /* Setting the MAC address at the hardware level is not possible | |
3145 | * without disabling the UniMAC RX/TX enable bits. | |
3146 | */ | |
3147 | if (netif_running(dev)) | |
3148 | return -EBUSY; | |
3149 | ||
3150 | ether_addr_copy(dev->dev_addr, addr->sa_data); | |
3151 | ||
3152 | return 0; | |
3153 | } | |
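/* Editor's note: only the software copy (dev->dev_addr) changes here;
 * the UniMAC registers pick the new address up via
 * bcmgenet_set_hw_addr() the next time the interface is brought up
 * (the same step is visible in bcmgenet_resume() below).
 */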
3154 | ||
1c1008c7 FF |
3155 | static const struct net_device_ops bcmgenet_netdev_ops = { |
3156 | .ndo_open = bcmgenet_open, | |
3157 | .ndo_stop = bcmgenet_close, | |
3158 | .ndo_start_xmit = bcmgenet_xmit, | |
1c1008c7 FF |
3159 | .ndo_tx_timeout = bcmgenet_timeout, |
3160 | .ndo_set_rx_mode = bcmgenet_set_rx_mode, | |
3161 | .ndo_set_mac_address = bcmgenet_set_mac_addr, | |
3162 | .ndo_do_ioctl = bcmgenet_ioctl, | |
3163 | .ndo_set_features = bcmgenet_set_features, | |
4d2e8882 FF |
3164 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3165 | .ndo_poll_controller = bcmgenet_poll_controller, | |
3166 | #endif | |
1c1008c7 FF |
3167 | }; |
3168 | ||
3169 | /* Array of GENET hardware parameters/characteristics */ | |
3170 | static struct bcmgenet_hw_params bcmgenet_hw_params[] = { | |
3171 | [GENET_V1] = { | |
3172 | .tx_queues = 0, | |
51a966a7 | 3173 | .tx_bds_per_q = 0, |
1c1008c7 | 3174 | .rx_queues = 0, |
3feafa02 | 3175 | .rx_bds_per_q = 0, |
1c1008c7 FF |
3176 | .bp_in_en_shift = 16, |
3177 | .bp_in_mask = 0xffff, | |
3178 | .hfb_filter_cnt = 16, | |
3179 | .qtag_mask = 0x1F, | |
3180 | .hfb_offset = 0x1000, | |
3181 | .rdma_offset = 0x2000, | |
3182 | .tdma_offset = 0x3000, | |
3183 | .words_per_bd = 2, | |
3184 | }, | |
3185 | [GENET_V2] = { | |
3186 | .tx_queues = 4, | |
51a966a7 | 3187 | .tx_bds_per_q = 32, |
7e906e02 | 3188 | .rx_queues = 0, |
3feafa02 | 3189 | .rx_bds_per_q = 0, |
1c1008c7 FF |
3190 | .bp_in_en_shift = 16, |
3191 | .bp_in_mask = 0xffff, | |
3192 | .hfb_filter_cnt = 16, | |
3193 | .qtag_mask = 0x1F, | |
3194 | .tbuf_offset = 0x0600, | |
3195 | .hfb_offset = 0x1000, | |
3196 | .hfb_reg_offset = 0x2000, | |
3197 | .rdma_offset = 0x3000, | |
3198 | .tdma_offset = 0x4000, | |
3199 | .words_per_bd = 2, | |
3200 | .flags = GENET_HAS_EXT, | |
3201 | }, | |
3202 | [GENET_V3] = { | |
3203 | .tx_queues = 4, | |
51a966a7 | 3204 | .tx_bds_per_q = 32, |
7e906e02 | 3205 | .rx_queues = 0, |
3feafa02 | 3206 | .rx_bds_per_q = 0, |
1c1008c7 FF |
3207 | .bp_in_en_shift = 17, |
3208 | .bp_in_mask = 0x1ffff, | |
3209 | .hfb_filter_cnt = 48, | |
0034de41 | 3210 | .hfb_filter_size = 128, |
1c1008c7 FF |
3211 | .qtag_mask = 0x3F, |
3212 | .tbuf_offset = 0x0600, | |
3213 | .hfb_offset = 0x8000, | |
3214 | .hfb_reg_offset = 0xfc00, | |
3215 | .rdma_offset = 0x10000, | |
3216 | .tdma_offset = 0x11000, | |
3217 | .words_per_bd = 2, | |
8d88c6eb PG |
3218 | .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR | |
3219 | GENET_HAS_MOCA_LINK_DET, | |
1c1008c7 FF |
3220 | }, |
3221 | [GENET_V4] = { | |
3222 | .tx_queues = 4, | |
51a966a7 | 3223 | .tx_bds_per_q = 32, |
7e906e02 | 3224 | .rx_queues = 0, |
3feafa02 | 3225 | .rx_bds_per_q = 0, |
1c1008c7 FF |
3226 | .bp_in_en_shift = 17, |
3227 | .bp_in_mask = 0x1ffff, | |
3228 | .hfb_filter_cnt = 48, | |
0034de41 | 3229 | .hfb_filter_size = 128, |
1c1008c7 FF |
3230 | .qtag_mask = 0x3F, |
3231 | .tbuf_offset = 0x0600, | |
3232 | .hfb_offset = 0x8000, | |
3233 | .hfb_reg_offset = 0xfc00, | |
3234 | .rdma_offset = 0x2000, | |
3235 | .tdma_offset = 0x4000, | |
3236 | .words_per_bd = 3, | |
8d88c6eb PG |
3237 | .flags = GENET_HAS_40BITS | GENET_HAS_EXT | |
3238 | GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, | |
1c1008c7 FF |
3239 | }, |
3240 | }; | |
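/* Editor's worked example, assuming TOTAL_DESC is 256: on GENET_V2-V4
 * the four priority TX rings claim 4 * 32 = 128 BDs, leaving ring 16
 * with GENET_Q16_TX_BD_CNT = 256 - 128 = 128; on GENET_V1, with no
 * hardware TX queues, ring 16 owns all 256.  With rx_queues = 0 across
 * the board, GENET_Q16_RX_BD_CNT likewise resolves to the full 256.
 */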
3241 | ||
3242 | /* Infer hardware parameters from the detected GENET version */ | |
3243 | static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) | |
3244 | { | |
3245 | struct bcmgenet_hw_params *params; | |
3246 | u32 reg; | |
3247 | u8 major; | |
b04a2f5b | 3248 | u16 gphy_rev; |
1c1008c7 FF |
3249 | |
3250 | if (GENET_IS_V4(priv)) { | |
3251 | bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; | |
3252 | genet_dma_ring_regs = genet_dma_ring_regs_v4; | |
3253 | priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; | |
3254 | priv->version = GENET_V4; | |
3255 | } else if (GENET_IS_V3(priv)) { | |
3256 | bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; | |
3257 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | |
3258 | priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; | |
3259 | priv->version = GENET_V3; | |
3260 | } else if (GENET_IS_V2(priv)) { | |
3261 | bcmgenet_dma_regs = bcmgenet_dma_regs_v2; | |
3262 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | |
3263 | priv->dma_rx_chk_bit = DMA_RX_CHK_V12; | |
3264 | priv->version = GENET_V2; | |
3265 | } else if (GENET_IS_V1(priv)) { | |
3266 | bcmgenet_dma_regs = bcmgenet_dma_regs_v1; | |
3267 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | |
3268 | priv->dma_rx_chk_bit = DMA_RX_CHK_V12; | |
3269 | priv->version = GENET_V1; | |
3270 | } | |
3271 | ||
3272 | /* enum genet_version starts at 1 */ | |
3273 | priv->hw_params = &bcmgenet_hw_params[priv->version]; | |
3274 | params = priv->hw_params; | |
3275 | ||
3276 | /* Read GENET HW version */ | |
3277 | reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); | |
3278 | major = (reg >> 24) & 0x0f; | |
3279 | if (major == 5) | |
3280 | major = 4; | |
3281 | else if (major == 0) | |
3282 | major = 1; | |
3283 | if (major != priv->version) { | |
3284 | dev_err(&priv->pdev->dev, | |
3285 | "GENET version mismatch, got: %d, configured for: %d\n", | |
3286 | major, priv->version); | |
3287 | } | |
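/* Editor's worked example of the SYS_REV_CTRL decode in this function:
 * a value of 0x04020030 gives major = (reg >> 24) & 0xf = 4, minor =
 * (reg >> 16) & 0xf = 2 and a PHY revision of reg & 0xffff = 0x0030;
 * a reported major of 5 is normalized to 4 and 0 to 1 before the
 * mismatch check above.
 */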
3288 | ||
3289 | /* Print the GENET core version */ | |
3290 | dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, | |
c91b7f66 | 3291 | major, (reg >> 16) & 0x0f, reg & 0xffff); |
1c1008c7 | 3292 | |
487320c5 FF |
3293 | /* Store the integrated PHY revision for the MDIO probing function |
3294 | * to pass this information to the PHY driver. The PHY driver expects | |
3295 | * to find the PHY major revision in bits 15:8 while the GENET register | |
3296 | * stores that information in bits 7:0; account for that. | |
b04a2f5b FF |
3297 | * |
3298 | * On newer chips, starting with PHY revision G0, a new scheme is | |
3299 | * deployed similar to the Starfighter 2 switch with GPHY major | |
3300 | * revision in bits 15:8 and patch level in bits 7:0. Major revision 0 | |
3301 | * is reserved as well as special value 0x01ff, we have a small | |
3302 | * heuristic to check for the new GPHY revision and re-arrange things | |
3303 | * so the GPHY driver is happy. | |
487320c5 | 3304 | */ |
b04a2f5b FF |
3305 | gphy_rev = reg & 0xffff; |
3306 | ||
3307 | /* This is the good old scheme, just GPHY major, no minor nor patch */ | |
3308 | if ((gphy_rev & 0xf0) != 0) | |
3309 | priv->gphy_rev = gphy_rev << 8; | |
3310 | ||
3311 | /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */ | |
3312 | else if ((gphy_rev & 0xff00) != 0) | |
3313 | priv->gphy_rev = gphy_rev; | |
3314 | ||
3315 | /* This is reserved so should require special treatment */ | |
3316 | else if (gphy_rev == 0 || gphy_rev == 0x01ff) { | |
3317 | pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); | |
3318 | return; | |
3319 | } | |
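/* Editor's worked example for the heuristic above: an old-scheme value
 * such as 0x0040 becomes priv->gphy_rev = 0x4000, placing the major
 * revision in bits 15:8 as the PHY driver expects; a new-scheme value
 * such as 0x1000 (rev G0) already has that layout and is kept as-is,
 * while 0x0000 and 0x01ff are rejected as reserved.
 */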
487320c5 | 3320 | |
1c1008c7 FF |
3321 | #ifdef CONFIG_PHYS_ADDR_T_64BIT |
3322 | if (!(params->flags & GENET_HAS_40BITS)) | |
3323 | pr_warn("GENET does not support 40-bit PA\n"); | |
3324 | #endif | |
3325 | ||
3326 | pr_debug("Configuration for version: %d\n" | |
3feafa02 | 3327 | "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n" |
1c1008c7 FF |
3328 | "BP << en: %2d, BP msk: 0x%05x\n" |
3329 | "HFB count: %2d, QTAG msk: 0x%05x\n" | |
3330 | "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" | |
3331 | "RDMA: 0x%05x, TDMA: 0x%05x\n" | |
3332 | "Words/BD: %d\n", | |
3333 | priv->version, | |
51a966a7 | 3334 | params->tx_queues, params->tx_bds_per_q, |
3feafa02 | 3335 | params->rx_queues, params->rx_bds_per_q, |
1c1008c7 FF |
3336 | params->bp_in_en_shift, params->bp_in_mask, |
3337 | params->hfb_filter_cnt, params->qtag_mask, | |
3338 | params->tbuf_offset, params->hfb_offset, | |
3339 | params->hfb_reg_offset, | |
3340 | params->rdma_offset, params->tdma_offset, | |
3341 | params->words_per_bd); | |
3342 | } | |
3343 | ||
3344 | static const struct of_device_id bcmgenet_match[] = { | |
3345 | { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 }, | |
3346 | { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 }, | |
3347 | { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 }, | |
3348 | { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, | |
3349 | { }, | |
3350 | }; | |
e8048e55 | 3351 | MODULE_DEVICE_TABLE(of, bcmgenet_match); |
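/* Editor's sketch of a matching device-tree node (all property values
 * are illustrative, not from a real board; the brcm,genet binding is
 * authoritative):
 *
 *	ethernet@f0b60000 {
 *		compatible = "brcm,genet-v4";
 *		reg = <0xf0b60000 0x11c00>;
 *		interrupts = <0x0 0x18 0x0>,	// irq0: ring 16 + misc
 *			     <0x0 0x19 0x0>,	// irq1: priority rings
 *			     <0x0 0x1a 0x0>;	// optional Wake-on-LAN
 *		local-mac-address = [ 00 10 18 36 23 1a ];
 *	};
 */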
1c1008c7 FF |
3352 | |
3353 | static int bcmgenet_probe(struct platform_device *pdev) | |
3354 | { | |
b0ba512e | 3355 | struct bcmgenet_platform_data *pd = pdev->dev.platform_data; |
1c1008c7 | 3356 | struct device_node *dn = pdev->dev.of_node; |
b0ba512e | 3357 | const struct of_device_id *of_id = NULL; |
1c1008c7 FF |
3358 | struct bcmgenet_priv *priv; |
3359 | struct net_device *dev; | |
3360 | const void *macaddr; | |
3361 | struct resource *r; | |
3362 | int err = -EIO; | |
3363 | ||
3feafeed PG |
3364 | /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ |
3365 | dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, | |
3366 | GENET_MAX_MQ_CNT + 1); | |
1c1008c7 FF |
3367 | if (!dev) { |
3368 | dev_err(&pdev->dev, "can't allocate net device\n"); | |
3369 | return -ENOMEM; | |
3370 | } | |
3371 | ||
b0ba512e PG |
3372 | if (dn) { |
3373 | of_id = of_match_node(bcmgenet_match, dn); | |
3374 | if (!of_id) | |
3375 | return -EINVAL; | |
3376 | } | |
1c1008c7 FF |
3377 | |
3378 | priv = netdev_priv(dev); | |
3379 | priv->irq0 = platform_get_irq(pdev, 0); | |
3380 | priv->irq1 = platform_get_irq(pdev, 1); | |
8562056f | 3381 | priv->wol_irq = platform_get_irq(pdev, 2); |
1c1008c7 FF |
3382 | if (priv->irq0 <= 0 || priv->irq1 <= 0) { | |
3383 | dev_err(&pdev->dev, "can't find IRQs\n"); | |
3384 | err = -EINVAL; | |
3385 | goto err; | |
3386 | } | |
3387 | ||
b0ba512e PG |
3388 | if (dn) { |
3389 | macaddr = of_get_mac_address(dn); | |
3390 | if (!macaddr) { | |
3391 | dev_err(&pdev->dev, "can't find MAC address\n"); | |
3392 | err = -EINVAL; | |
3393 | goto err; | |
3394 | } | |
3395 | } else { | |
3396 | macaddr = pd->mac_address; | |
1c1008c7 FF |
3397 | } |
3398 | ||
3399 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
5343a10d FE |
3400 | priv->base = devm_ioremap_resource(&pdev->dev, r); |
3401 | if (IS_ERR(priv->base)) { | |
3402 | err = PTR_ERR(priv->base); | |
1c1008c7 FF |
3403 | goto err; |
3404 | } | |
3405 | ||
3406 | SET_NETDEV_DEV(dev, &pdev->dev); | |
3407 | dev_set_drvdata(&pdev->dev, dev); | |
3408 | ether_addr_copy(dev->dev_addr, macaddr); | |
3409 | dev->watchdog_timeo = 2 * HZ; | |
7ad24ea4 | 3410 | dev->ethtool_ops = &bcmgenet_ethtool_ops; |
1c1008c7 | 3411 | dev->netdev_ops = &bcmgenet_netdev_ops; |
1c1008c7 FF |
3412 | |
3413 | priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); | |
3414 | ||
3415 | /* Set hardware features */ | |
3416 | dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | | |
3417 | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; | |
3418 | ||
8562056f FF |
3419 | /* Request the WOL interrupt and advertise suspend if available */ |
3420 | priv->wol_irq_disabled = true; | |
3421 | err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0, | |
3422 | dev->name, priv); | |
3423 | if (!err) | |
3424 | device_set_wakeup_capable(&pdev->dev, 1); | |
3425 | ||
1c1008c7 FF |
3426 | /* Set the needed headroom to account for any possible |
3427 | * features enabling/disabling at runtime | |
3428 | */ | |
3429 | dev->needed_headroom += 64; | |
3430 | ||
3431 | netdev_boot_setup_check(dev); | |
3432 | ||
3433 | priv->dev = dev; | |
3434 | priv->pdev = pdev; | |
b0ba512e PG |
3435 | if (of_id) |
3436 | priv->version = (enum bcmgenet_version)of_id->data; | |
3437 | else | |
3438 | priv->version = pd->genet_version; | |
1c1008c7 | 3439 | |
e4a60a93 | 3440 | priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); |
7d5d3075 | 3441 | if (IS_ERR(priv->clk)) { |
e4a60a93 | 3442 | dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); |
7d5d3075 FF |
3443 | priv->clk = NULL; |
3444 | } | |
e4a60a93 | 3445 | |
7d5d3075 | 3446 | clk_prepare_enable(priv->clk); |
e4a60a93 | 3447 | |
1c1008c7 FF |
3448 | bcmgenet_set_hw_params(priv); |
3449 | ||
1c1008c7 FF |
3450 | /* Mii wait queue */ |
3451 | init_waitqueue_head(&priv->wq); | |
3452 | /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ | |
3453 | priv->rx_buf_len = RX_BUF_LENGTH; | |
3454 | INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); | |
3455 | ||
1c1008c7 | 3456 | priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); |
7d5d3075 | 3457 | if (IS_ERR(priv->clk_wol)) { |
1c1008c7 | 3458 | dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); |
7d5d3075 FF |
3459 | priv->clk_wol = NULL; |
3460 | } | |
1c1008c7 | 3461 | |
6ef398ea FF |
3462 | priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee"); |
3463 | if (IS_ERR(priv->clk_eee)) { | |
3464 | dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n"); | |
3465 | priv->clk_eee = NULL; | |
3466 | } | |
3467 | ||
1c1008c7 FF |
3468 | err = reset_umac(priv); |
3469 | if (err) | |
3470 | goto err_clk_disable; | |
3471 | ||
3472 | err = bcmgenet_mii_init(dev); | |
3473 | if (err) | |
3474 | goto err_clk_disable; | |
3475 | ||
3476 | /* Set up the number of real queues + 1 (GENET_V1 has 0 hardware | |
3477 | * queues, just the ring 16 descriptor-based TX queue) | |
3478 | */ | |
3479 | netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); | |
3480 | netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); | |
3481 | ||
219575eb FF |
3482 | /* libphy will determine the link state */ |
3483 | netif_carrier_off(dev); | |
3484 | ||
1c1008c7 | 3485 | /* Turn off the main clock; the WOL clock is handled separately */ | |
7d5d3075 | 3486 | clk_disable_unprepare(priv->clk); |
1c1008c7 | 3487 | |
0f50ce96 FF |
3488 | err = register_netdev(dev); |
3489 | if (err) | |
3490 | goto err; | |
3491 | ||
1c1008c7 FF |
3492 | return err; |
3493 | ||
3494 | err_clk_disable: | |
7d5d3075 | 3495 | clk_disable_unprepare(priv->clk); |
1c1008c7 FF |
3496 | err: |
3497 | free_netdev(dev); | |
3498 | return err; | |
3499 | } | |
3500 | ||
3501 | static int bcmgenet_remove(struct platform_device *pdev) | |
3502 | { | |
3503 | struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); | |
3504 | ||
3505 | dev_set_drvdata(&pdev->dev, NULL); | |
3506 | unregister_netdev(priv->dev); | |
3507 | bcmgenet_mii_exit(priv->dev); | |
3508 | free_netdev(priv->dev); | |
3509 | ||
3510 | return 0; | |
3511 | } | |
3512 | ||
b6e978e5 FF |
3513 | #ifdef CONFIG_PM_SLEEP |
3514 | static int bcmgenet_suspend(struct device *d) | |
3515 | { | |
3516 | struct net_device *dev = dev_get_drvdata(d); | |
3517 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
3518 | int ret; | |
3519 | ||
3520 | if (!netif_running(dev)) | |
3521 | return 0; | |
3522 | ||
3523 | bcmgenet_netif_stop(dev); | |
3524 | ||
cc013fb4 FF |
3525 | phy_suspend(priv->phydev); |
3526 | ||
b6e978e5 FF |
3527 | netif_device_detach(dev); |
3528 | ||
3529 | /* Disable MAC receive */ | |
3530 | umac_enable_set(priv, CMD_RX_EN, false); | |
3531 | ||
3532 | ret = bcmgenet_dma_teardown(priv); | |
3533 | if (ret) | |
3534 | return ret; | |
3535 | ||
3536 | /* Disable MAC transmit. TX DMA must be disabled before this */ | |
3537 | umac_enable_set(priv, CMD_TX_EN, false); | |
3538 | ||
3539 | /* tx reclaim */ | |
3540 | bcmgenet_tx_reclaim_all(dev); | |
3541 | bcmgenet_fini_dma(priv); | |
3542 | ||
8c90db72 FF |
3543 | /* Prepare the device for Wake-on-LAN and switch to the slow clock */ |
3544 | if (device_may_wakeup(d) && priv->wolopts) { | |
ca8cf341 | 3545 | ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); |
8c90db72 | 3546 | clk_prepare_enable(priv->clk_wol); |
c624f891 | 3547 | } else if (priv->internal_phy) { |
a6f31f5e | 3548 | ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); |
8c90db72 FF |
3549 | } |
3550 | ||
b6e978e5 FF |
3551 | /* Turn off the clocks */ |
3552 | clk_disable_unprepare(priv->clk); | |
3553 | ||
ca8cf341 | 3554 | return ret; |
b6e978e5 FF |
3555 | } |
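/* Editor's note on the clocking above: with Wake-on-LAN armed the MAC
 * is handed over to the slow enet-wol clock (clk_wol is prepared here)
 * before the main enet clock is gated, so magic-packet detection keeps
 * running across suspend; bcmgenet_resume() disables clk_wol again once
 * the fast clock is restored.
 */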
3556 | ||
3557 | static int bcmgenet_resume(struct device *d) | |
3558 | { | |
3559 | struct net_device *dev = dev_get_drvdata(d); | |
3560 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
3561 | unsigned long dma_ctrl; | |
3562 | int ret; | |
3563 | u32 reg; | |
3564 | ||
3565 | if (!netif_running(dev)) | |
3566 | return 0; | |
3567 | ||
3568 | /* Turn on the clock */ | |
3569 | ret = clk_prepare_enable(priv->clk); | |
3570 | if (ret) | |
3571 | return ret; | |
3572 | ||
a6f31f5e FF |
3573 | /* If this is an internal GPHY, power it back on now, before UniMAC is |
3574 | * brought out of reset as absolutely no UniMAC activity is allowed | |
3575 | */ | |
c624f891 | 3576 | if (priv->internal_phy) |
a6f31f5e FF |
3577 | bcmgenet_power_up(priv, GENET_POWER_PASSIVE); |
3578 | ||
b6e978e5 FF |
3579 | bcmgenet_umac_reset(priv); |
3580 | ||
3581 | ret = init_umac(priv); | |
3582 | if (ret) | |
3583 | goto out_clk_disable; | |
3584 | ||
0a29b3da TK |
3585 | /* From WOL-enabled suspend, switch to regular clock */ |
3586 | if (priv->wolopts) | |
3587 | clk_disable_unprepare(priv->clk_wol); | |
3588 | ||
3589 | phy_init_hw(priv->phydev); | |
3590 | /* Speed settings must be restored */ | |
28b45910 | 3591 | bcmgenet_mii_config(priv->dev); |
8c90db72 | 3592 | |
b6e978e5 FF |
3593 | /* disable ethernet MAC while updating its registers */ |
3594 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); | |
3595 | ||
3596 | bcmgenet_set_hw_addr(priv, dev->dev_addr); | |
3597 | ||
c624f891 | 3598 | if (priv->internal_phy) { |
b6e978e5 FF |
3599 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); |
3600 | reg |= EXT_ENERGY_DET_MASK; | |
3601 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | |
3602 | } | |
3603 | ||
98bb7399 FF |
3604 | if (priv->wolopts) |
3605 | bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); | |
3606 | ||
b6e978e5 FF |
3607 | /* Disable RX/TX DMA and flush TX queues */ |
3608 | dma_ctrl = bcmgenet_dma_disable(priv); | |
3609 | ||
3610 | /* Reinitialize TDMA and RDMA and SW housekeeping */ | |
3611 | ret = bcmgenet_init_dma(priv); | |
3612 | if (ret) { | |
3613 | netdev_err(dev, "failed to initialize DMA\n"); | |
3614 | goto out_clk_disable; | |
3615 | } | |
3616 | ||
3617 | /* Always enable ring 16 - the default descriptor ring */ | |
3618 | bcmgenet_enable_dma(priv, dma_ctrl); | |
3619 | ||
3620 | netif_device_attach(dev); | |
3621 | ||
cc013fb4 FF |
3622 | phy_resume(priv->phydev); |
3623 | ||
6ef398ea FF |
3624 | if (priv->eee.eee_enabled) |
3625 | bcmgenet_eee_enable_set(dev, true); | |
3626 | ||
b6e978e5 FF |
3627 | bcmgenet_netif_start(dev); |
3628 | ||
3629 | return 0; | |
3630 | ||
3631 | out_clk_disable: | |
3632 | clk_disable_unprepare(priv->clk); | |
3633 | return ret; | |
3634 | } | |
3635 | #endif /* CONFIG_PM_SLEEP */ | |
3636 | ||
3637 | static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume); | |
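/* Editor's note: SIMPLE_DEV_PM_OPS() expands to a struct dev_pm_ops
 * that wires bcmgenet_suspend/bcmgenet_resume into the system-sleep
 * callbacks only when CONFIG_PM_SLEEP is set, which is why the two
 * functions above sit behind the matching #ifdef.
 */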
3638 | ||
1c1008c7 FF |
3639 | static struct platform_driver bcmgenet_driver = { |
3640 | .probe = bcmgenet_probe, | |
3641 | .remove = bcmgenet_remove, | |
3642 | .driver = { | |
3643 | .name = "bcmgenet", | |
1c1008c7 | 3644 | .of_match_table = bcmgenet_match, |
b6e978e5 | 3645 | .pm = &bcmgenet_pm_ops, |
1c1008c7 FF |
3646 | }, |
3647 | }; | |
3648 | module_platform_driver(bcmgenet_driver); | |
3649 | ||
3650 | MODULE_AUTHOR("Broadcom Corporation"); | |
3651 | MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver"); | |
3652 | MODULE_ALIAS("platform:bcmgenet"); | |
3653 | MODULE_LICENSE("GPL"); |