]>
Commit | Line | Data |
---|---|---|
1c1008c7 FF |
1 | /* |
2 | * Broadcom GENET (Gigabit Ethernet) controller driver | |
3 | * | |
4 | * Copyright (c) 2014 Broadcom Corporation | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
1c1008c7 FF |
9 | */ |
10 | ||
11 | #define pr_fmt(fmt) "bcmgenet: " fmt | |
12 | ||
13 | #include <linux/kernel.h> | |
14 | #include <linux/module.h> | |
15 | #include <linux/sched.h> | |
16 | #include <linux/types.h> | |
17 | #include <linux/fcntl.h> | |
18 | #include <linux/interrupt.h> | |
19 | #include <linux/string.h> | |
20 | #include <linux/if_ether.h> | |
21 | #include <linux/init.h> | |
22 | #include <linux/errno.h> | |
23 | #include <linux/delay.h> | |
24 | #include <linux/platform_device.h> | |
25 | #include <linux/dma-mapping.h> | |
26 | #include <linux/pm.h> | |
27 | #include <linux/clk.h> | |
1c1008c7 FF |
28 | #include <linux/of.h> |
29 | #include <linux/of_address.h> | |
30 | #include <linux/of_irq.h> | |
31 | #include <linux/of_net.h> | |
32 | #include <linux/of_platform.h> | |
33 | #include <net/arp.h> | |
34 | ||
35 | #include <linux/mii.h> | |
36 | #include <linux/ethtool.h> | |
37 | #include <linux/netdevice.h> | |
38 | #include <linux/inetdevice.h> | |
39 | #include <linux/etherdevice.h> | |
40 | #include <linux/skbuff.h> | |
41 | #include <linux/in.h> | |
42 | #include <linux/ip.h> | |
43 | #include <linux/ipv6.h> | |
44 | #include <linux/phy.h> | |
b0ba512e | 45 | #include <linux/platform_data/bcmgenet.h> |
1c1008c7 FF |
46 | |
47 | #include <asm/unaligned.h> | |
48 | ||
49 | #include "bcmgenet.h" | |
50 | ||
51 | /* Maximum number of hardware queues, downsized if needed */ | |
52 | #define GENET_MAX_MQ_CNT 4 | |
53 | ||
54 | /* Default highest priority queue for multi queue support */ | |
55 | #define GENET_Q0_PRIORITY 0 | |
56 | ||
3feafa02 PG |
57 | #define GENET_Q16_RX_BD_CNT \ |
58 | (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q) | |
51a966a7 PG |
59 | #define GENET_Q16_TX_BD_CNT \ |
60 | (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q) | |
1c1008c7 FF |
61 | |
62 | #define RX_BUF_LENGTH 2048 | |
63 | #define SKB_ALIGNMENT 32 | |
64 | ||
65 | /* Tx/Rx DMA register offset, skip 256 descriptors */ | |
66 | #define WORDS_PER_BD(p) (p->hw_params->words_per_bd) | |
67 | #define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32)) | |
68 | ||
69 | #define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \ | |
70 | TOTAL_DESC * DMA_DESC_SIZE) | |
71 | ||
72 | #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ | |
73 | TOTAL_DESC * DMA_DESC_SIZE) | |
74 | ||
75 | static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, | |
c91b7f66 | 76 | void __iomem *d, u32 value) |
1c1008c7 FF |
77 | { |
78 | __raw_writel(value, d + DMA_DESC_LENGTH_STATUS); | |
79 | } | |
80 | ||
81 | static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv, | |
c91b7f66 | 82 | void __iomem *d) |
1c1008c7 FF |
83 | { |
84 | return __raw_readl(d + DMA_DESC_LENGTH_STATUS); | |
85 | } | |
86 | ||
87 | static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, | |
88 | void __iomem *d, | |
89 | dma_addr_t addr) | |
90 | { | |
91 | __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); | |
92 | ||
93 | /* Register writes to GISB bus can take couple hundred nanoseconds | |
94 | * and are done for each packet, save these expensive writes unless | |
7fc527f9 | 95 | * the platform is explicitly configured for 64-bits/LPAE. |
1c1008c7 FF |
96 | */ |
97 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | |
98 | if (priv->hw_params->flags & GENET_HAS_40BITS) | |
99 | __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); | |
100 | #endif | |
101 | } | |
102 | ||
103 | /* Combined address + length/status setter */ | |
104 | static inline void dmadesc_set(struct bcmgenet_priv *priv, | |
c91b7f66 | 105 | void __iomem *d, dma_addr_t addr, u32 val) |
1c1008c7 FF |
106 | { |
107 | dmadesc_set_length_status(priv, d, val); | |
108 | dmadesc_set_addr(priv, d, addr); | |
109 | } | |
110 | ||
111 | static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, | |
112 | void __iomem *d) | |
113 | { | |
114 | dma_addr_t addr; | |
115 | ||
116 | addr = __raw_readl(d + DMA_DESC_ADDRESS_LO); | |
117 | ||
118 | /* Register writes to GISB bus can take couple hundred nanoseconds | |
119 | * and are done for each packet, save these expensive writes unless | |
7fc527f9 | 120 | * the platform is explicitly configured for 64-bits/LPAE. |
1c1008c7 FF |
121 | */ |
122 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | |
123 | if (priv->hw_params->flags & GENET_HAS_40BITS) | |
124 | addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32; | |
125 | #endif | |
126 | return addr; | |
127 | } | |
128 | ||
129 | #define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x" | |
130 | ||
131 | #define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ | |
132 | NETIF_MSG_LINK) | |
133 | ||
134 | static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv) | |
135 | { | |
136 | if (GENET_IS_V1(priv)) | |
137 | return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1); | |
138 | else | |
139 | return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL); | |
140 | } | |
141 | ||
142 | static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) | |
143 | { | |
144 | if (GENET_IS_V1(priv)) | |
145 | bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1); | |
146 | else | |
147 | bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL); | |
148 | } | |
149 | ||
150 | /* These macros are defined to deal with register map change | |
151 | * between GENET1.1 and GENET2. Only those currently being used | |
152 | * by driver are defined. | |
153 | */ | |
154 | static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) | |
155 | { | |
156 | if (GENET_IS_V1(priv)) | |
157 | return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); | |
158 | else | |
159 | return __raw_readl(priv->base + | |
160 | priv->hw_params->tbuf_offset + TBUF_CTRL); | |
161 | } | |
162 | ||
163 | static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) | |
164 | { | |
165 | if (GENET_IS_V1(priv)) | |
166 | bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); | |
167 | else | |
168 | __raw_writel(val, priv->base + | |
169 | priv->hw_params->tbuf_offset + TBUF_CTRL); | |
170 | } | |
171 | ||
172 | static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) | |
173 | { | |
174 | if (GENET_IS_V1(priv)) | |
175 | return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); | |
176 | else | |
177 | return __raw_readl(priv->base + | |
178 | priv->hw_params->tbuf_offset + TBUF_BP_MC); | |
179 | } | |
180 | ||
181 | static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) | |
182 | { | |
183 | if (GENET_IS_V1(priv)) | |
184 | bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); | |
185 | else | |
186 | __raw_writel(val, priv->base + | |
187 | priv->hw_params->tbuf_offset + TBUF_BP_MC); | |
188 | } | |
189 | ||
190 | /* RX/TX DMA register accessors */ | |
191 | enum dma_reg { | |
192 | DMA_RING_CFG = 0, | |
193 | DMA_CTRL, | |
194 | DMA_STATUS, | |
195 | DMA_SCB_BURST_SIZE, | |
196 | DMA_ARB_CTRL, | |
37742166 PG |
197 | DMA_PRIORITY_0, |
198 | DMA_PRIORITY_1, | |
199 | DMA_PRIORITY_2, | |
0034de41 PG |
200 | DMA_INDEX2RING_0, |
201 | DMA_INDEX2RING_1, | |
202 | DMA_INDEX2RING_2, | |
203 | DMA_INDEX2RING_3, | |
204 | DMA_INDEX2RING_4, | |
205 | DMA_INDEX2RING_5, | |
206 | DMA_INDEX2RING_6, | |
207 | DMA_INDEX2RING_7, | |
1c1008c7 FF |
208 | }; |
209 | ||
210 | static const u8 bcmgenet_dma_regs_v3plus[] = { | |
211 | [DMA_RING_CFG] = 0x00, | |
212 | [DMA_CTRL] = 0x04, | |
213 | [DMA_STATUS] = 0x08, | |
214 | [DMA_SCB_BURST_SIZE] = 0x0C, | |
215 | [DMA_ARB_CTRL] = 0x2C, | |
37742166 PG |
216 | [DMA_PRIORITY_0] = 0x30, |
217 | [DMA_PRIORITY_1] = 0x34, | |
218 | [DMA_PRIORITY_2] = 0x38, | |
0034de41 PG |
219 | [DMA_INDEX2RING_0] = 0x70, |
220 | [DMA_INDEX2RING_1] = 0x74, | |
221 | [DMA_INDEX2RING_2] = 0x78, | |
222 | [DMA_INDEX2RING_3] = 0x7C, | |
223 | [DMA_INDEX2RING_4] = 0x80, | |
224 | [DMA_INDEX2RING_5] = 0x84, | |
225 | [DMA_INDEX2RING_6] = 0x88, | |
226 | [DMA_INDEX2RING_7] = 0x8C, | |
1c1008c7 FF |
227 | }; |
228 | ||
229 | static const u8 bcmgenet_dma_regs_v2[] = { | |
230 | [DMA_RING_CFG] = 0x00, | |
231 | [DMA_CTRL] = 0x04, | |
232 | [DMA_STATUS] = 0x08, | |
233 | [DMA_SCB_BURST_SIZE] = 0x0C, | |
234 | [DMA_ARB_CTRL] = 0x30, | |
37742166 PG |
235 | [DMA_PRIORITY_0] = 0x34, |
236 | [DMA_PRIORITY_1] = 0x38, | |
237 | [DMA_PRIORITY_2] = 0x3C, | |
1c1008c7 FF |
238 | }; |
239 | ||
240 | static const u8 bcmgenet_dma_regs_v1[] = { | |
241 | [DMA_CTRL] = 0x00, | |
242 | [DMA_STATUS] = 0x04, | |
243 | [DMA_SCB_BURST_SIZE] = 0x0C, | |
244 | [DMA_ARB_CTRL] = 0x30, | |
37742166 PG |
245 | [DMA_PRIORITY_0] = 0x34, |
246 | [DMA_PRIORITY_1] = 0x38, | |
247 | [DMA_PRIORITY_2] = 0x3C, | |
1c1008c7 FF |
248 | }; |
249 | ||
250 | /* Set at runtime once bcmgenet version is known */ | |
251 | static const u8 *bcmgenet_dma_regs; | |
252 | ||
253 | static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) | |
254 | { | |
255 | return netdev_priv(dev_get_drvdata(dev)); | |
256 | } | |
257 | ||
258 | static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, | |
c91b7f66 | 259 | enum dma_reg r) |
1c1008c7 FF |
260 | { |
261 | return __raw_readl(priv->base + GENET_TDMA_REG_OFF + | |
262 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); | |
263 | } | |
264 | ||
265 | static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, | |
266 | u32 val, enum dma_reg r) | |
267 | { | |
268 | __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + | |
269 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); | |
270 | } | |
271 | ||
272 | static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, | |
c91b7f66 | 273 | enum dma_reg r) |
1c1008c7 FF |
274 | { |
275 | return __raw_readl(priv->base + GENET_RDMA_REG_OFF + | |
276 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); | |
277 | } | |
278 | ||
279 | static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, | |
280 | u32 val, enum dma_reg r) | |
281 | { | |
282 | __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + | |
283 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); | |
284 | } | |
285 | ||
286 | /* RDMA/TDMA ring registers and accessors | |
287 | * we merge the common fields and just prefix with T/D the registers | |
288 | * having different meaning depending on the direction | |
289 | */ | |
290 | enum dma_ring_reg { | |
291 | TDMA_READ_PTR = 0, | |
292 | RDMA_WRITE_PTR = TDMA_READ_PTR, | |
293 | TDMA_READ_PTR_HI, | |
294 | RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI, | |
295 | TDMA_CONS_INDEX, | |
296 | RDMA_PROD_INDEX = TDMA_CONS_INDEX, | |
297 | TDMA_PROD_INDEX, | |
298 | RDMA_CONS_INDEX = TDMA_PROD_INDEX, | |
299 | DMA_RING_BUF_SIZE, | |
300 | DMA_START_ADDR, | |
301 | DMA_START_ADDR_HI, | |
302 | DMA_END_ADDR, | |
303 | DMA_END_ADDR_HI, | |
304 | DMA_MBUF_DONE_THRESH, | |
305 | TDMA_FLOW_PERIOD, | |
306 | RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD, | |
307 | TDMA_WRITE_PTR, | |
308 | RDMA_READ_PTR = TDMA_WRITE_PTR, | |
309 | TDMA_WRITE_PTR_HI, | |
310 | RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI | |
311 | }; | |
312 | ||
313 | /* GENET v4 supports 40-bits pointer addressing | |
314 | * for obvious reasons the LO and HI word parts | |
315 | * are contiguous, but this offsets the other | |
316 | * registers. | |
317 | */ | |
318 | static const u8 genet_dma_ring_regs_v4[] = { | |
319 | [TDMA_READ_PTR] = 0x00, | |
320 | [TDMA_READ_PTR_HI] = 0x04, | |
321 | [TDMA_CONS_INDEX] = 0x08, | |
322 | [TDMA_PROD_INDEX] = 0x0C, | |
323 | [DMA_RING_BUF_SIZE] = 0x10, | |
324 | [DMA_START_ADDR] = 0x14, | |
325 | [DMA_START_ADDR_HI] = 0x18, | |
326 | [DMA_END_ADDR] = 0x1C, | |
327 | [DMA_END_ADDR_HI] = 0x20, | |
328 | [DMA_MBUF_DONE_THRESH] = 0x24, | |
329 | [TDMA_FLOW_PERIOD] = 0x28, | |
330 | [TDMA_WRITE_PTR] = 0x2C, | |
331 | [TDMA_WRITE_PTR_HI] = 0x30, | |
332 | }; | |
333 | ||
334 | static const u8 genet_dma_ring_regs_v123[] = { | |
335 | [TDMA_READ_PTR] = 0x00, | |
336 | [TDMA_CONS_INDEX] = 0x04, | |
337 | [TDMA_PROD_INDEX] = 0x08, | |
338 | [DMA_RING_BUF_SIZE] = 0x0C, | |
339 | [DMA_START_ADDR] = 0x10, | |
340 | [DMA_END_ADDR] = 0x14, | |
341 | [DMA_MBUF_DONE_THRESH] = 0x18, | |
342 | [TDMA_FLOW_PERIOD] = 0x1C, | |
343 | [TDMA_WRITE_PTR] = 0x20, | |
344 | }; | |
345 | ||
346 | /* Set at runtime once GENET version is known */ | |
347 | static const u8 *genet_dma_ring_regs; | |
348 | ||
349 | static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, | |
c91b7f66 FF |
350 | unsigned int ring, |
351 | enum dma_ring_reg r) | |
1c1008c7 FF |
352 | { |
353 | return __raw_readl(priv->base + GENET_TDMA_REG_OFF + | |
354 | (DMA_RING_SIZE * ring) + | |
355 | genet_dma_ring_regs[r]); | |
356 | } | |
357 | ||
358 | static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, | |
c91b7f66 FF |
359 | unsigned int ring, u32 val, |
360 | enum dma_ring_reg r) | |
1c1008c7 FF |
361 | { |
362 | __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + | |
363 | (DMA_RING_SIZE * ring) + | |
364 | genet_dma_ring_regs[r]); | |
365 | } | |
366 | ||
367 | static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, | |
c91b7f66 FF |
368 | unsigned int ring, |
369 | enum dma_ring_reg r) | |
1c1008c7 FF |
370 | { |
371 | return __raw_readl(priv->base + GENET_RDMA_REG_OFF + | |
372 | (DMA_RING_SIZE * ring) + | |
373 | genet_dma_ring_regs[r]); | |
374 | } | |
375 | ||
376 | static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, | |
c91b7f66 FF |
377 | unsigned int ring, u32 val, |
378 | enum dma_ring_reg r) | |
1c1008c7 FF |
379 | { |
380 | __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + | |
381 | (DMA_RING_SIZE * ring) + | |
382 | genet_dma_ring_regs[r]); | |
383 | } | |
384 | ||
385 | static int bcmgenet_get_settings(struct net_device *dev, | |
c91b7f66 | 386 | struct ethtool_cmd *cmd) |
1c1008c7 FF |
387 | { |
388 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
389 | ||
390 | if (!netif_running(dev)) | |
391 | return -EINVAL; | |
392 | ||
393 | if (!priv->phydev) | |
394 | return -ENODEV; | |
395 | ||
396 | return phy_ethtool_gset(priv->phydev, cmd); | |
397 | } | |
398 | ||
399 | static int bcmgenet_set_settings(struct net_device *dev, | |
c91b7f66 | 400 | struct ethtool_cmd *cmd) |
1c1008c7 FF |
401 | { |
402 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
403 | ||
404 | if (!netif_running(dev)) | |
405 | return -EINVAL; | |
406 | ||
407 | if (!priv->phydev) | |
408 | return -ENODEV; | |
409 | ||
410 | return phy_ethtool_sset(priv->phydev, cmd); | |
411 | } | |
412 | ||
413 | static int bcmgenet_set_rx_csum(struct net_device *dev, | |
414 | netdev_features_t wanted) | |
415 | { | |
416 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
417 | u32 rbuf_chk_ctrl; | |
418 | bool rx_csum_en; | |
419 | ||
420 | rx_csum_en = !!(wanted & NETIF_F_RXCSUM); | |
421 | ||
422 | rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); | |
423 | ||
424 | /* enable rx checksumming */ | |
425 | if (rx_csum_en) | |
426 | rbuf_chk_ctrl |= RBUF_RXCHK_EN; | |
427 | else | |
428 | rbuf_chk_ctrl &= ~RBUF_RXCHK_EN; | |
429 | priv->desc_rxchk_en = rx_csum_en; | |
ebe5e3c6 FF |
430 | |
431 | /* If UniMAC forwards CRC, we need to skip over it to get | |
432 | * a valid CHK bit to be set in the per-packet status word | |
433 | */ | |
434 | if (rx_csum_en && priv->crc_fwd_en) | |
435 | rbuf_chk_ctrl |= RBUF_SKIP_FCS; | |
436 | else | |
437 | rbuf_chk_ctrl &= ~RBUF_SKIP_FCS; | |
438 | ||
1c1008c7 FF |
439 | bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL); |
440 | ||
441 | return 0; | |
442 | } | |
443 | ||
444 | static int bcmgenet_set_tx_csum(struct net_device *dev, | |
445 | netdev_features_t wanted) | |
446 | { | |
447 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
448 | bool desc_64b_en; | |
449 | u32 tbuf_ctrl, rbuf_ctrl; | |
450 | ||
451 | tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv); | |
452 | rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL); | |
453 | ||
454 | desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); | |
455 | ||
456 | /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */ | |
457 | if (desc_64b_en) { | |
458 | tbuf_ctrl |= RBUF_64B_EN; | |
459 | rbuf_ctrl |= RBUF_64B_EN; | |
460 | } else { | |
461 | tbuf_ctrl &= ~RBUF_64B_EN; | |
462 | rbuf_ctrl &= ~RBUF_64B_EN; | |
463 | } | |
464 | priv->desc_64b_en = desc_64b_en; | |
465 | ||
466 | bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl); | |
467 | bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL); | |
468 | ||
469 | return 0; | |
470 | } | |
471 | ||
472 | static int bcmgenet_set_features(struct net_device *dev, | |
c91b7f66 | 473 | netdev_features_t features) |
1c1008c7 FF |
474 | { |
475 | netdev_features_t changed = features ^ dev->features; | |
476 | netdev_features_t wanted = dev->wanted_features; | |
477 | int ret = 0; | |
478 | ||
479 | if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) | |
480 | ret = bcmgenet_set_tx_csum(dev, wanted); | |
481 | if (changed & (NETIF_F_RXCSUM)) | |
482 | ret = bcmgenet_set_rx_csum(dev, wanted); | |
483 | ||
484 | return ret; | |
485 | } | |
486 | ||
487 | static u32 bcmgenet_get_msglevel(struct net_device *dev) | |
488 | { | |
489 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
490 | ||
491 | return priv->msg_enable; | |
492 | } | |
493 | ||
494 | static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) | |
495 | { | |
496 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
497 | ||
498 | priv->msg_enable = level; | |
499 | } | |
500 | ||
501 | /* standard ethtool support functions. */ | |
502 | enum bcmgenet_stat_type { | |
503 | BCMGENET_STAT_NETDEV = -1, | |
504 | BCMGENET_STAT_MIB_RX, | |
505 | BCMGENET_STAT_MIB_TX, | |
506 | BCMGENET_STAT_RUNT, | |
507 | BCMGENET_STAT_MISC, | |
f62ba9c1 | 508 | BCMGENET_STAT_SOFT, |
1c1008c7 FF |
509 | }; |
510 | ||
511 | struct bcmgenet_stats { | |
512 | char stat_string[ETH_GSTRING_LEN]; | |
513 | int stat_sizeof; | |
514 | int stat_offset; | |
515 | enum bcmgenet_stat_type type; | |
516 | /* reg offset from UMAC base for misc counters */ | |
517 | u16 reg_offset; | |
518 | }; | |
519 | ||
520 | #define STAT_NETDEV(m) { \ | |
521 | .stat_string = __stringify(m), \ | |
522 | .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ | |
523 | .stat_offset = offsetof(struct net_device_stats, m), \ | |
524 | .type = BCMGENET_STAT_NETDEV, \ | |
525 | } | |
526 | ||
527 | #define STAT_GENET_MIB(str, m, _type) { \ | |
528 | .stat_string = str, \ | |
529 | .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ | |
530 | .stat_offset = offsetof(struct bcmgenet_priv, m), \ | |
531 | .type = _type, \ | |
532 | } | |
533 | ||
534 | #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) | |
535 | #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) | |
536 | #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) | |
f62ba9c1 | 537 | #define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT) |
1c1008c7 FF |
538 | |
539 | #define STAT_GENET_MISC(str, m, offset) { \ | |
540 | .stat_string = str, \ | |
541 | .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ | |
542 | .stat_offset = offsetof(struct bcmgenet_priv, m), \ | |
543 | .type = BCMGENET_STAT_MISC, \ | |
544 | .reg_offset = offset, \ | |
545 | } | |
546 | ||
547 | ||
548 | /* There is a 0xC gap between the end of RX and beginning of TX stats and then | |
549 | * between the end of TX stats and the beginning of the RX RUNT | |
550 | */ | |
551 | #define BCMGENET_STAT_OFFSET 0xc | |
552 | ||
553 | /* Hardware counters must be kept in sync because the order/offset | |
554 | * is important here (order in structure declaration = order in hardware) | |
555 | */ | |
556 | static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { | |
557 | /* general stats */ | |
558 | STAT_NETDEV(rx_packets), | |
559 | STAT_NETDEV(tx_packets), | |
560 | STAT_NETDEV(rx_bytes), | |
561 | STAT_NETDEV(tx_bytes), | |
562 | STAT_NETDEV(rx_errors), | |
563 | STAT_NETDEV(tx_errors), | |
564 | STAT_NETDEV(rx_dropped), | |
565 | STAT_NETDEV(tx_dropped), | |
566 | STAT_NETDEV(multicast), | |
567 | /* UniMAC RSV counters */ | |
568 | STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), | |
569 | STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), | |
570 | STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), | |
571 | STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), | |
572 | STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), | |
573 | STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), | |
574 | STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), | |
575 | STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), | |
576 | STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), | |
577 | STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), | |
578 | STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt), | |
579 | STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes), | |
580 | STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca), | |
581 | STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca), | |
582 | STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs), | |
583 | STAT_GENET_MIB_RX("rx_control", mib.rx.cf), | |
584 | STAT_GENET_MIB_RX("rx_pause", mib.rx.pf), | |
585 | STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo), | |
586 | STAT_GENET_MIB_RX("rx_align", mib.rx.aln), | |
587 | STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr), | |
588 | STAT_GENET_MIB_RX("rx_code", mib.rx.cde), | |
589 | STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr), | |
590 | STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr), | |
591 | STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr), | |
592 | STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue), | |
593 | STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok), | |
594 | STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc), | |
595 | STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp), | |
596 | STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc), | |
597 | /* UniMAC TSV counters */ | |
598 | STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), | |
599 | STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), | |
600 | STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), | |
601 | STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), | |
602 | STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), | |
603 | STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), | |
604 | STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), | |
605 | STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), | |
606 | STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), | |
607 | STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), | |
608 | STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts), | |
609 | STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca), | |
610 | STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca), | |
611 | STAT_GENET_MIB_TX("tx_pause", mib.tx.pf), | |
612 | STAT_GENET_MIB_TX("tx_control", mib.tx.cf), | |
613 | STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs), | |
614 | STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr), | |
615 | STAT_GENET_MIB_TX("tx_defer", mib.tx.drf), | |
616 | STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf), | |
617 | STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl), | |
618 | STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl), | |
619 | STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl), | |
620 | STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl), | |
621 | STAT_GENET_MIB_TX("tx_frags", mib.tx.frg), | |
622 | STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl), | |
623 | STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr), | |
624 | STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes), | |
625 | STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok), | |
626 | STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc), | |
627 | /* UniMAC RUNT counters */ | |
628 | STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt), | |
629 | STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), | |
630 | STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), | |
631 | STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), | |
632 | /* Misc UniMAC counters */ | |
633 | STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, | |
634 | UMAC_RBUF_OVFL_CNT), | |
635 | STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), | |
636 | STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), | |
f62ba9c1 FF |
637 | STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), |
638 | STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), | |
639 | STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), | |
1c1008c7 FF |
640 | }; |
641 | ||
642 | #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) | |
643 | ||
644 | static void bcmgenet_get_drvinfo(struct net_device *dev, | |
c91b7f66 | 645 | struct ethtool_drvinfo *info) |
1c1008c7 FF |
646 | { |
647 | strlcpy(info->driver, "bcmgenet", sizeof(info->driver)); | |
648 | strlcpy(info->version, "v2.0", sizeof(info->version)); | |
649 | info->n_stats = BCMGENET_STATS_LEN; | |
1c1008c7 FF |
650 | } |
651 | ||
652 | static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) | |
653 | { | |
654 | switch (string_set) { | |
655 | case ETH_SS_STATS: | |
656 | return BCMGENET_STATS_LEN; | |
657 | default: | |
658 | return -EOPNOTSUPP; | |
659 | } | |
660 | } | |
661 | ||
c91b7f66 FF |
662 | static void bcmgenet_get_strings(struct net_device *dev, u32 stringset, |
663 | u8 *data) | |
1c1008c7 FF |
664 | { |
665 | int i; | |
666 | ||
667 | switch (stringset) { | |
668 | case ETH_SS_STATS: | |
669 | for (i = 0; i < BCMGENET_STATS_LEN; i++) { | |
670 | memcpy(data + i * ETH_GSTRING_LEN, | |
c91b7f66 FF |
671 | bcmgenet_gstrings_stats[i].stat_string, |
672 | ETH_GSTRING_LEN); | |
1c1008c7 FF |
673 | } |
674 | break; | |
675 | } | |
676 | } | |
677 | ||
678 | static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) | |
679 | { | |
680 | int i, j = 0; | |
681 | ||
682 | for (i = 0; i < BCMGENET_STATS_LEN; i++) { | |
683 | const struct bcmgenet_stats *s; | |
684 | u8 offset = 0; | |
685 | u32 val = 0; | |
686 | char *p; | |
687 | ||
688 | s = &bcmgenet_gstrings_stats[i]; | |
689 | switch (s->type) { | |
690 | case BCMGENET_STAT_NETDEV: | |
f62ba9c1 | 691 | case BCMGENET_STAT_SOFT: |
1c1008c7 FF |
692 | continue; |
693 | case BCMGENET_STAT_MIB_RX: | |
694 | case BCMGENET_STAT_MIB_TX: | |
695 | case BCMGENET_STAT_RUNT: | |
696 | if (s->type != BCMGENET_STAT_MIB_RX) | |
697 | offset = BCMGENET_STAT_OFFSET; | |
c91b7f66 FF |
698 | val = bcmgenet_umac_readl(priv, |
699 | UMAC_MIB_START + j + offset); | |
1c1008c7 FF |
700 | break; |
701 | case BCMGENET_STAT_MISC: | |
702 | val = bcmgenet_umac_readl(priv, s->reg_offset); | |
703 | /* clear if overflowed */ | |
704 | if (val == ~0) | |
705 | bcmgenet_umac_writel(priv, 0, s->reg_offset); | |
706 | break; | |
707 | } | |
708 | ||
709 | j += s->stat_sizeof; | |
710 | p = (char *)priv + s->stat_offset; | |
711 | *(u32 *)p = val; | |
712 | } | |
713 | } | |
714 | ||
715 | static void bcmgenet_get_ethtool_stats(struct net_device *dev, | |
c91b7f66 FF |
716 | struct ethtool_stats *stats, |
717 | u64 *data) | |
1c1008c7 FF |
718 | { |
719 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
720 | int i; | |
721 | ||
722 | if (netif_running(dev)) | |
723 | bcmgenet_update_mib_counters(priv); | |
724 | ||
725 | for (i = 0; i < BCMGENET_STATS_LEN; i++) { | |
726 | const struct bcmgenet_stats *s; | |
727 | char *p; | |
728 | ||
729 | s = &bcmgenet_gstrings_stats[i]; | |
730 | if (s->type == BCMGENET_STAT_NETDEV) | |
731 | p = (char *)&dev->stats; | |
732 | else | |
733 | p = (char *)priv; | |
734 | p += s->stat_offset; | |
735 | data[i] = *(u32 *)p; | |
736 | } | |
737 | } | |
738 | ||
6ef398ea FF |
739 | static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable) |
740 | { | |
741 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
742 | u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; | |
743 | u32 reg; | |
744 | ||
745 | if (enable && !priv->clk_eee_enabled) { | |
746 | clk_prepare_enable(priv->clk_eee); | |
747 | priv->clk_eee_enabled = true; | |
748 | } | |
749 | ||
750 | reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL); | |
751 | if (enable) | |
752 | reg |= EEE_EN; | |
753 | else | |
754 | reg &= ~EEE_EN; | |
755 | bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL); | |
756 | ||
757 | /* Enable EEE and switch to a 27Mhz clock automatically */ | |
758 | reg = __raw_readl(priv->base + off); | |
759 | if (enable) | |
760 | reg |= TBUF_EEE_EN | TBUF_PM_EN; | |
761 | else | |
762 | reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); | |
763 | __raw_writel(reg, priv->base + off); | |
764 | ||
765 | /* Do the same for thing for RBUF */ | |
766 | reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL); | |
767 | if (enable) | |
768 | reg |= RBUF_EEE_EN | RBUF_PM_EN; | |
769 | else | |
770 | reg &= ~(RBUF_EEE_EN | RBUF_PM_EN); | |
771 | bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL); | |
772 | ||
773 | if (!enable && priv->clk_eee_enabled) { | |
774 | clk_disable_unprepare(priv->clk_eee); | |
775 | priv->clk_eee_enabled = false; | |
776 | } | |
777 | ||
778 | priv->eee.eee_enabled = enable; | |
779 | priv->eee.eee_active = enable; | |
780 | } | |
781 | ||
782 | static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) | |
783 | { | |
784 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
785 | struct ethtool_eee *p = &priv->eee; | |
786 | ||
787 | if (GENET_IS_V1(priv)) | |
788 | return -EOPNOTSUPP; | |
789 | ||
790 | e->eee_enabled = p->eee_enabled; | |
791 | e->eee_active = p->eee_active; | |
792 | e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); | |
793 | ||
794 | return phy_ethtool_get_eee(priv->phydev, e); | |
795 | } | |
796 | ||
797 | static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) | |
798 | { | |
799 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
800 | struct ethtool_eee *p = &priv->eee; | |
801 | int ret = 0; | |
802 | ||
803 | if (GENET_IS_V1(priv)) | |
804 | return -EOPNOTSUPP; | |
805 | ||
806 | p->eee_enabled = e->eee_enabled; | |
807 | ||
808 | if (!p->eee_enabled) { | |
809 | bcmgenet_eee_enable_set(dev, false); | |
810 | } else { | |
811 | ret = phy_init_eee(priv->phydev, 0); | |
812 | if (ret) { | |
813 | netif_err(priv, hw, dev, "EEE initialization failed\n"); | |
814 | return ret; | |
815 | } | |
816 | ||
817 | bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); | |
818 | bcmgenet_eee_enable_set(dev, true); | |
819 | } | |
820 | ||
821 | return phy_ethtool_set_eee(priv->phydev, e); | |
822 | } | |
823 | ||
6b0c5406 FF |
824 | static int bcmgenet_nway_reset(struct net_device *dev) |
825 | { | |
826 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
827 | ||
828 | return genphy_restart_aneg(priv->phydev); | |
829 | } | |
830 | ||
1c1008c7 FF |
831 | /* standard ethtool support functions. */ |
832 | static struct ethtool_ops bcmgenet_ethtool_ops = { | |
833 | .get_strings = bcmgenet_get_strings, | |
834 | .get_sset_count = bcmgenet_get_sset_count, | |
835 | .get_ethtool_stats = bcmgenet_get_ethtool_stats, | |
836 | .get_settings = bcmgenet_get_settings, | |
837 | .set_settings = bcmgenet_set_settings, | |
838 | .get_drvinfo = bcmgenet_get_drvinfo, | |
839 | .get_link = ethtool_op_get_link, | |
840 | .get_msglevel = bcmgenet_get_msglevel, | |
841 | .set_msglevel = bcmgenet_set_msglevel, | |
06ba8375 FF |
842 | .get_wol = bcmgenet_get_wol, |
843 | .set_wol = bcmgenet_set_wol, | |
6ef398ea FF |
844 | .get_eee = bcmgenet_get_eee, |
845 | .set_eee = bcmgenet_set_eee, | |
6b0c5406 | 846 | .nway_reset = bcmgenet_nway_reset, |
1c1008c7 FF |
847 | }; |
848 | ||
849 | /* Power down the unimac, based on mode. */ | |
850 | static void bcmgenet_power_down(struct bcmgenet_priv *priv, | |
851 | enum bcmgenet_power_mode mode) | |
852 | { | |
853 | u32 reg; | |
854 | ||
855 | switch (mode) { | |
856 | case GENET_POWER_CABLE_SENSE: | |
80d8e96d | 857 | phy_detach(priv->phydev); |
1c1008c7 FF |
858 | break; |
859 | ||
c3ae64ae FF |
860 | case GENET_POWER_WOL_MAGIC: |
861 | bcmgenet_wol_power_down_cfg(priv, mode); | |
862 | break; | |
863 | ||
1c1008c7 FF |
864 | case GENET_POWER_PASSIVE: |
865 | /* Power down LED */ | |
1c1008c7 FF |
866 | if (priv->hw_params->flags & GENET_HAS_EXT) { |
867 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | |
868 | reg |= (EXT_PWR_DOWN_PHY | | |
869 | EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); | |
870 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | |
871 | } | |
872 | break; | |
873 | default: | |
874 | break; | |
875 | } | |
876 | } | |
877 | ||
878 | static void bcmgenet_power_up(struct bcmgenet_priv *priv, | |
c91b7f66 | 879 | enum bcmgenet_power_mode mode) |
1c1008c7 FF |
880 | { |
881 | u32 reg; | |
882 | ||
883 | if (!(priv->hw_params->flags & GENET_HAS_EXT)) | |
884 | return; | |
885 | ||
886 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | |
887 | ||
888 | switch (mode) { | |
889 | case GENET_POWER_PASSIVE: | |
890 | reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY | | |
891 | EXT_PWR_DOWN_BIAS); | |
892 | /* fallthrough */ | |
893 | case GENET_POWER_CABLE_SENSE: | |
894 | /* enable APD */ | |
895 | reg |= EXT_PWR_DN_EN_LD; | |
896 | break; | |
c3ae64ae FF |
897 | case GENET_POWER_WOL_MAGIC: |
898 | bcmgenet_wol_power_up_cfg(priv, mode); | |
899 | return; | |
1c1008c7 FF |
900 | default: |
901 | break; | |
902 | } | |
903 | ||
904 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | |
cc013fb4 FF |
905 | |
906 | if (mode == GENET_POWER_PASSIVE) | |
907 | bcmgenet_mii_reset(priv->dev); | |
1c1008c7 FF |
908 | } |
909 | ||
910 | /* ioctl handle special commands that are not present in ethtool. */ | |
911 | static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
912 | { | |
913 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
914 | int val = 0; | |
915 | ||
916 | if (!netif_running(dev)) | |
917 | return -EINVAL; | |
918 | ||
919 | switch (cmd) { | |
920 | case SIOCGMIIPHY: | |
921 | case SIOCGMIIREG: | |
922 | case SIOCSMIIREG: | |
923 | if (!priv->phydev) | |
924 | val = -ENODEV; | |
925 | else | |
926 | val = phy_mii_ioctl(priv->phydev, rq, cmd); | |
927 | break; | |
928 | ||
929 | default: | |
930 | val = -EINVAL; | |
931 | break; | |
932 | } | |
933 | ||
934 | return val; | |
935 | } | |
936 | ||
937 | static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, | |
938 | struct bcmgenet_tx_ring *ring) | |
939 | { | |
940 | struct enet_cb *tx_cb_ptr; | |
941 | ||
942 | tx_cb_ptr = ring->cbs; | |
943 | tx_cb_ptr += ring->write_ptr - ring->cb_ptr; | |
014012a4 | 944 | |
1c1008c7 FF |
945 | /* Advancing local write pointer */ |
946 | if (ring->write_ptr == ring->end_ptr) | |
947 | ring->write_ptr = ring->cb_ptr; | |
948 | else | |
949 | ring->write_ptr++; | |
950 | ||
951 | return tx_cb_ptr; | |
952 | } | |
953 | ||
954 | /* Simple helper to free a control block's resources */ | |
955 | static void bcmgenet_free_cb(struct enet_cb *cb) | |
956 | { | |
957 | dev_kfree_skb_any(cb->skb); | |
958 | cb->skb = NULL; | |
959 | dma_unmap_addr_set(cb, dma_addr, 0); | |
960 | } | |
961 | ||
962 | static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv, | |
963 | struct bcmgenet_tx_ring *ring) | |
964 | { | |
965 | bcmgenet_intrl2_0_writel(priv, | |
c91b7f66 FF |
966 | UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, |
967 | INTRL2_CPU_MASK_SET); | |
1c1008c7 FF |
968 | } |
969 | ||
970 | static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv, | |
971 | struct bcmgenet_tx_ring *ring) | |
972 | { | |
973 | bcmgenet_intrl2_0_writel(priv, | |
c91b7f66 FF |
974 | UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, |
975 | INTRL2_CPU_MASK_CLEAR); | |
1c1008c7 FF |
976 | } |
977 | ||
978 | static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv, | |
c91b7f66 | 979 | struct bcmgenet_tx_ring *ring) |
1c1008c7 | 980 | { |
c91b7f66 FF |
981 | bcmgenet_intrl2_1_writel(priv, (1 << ring->index), |
982 | INTRL2_CPU_MASK_CLEAR); | |
1c1008c7 FF |
983 | priv->int1_mask &= ~(1 << ring->index); |
984 | } | |
985 | ||
986 | static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv, | |
987 | struct bcmgenet_tx_ring *ring) | |
988 | { | |
c91b7f66 FF |
989 | bcmgenet_intrl2_1_writel(priv, (1 << ring->index), |
990 | INTRL2_CPU_MASK_SET); | |
1c1008c7 FF |
991 | priv->int1_mask |= (1 << ring->index); |
992 | } | |
993 | ||
994 | /* Unlocked version of the reclaim routine */ | |
4092e6ac JS |
995 | static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, |
996 | struct bcmgenet_tx_ring *ring) | |
1c1008c7 FF |
997 | { |
998 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
1c1008c7 | 999 | struct enet_cb *tx_cb_ptr; |
b2cde2cc | 1000 | struct netdev_queue *txq; |
4092e6ac | 1001 | unsigned int pkts_compl = 0; |
1c1008c7 | 1002 | unsigned int c_index; |
66d06757 PG |
1003 | unsigned int txbds_ready; |
1004 | unsigned int txbds_processed = 0; | |
1c1008c7 | 1005 | |
7fc527f9 | 1006 | /* Compute how many buffers are transmitted since last xmit call */ |
1c1008c7 | 1007 | c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); |
66d06757 | 1008 | c_index &= DMA_C_INDEX_MASK; |
1c1008c7 | 1009 | |
66d06757 PG |
1010 | if (likely(c_index >= ring->c_index)) |
1011 | txbds_ready = c_index - ring->c_index; | |
1c1008c7 | 1012 | else |
66d06757 | 1013 | txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index; |
1c1008c7 FF |
1014 | |
1015 | netif_dbg(priv, tx_done, dev, | |
66d06757 PG |
1016 | "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", |
1017 | __func__, ring->index, ring->c_index, c_index, txbds_ready); | |
1c1008c7 FF |
1018 | |
1019 | /* Reclaim transmitted buffers */ | |
66d06757 PG |
1020 | while (txbds_processed < txbds_ready) { |
1021 | tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr]; | |
1c1008c7 | 1022 | if (tx_cb_ptr->skb) { |
4092e6ac | 1023 | pkts_compl++; |
66d06757 | 1024 | dev->stats.tx_packets++; |
1c1008c7 FF |
1025 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; |
1026 | dma_unmap_single(&dev->dev, | |
c91b7f66 FF |
1027 | dma_unmap_addr(tx_cb_ptr, dma_addr), |
1028 | tx_cb_ptr->skb->len, | |
1029 | DMA_TO_DEVICE); | |
1c1008c7 FF |
1030 | bcmgenet_free_cb(tx_cb_ptr); |
1031 | } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { | |
1032 | dev->stats.tx_bytes += | |
1033 | dma_unmap_len(tx_cb_ptr, dma_len); | |
1034 | dma_unmap_page(&dev->dev, | |
c91b7f66 FF |
1035 | dma_unmap_addr(tx_cb_ptr, dma_addr), |
1036 | dma_unmap_len(tx_cb_ptr, dma_len), | |
1037 | DMA_TO_DEVICE); | |
1c1008c7 FF |
1038 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); |
1039 | } | |
1c1008c7 | 1040 | |
66d06757 PG |
1041 | txbds_processed++; |
1042 | if (likely(ring->clean_ptr < ring->end_ptr)) | |
1043 | ring->clean_ptr++; | |
1044 | else | |
1045 | ring->clean_ptr = ring->cb_ptr; | |
1c1008c7 FF |
1046 | } |
1047 | ||
66d06757 PG |
1048 | ring->free_bds += txbds_processed; |
1049 | ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK; | |
1050 | ||
4092e6ac | 1051 | if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { |
66d06757 | 1052 | txq = netdev_get_tx_queue(dev, ring->queue); |
4092e6ac JS |
1053 | if (netif_tx_queue_stopped(txq)) |
1054 | netif_tx_wake_queue(txq); | |
1055 | } | |
1c1008c7 | 1056 | |
4092e6ac | 1057 | return pkts_compl; |
1c1008c7 FF |
1058 | } |
1059 | ||
4092e6ac | 1060 | static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, |
c91b7f66 | 1061 | struct bcmgenet_tx_ring *ring) |
1c1008c7 | 1062 | { |
4092e6ac | 1063 | unsigned int released; |
1c1008c7 FF |
1064 | unsigned long flags; |
1065 | ||
1066 | spin_lock_irqsave(&ring->lock, flags); | |
4092e6ac | 1067 | released = __bcmgenet_tx_reclaim(dev, ring); |
1c1008c7 | 1068 | spin_unlock_irqrestore(&ring->lock, flags); |
4092e6ac JS |
1069 | |
1070 | return released; | |
1071 | } | |
1072 | ||
1073 | static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) | |
1074 | { | |
1075 | struct bcmgenet_tx_ring *ring = | |
1076 | container_of(napi, struct bcmgenet_tx_ring, napi); | |
1077 | unsigned int work_done = 0; | |
1078 | ||
1079 | work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring); | |
1080 | ||
1081 | if (work_done == 0) { | |
1082 | napi_complete(napi); | |
1083 | ring->int_enable(ring->priv, ring); | |
1084 | ||
1085 | return 0; | |
1086 | } | |
1087 | ||
1088 | return budget; | |
1c1008c7 FF |
1089 | } |
1090 | ||
1091 | static void bcmgenet_tx_reclaim_all(struct net_device *dev) | |
1092 | { | |
1093 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
1094 | int i; | |
1095 | ||
1096 | if (netif_is_multiqueue(dev)) { | |
1097 | for (i = 0; i < priv->hw_params->tx_queues; i++) | |
1098 | bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); | |
1099 | } | |
1100 | ||
1101 | bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); | |
1102 | } | |
1103 | ||
1104 | /* Transmits a single SKB (either head of a fragment or a single SKB) | |
1105 | * caller must hold priv->lock | |
1106 | */ | |
1107 | static int bcmgenet_xmit_single(struct net_device *dev, | |
1108 | struct sk_buff *skb, | |
1109 | u16 dma_desc_flags, | |
1110 | struct bcmgenet_tx_ring *ring) | |
1111 | { | |
1112 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
1113 | struct device *kdev = &priv->pdev->dev; | |
1114 | struct enet_cb *tx_cb_ptr; | |
1115 | unsigned int skb_len; | |
1116 | dma_addr_t mapping; | |
1117 | u32 length_status; | |
1118 | int ret; | |
1119 | ||
1120 | tx_cb_ptr = bcmgenet_get_txcb(priv, ring); | |
1121 | ||
1122 | if (unlikely(!tx_cb_ptr)) | |
1123 | BUG(); | |
1124 | ||
1125 | tx_cb_ptr->skb = skb; | |
1126 | ||
1127 | skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb); | |
1128 | ||
1129 | mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); | |
1130 | ret = dma_mapping_error(kdev, mapping); | |
1131 | if (ret) { | |
44c8bc3c | 1132 | priv->mib.tx_dma_failed++; |
1c1008c7 FF |
1133 | netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); |
1134 | dev_kfree_skb(skb); | |
1135 | return ret; | |
1136 | } | |
1137 | ||
1138 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); | |
1139 | dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len); | |
1140 | length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | | |
1141 | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) | | |
1142 | DMA_TX_APPEND_CRC; | |
1143 | ||
1144 | if (skb->ip_summed == CHECKSUM_PARTIAL) | |
1145 | length_status |= DMA_TX_DO_CSUM; | |
1146 | ||
1147 | dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status); | |
1148 | ||
1c1008c7 FF |
1149 | return 0; |
1150 | } | |
1151 | ||
7fc527f9 | 1152 | /* Transmit a SKB fragment */ |
1c1008c7 | 1153 | static int bcmgenet_xmit_frag(struct net_device *dev, |
c91b7f66 FF |
1154 | skb_frag_t *frag, |
1155 | u16 dma_desc_flags, | |
1156 | struct bcmgenet_tx_ring *ring) | |
1c1008c7 FF |
1157 | { |
1158 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
1159 | struct device *kdev = &priv->pdev->dev; | |
1160 | struct enet_cb *tx_cb_ptr; | |
1161 | dma_addr_t mapping; | |
1162 | int ret; | |
1163 | ||
1164 | tx_cb_ptr = bcmgenet_get_txcb(priv, ring); | |
1165 | ||
1166 | if (unlikely(!tx_cb_ptr)) | |
1167 | BUG(); | |
1168 | tx_cb_ptr->skb = NULL; | |
1169 | ||
1170 | mapping = skb_frag_dma_map(kdev, frag, 0, | |
c91b7f66 | 1171 | skb_frag_size(frag), DMA_TO_DEVICE); |
1c1008c7 FF |
1172 | ret = dma_mapping_error(kdev, mapping); |
1173 | if (ret) { | |
44c8bc3c | 1174 | priv->mib.tx_dma_failed++; |
1c1008c7 | 1175 | netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n", |
c91b7f66 | 1176 | __func__); |
1c1008c7 FF |
1177 | return ret; |
1178 | } | |
1179 | ||
1180 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); | |
1181 | dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size); | |
1182 | ||
1183 | dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, | |
c91b7f66 FF |
1184 | (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | |
1185 | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); | |
1c1008c7 | 1186 | |
1c1008c7 FF |
1187 | return 0; |
1188 | } | |
1189 | ||
1190 | /* Reallocate the SKB to put enough headroom in front of it and insert | |
1191 | * the transmit checksum offsets in the descriptors | |
1192 | */ | |
bc23333b PG |
1193 | static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, |
1194 | struct sk_buff *skb) | |
1c1008c7 FF |
1195 | { |
1196 | struct status_64 *status = NULL; | |
1197 | struct sk_buff *new_skb; | |
1198 | u16 offset; | |
1199 | u8 ip_proto; | |
1200 | u16 ip_ver; | |
1201 | u32 tx_csum_info; | |
1202 | ||
1203 | if (unlikely(skb_headroom(skb) < sizeof(*status))) { | |
1204 | /* If 64 byte status block enabled, must make sure skb has | |
1205 | * enough headroom for us to insert 64B status block. | |
1206 | */ | |
1207 | new_skb = skb_realloc_headroom(skb, sizeof(*status)); | |
1208 | dev_kfree_skb(skb); | |
1209 | if (!new_skb) { | |
1210 | dev->stats.tx_errors++; | |
1211 | dev->stats.tx_dropped++; | |
bc23333b | 1212 | return NULL; |
1c1008c7 FF |
1213 | } |
1214 | skb = new_skb; | |
1215 | } | |
1216 | ||
1217 | skb_push(skb, sizeof(*status)); | |
1218 | status = (struct status_64 *)skb->data; | |
1219 | ||
1220 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
1221 | ip_ver = htons(skb->protocol); | |
1222 | switch (ip_ver) { | |
1223 | case ETH_P_IP: | |
1224 | ip_proto = ip_hdr(skb)->protocol; | |
1225 | break; | |
1226 | case ETH_P_IPV6: | |
1227 | ip_proto = ipv6_hdr(skb)->nexthdr; | |
1228 | break; | |
1229 | default: | |
bc23333b | 1230 | return skb; |
1c1008c7 FF |
1231 | } |
1232 | ||
1233 | offset = skb_checksum_start_offset(skb) - sizeof(*status); | |
1234 | tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | | |
1235 | (offset + skb->csum_offset); | |
1236 | ||
1237 | /* Set the length valid bit for TCP and UDP and just set | |
1238 | * the special UDP flag for IPv4, else just set to 0. | |
1239 | */ | |
1240 | if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { | |
1241 | tx_csum_info |= STATUS_TX_CSUM_LV; | |
1242 | if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) | |
1243 | tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; | |
8900ea57 | 1244 | } else { |
1c1008c7 | 1245 | tx_csum_info = 0; |
8900ea57 | 1246 | } |
1c1008c7 FF |
1247 | |
1248 | status->tx_csum_info = tx_csum_info; | |
1249 | } | |
1250 | ||
bc23333b | 1251 | return skb; |
1c1008c7 FF |
1252 | } |
1253 | ||
1254 | static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) | |
1255 | { | |
1256 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
1257 | struct bcmgenet_tx_ring *ring = NULL; | |
b2cde2cc | 1258 | struct netdev_queue *txq; |
1c1008c7 FF |
1259 | unsigned long flags = 0; |
1260 | int nr_frags, index; | |
1261 | u16 dma_desc_flags; | |
1262 | int ret; | |
1263 | int i; | |
1264 | ||
1265 | index = skb_get_queue_mapping(skb); | |
1266 | /* Mapping strategy: | |
1267 | * queue_mapping = 0, unclassified, packet xmited through ring16 | |
1268 | * queue_mapping = 1, goes to ring 0. (highest priority queue | |
1269 | * queue_mapping = 2, goes to ring 1. | |
1270 | * queue_mapping = 3, goes to ring 2. | |
1271 | * queue_mapping = 4, goes to ring 3. | |
1272 | */ | |
1273 | if (index == 0) | |
1274 | index = DESC_INDEX; | |
1275 | else | |
1276 | index -= 1; | |
1277 | ||
1c1008c7 FF |
1278 | nr_frags = skb_shinfo(skb)->nr_frags; |
1279 | ring = &priv->tx_rings[index]; | |
b2cde2cc | 1280 | txq = netdev_get_tx_queue(dev, ring->queue); |
1c1008c7 FF |
1281 | |
1282 | spin_lock_irqsave(&ring->lock, flags); | |
1283 | if (ring->free_bds <= nr_frags + 1) { | |
b2cde2cc | 1284 | netif_tx_stop_queue(txq); |
1c1008c7 | 1285 | netdev_err(dev, "%s: tx ring %d full when queue %d awake\n", |
c91b7f66 | 1286 | __func__, index, ring->queue); |
1c1008c7 FF |
1287 | ret = NETDEV_TX_BUSY; |
1288 | goto out; | |
1289 | } | |
1290 | ||
474ea9ca FF |
1291 | if (skb_padto(skb, ETH_ZLEN)) { |
1292 | ret = NETDEV_TX_OK; | |
1293 | goto out; | |
1294 | } | |
1295 | ||
1c1008c7 FF |
1296 | /* set the SKB transmit checksum */ |
1297 | if (priv->desc_64b_en) { | |
bc23333b PG |
1298 | skb = bcmgenet_put_tx_csum(dev, skb); |
1299 | if (!skb) { | |
1c1008c7 FF |
1300 | ret = NETDEV_TX_OK; |
1301 | goto out; | |
1302 | } | |
1303 | } | |
1304 | ||
1305 | dma_desc_flags = DMA_SOP; | |
1306 | if (nr_frags == 0) | |
1307 | dma_desc_flags |= DMA_EOP; | |
1308 | ||
1309 | /* Transmit single SKB or head of fragment list */ | |
1310 | ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring); | |
1311 | if (ret) { | |
1312 | ret = NETDEV_TX_OK; | |
1313 | goto out; | |
1314 | } | |
1315 | ||
1316 | /* xmit fragment */ | |
1317 | for (i = 0; i < nr_frags; i++) { | |
1318 | ret = bcmgenet_xmit_frag(dev, | |
c91b7f66 FF |
1319 | &skb_shinfo(skb)->frags[i], |
1320 | (i == nr_frags - 1) ? DMA_EOP : 0, | |
1321 | ring); | |
1c1008c7 FF |
1322 | if (ret) { |
1323 | ret = NETDEV_TX_OK; | |
1324 | goto out; | |
1325 | } | |
1326 | } | |
1327 | ||
d03825fb FF |
1328 | skb_tx_timestamp(skb); |
1329 | ||
ae67bf01 FF |
1330 | /* Decrement total BD count and advance our write pointer */ |
1331 | ring->free_bds -= nr_frags + 1; | |
1332 | ring->prod_index += nr_frags + 1; | |
1333 | ring->prod_index &= DMA_P_INDEX_MASK; | |
1334 | ||
4092e6ac | 1335 | if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) |
b2cde2cc | 1336 | netif_tx_stop_queue(txq); |
1c1008c7 | 1337 | |
ddd0ca5d FF |
1338 | if (!skb->xmit_more || netif_xmit_stopped(txq)) |
1339 | /* Packets are ready, update producer index */ | |
1340 | bcmgenet_tdma_ring_writel(priv, ring->index, | |
1341 | ring->prod_index, TDMA_PROD_INDEX); | |
1c1008c7 FF |
1342 | out: |
1343 | spin_unlock_irqrestore(&ring->lock, flags); | |
1344 | ||
1345 | return ret; | |
1346 | } | |
1347 | ||
d6707bec PG |
1348 | static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, |
1349 | struct enet_cb *cb) | |
1c1008c7 FF |
1350 | { |
1351 | struct device *kdev = &priv->pdev->dev; | |
1352 | struct sk_buff *skb; | |
d6707bec | 1353 | struct sk_buff *rx_skb; |
1c1008c7 | 1354 | dma_addr_t mapping; |
1c1008c7 | 1355 | |
d6707bec | 1356 | /* Allocate a new Rx skb */ |
c91b7f66 | 1357 | skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT); |
d6707bec PG |
1358 | if (!skb) { |
1359 | priv->mib.alloc_rx_buff_failed++; | |
1360 | netif_err(priv, rx_err, priv->dev, | |
1361 | "%s: Rx skb allocation failed\n", __func__); | |
1362 | return NULL; | |
1363 | } | |
1c1008c7 | 1364 | |
d6707bec PG |
1365 | /* DMA-map the new Rx skb */ |
1366 | mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, | |
1367 | DMA_FROM_DEVICE); | |
1368 | if (dma_mapping_error(kdev, mapping)) { | |
44c8bc3c | 1369 | priv->mib.rx_dma_failed++; |
d6707bec | 1370 | dev_kfree_skb_any(skb); |
1c1008c7 | 1371 | netif_err(priv, rx_err, priv->dev, |
d6707bec PG |
1372 | "%s: Rx skb DMA mapping failed\n", __func__); |
1373 | return NULL; | |
1c1008c7 FF |
1374 | } |
1375 | ||
d6707bec PG |
1376 | /* Grab the current Rx skb from the ring and DMA-unmap it */ |
1377 | rx_skb = cb->skb; | |
1378 | if (likely(rx_skb)) | |
1379 | dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), | |
1380 | priv->rx_buf_len, DMA_FROM_DEVICE); | |
1381 | ||
1382 | /* Put the new Rx skb on the ring */ | |
1383 | cb->skb = skb; | |
1c1008c7 | 1384 | dma_unmap_addr_set(cb, dma_addr, mapping); |
8ac467e8 | 1385 | dmadesc_set_addr(priv, cb->bd_addr, mapping); |
1c1008c7 | 1386 | |
d6707bec PG |
1387 | /* Return the current Rx skb to caller */ |
1388 | return rx_skb; | |
1c1008c7 FF |
1389 | } |
1390 | ||
1391 | /* bcmgenet_desc_rx - descriptor based rx process. | |
1392 | * this could be called from bottom half, or from NAPI polling method. | |
1393 | */ | |
1394 | static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |
8ac467e8 | 1395 | unsigned int index, |
1c1008c7 FF |
1396 | unsigned int budget) |
1397 | { | |
8ac467e8 | 1398 | struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; |
1c1008c7 FF |
1399 | struct net_device *dev = priv->dev; |
1400 | struct enet_cb *cb; | |
1401 | struct sk_buff *skb; | |
1402 | u32 dma_length_status; | |
1403 | unsigned long dma_flag; | |
d6707bec | 1404 | int len; |
1c1008c7 FF |
1405 | unsigned int rxpktprocessed = 0, rxpkttoprocess; |
1406 | unsigned int p_index; | |
d26ea6cc | 1407 | unsigned int discards; |
1c1008c7 FF |
1408 | unsigned int chksum_ok = 0; |
1409 | ||
8ac467e8 | 1410 | p_index = bcmgenet_rdma_ring_readl(priv, index, RDMA_PROD_INDEX); |
d26ea6cc PG |
1411 | |
1412 | discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & | |
1413 | DMA_P_INDEX_DISCARD_CNT_MASK; | |
1414 | if (discards > ring->old_discards) { | |
1415 | discards = discards - ring->old_discards; | |
1416 | dev->stats.rx_missed_errors += discards; | |
1417 | dev->stats.rx_errors += discards; | |
1418 | ring->old_discards += discards; | |
1419 | ||
1420 | /* Clear HW register when we reach 75% of maximum 0xFFFF */ | |
1421 | if (ring->old_discards >= 0xC000) { | |
1422 | ring->old_discards = 0; | |
1423 | bcmgenet_rdma_ring_writel(priv, index, 0, | |
1424 | RDMA_PROD_INDEX); | |
1425 | } | |
1426 | } | |
1427 | ||
1c1008c7 FF |
1428 | p_index &= DMA_P_INDEX_MASK; |
1429 | ||
8ac467e8 PG |
1430 | if (likely(p_index >= ring->c_index)) |
1431 | rxpkttoprocess = p_index - ring->c_index; | |
1c1008c7 | 1432 | else |
8ac467e8 PG |
1433 | rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index + |
1434 | p_index; | |
1c1008c7 FF |
1435 | |
1436 | netif_dbg(priv, rx_status, dev, | |
c91b7f66 | 1437 | "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); |
1c1008c7 FF |
1438 | |
1439 | while ((rxpktprocessed < rxpkttoprocess) && | |
c91b7f66 | 1440 | (rxpktprocessed < budget)) { |
8ac467e8 | 1441 | cb = &priv->rx_cbs[ring->read_ptr]; |
d6707bec | 1442 | skb = bcmgenet_rx_refill(priv, cb); |
b629be5c | 1443 | |
b629be5c FF |
1444 | if (unlikely(!skb)) { |
1445 | dev->stats.rx_dropped++; | |
1446 | dev->stats.rx_errors++; | |
d6707bec | 1447 | goto next; |
b629be5c FF |
1448 | } |
1449 | ||
1c1008c7 | 1450 | if (!priv->desc_64b_en) { |
c91b7f66 | 1451 | dma_length_status = |
8ac467e8 | 1452 | dmadesc_get_length_status(priv, cb->bd_addr); |
1c1008c7 FF |
1453 | } else { |
1454 | struct status_64 *status; | |
164d4f20 | 1455 | |
1c1008c7 FF |
1456 | status = (struct status_64 *)skb->data; |
1457 | dma_length_status = status->length_status; | |
1458 | } | |
1459 | ||
1460 | /* DMA flags and length are still valid no matter how | |
1461 | * we got the Receive Status Vector (64B RSB or register) | |
1462 | */ | |
1463 | dma_flag = dma_length_status & 0xffff; | |
1464 | len = dma_length_status >> DMA_BUFLENGTH_SHIFT; | |
1465 | ||
1466 | netif_dbg(priv, rx_status, dev, | |
c91b7f66 | 1467 | "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", |
8ac467e8 PG |
1468 | __func__, p_index, ring->c_index, |
1469 | ring->read_ptr, dma_length_status); | |
1c1008c7 | 1470 | |
1c1008c7 FF |
1471 | if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { |
1472 | netif_err(priv, rx_status, dev, | |
c91b7f66 | 1473 | "dropping fragmented packet!\n"); |
1c1008c7 FF |
1474 | dev->stats.rx_dropped++; |
1475 | dev->stats.rx_errors++; | |
d6707bec PG |
1476 | dev_kfree_skb_any(skb); |
1477 | goto next; | |
1c1008c7 | 1478 | } |
d6707bec | 1479 | |
1c1008c7 FF |
1480 | /* report errors */ |
1481 | if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | | |
1482 | DMA_RX_OV | | |
1483 | DMA_RX_NO | | |
1484 | DMA_RX_LG | | |
1485 | DMA_RX_RXER))) { | |
1486 | netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", | |
c91b7f66 | 1487 | (unsigned int)dma_flag); |
1c1008c7 FF |
1488 | if (dma_flag & DMA_RX_CRC_ERROR) |
1489 | dev->stats.rx_crc_errors++; | |
1490 | if (dma_flag & DMA_RX_OV) | |
1491 | dev->stats.rx_over_errors++; | |
1492 | if (dma_flag & DMA_RX_NO) | |
1493 | dev->stats.rx_frame_errors++; | |
1494 | if (dma_flag & DMA_RX_LG) | |
1495 | dev->stats.rx_length_errors++; | |
1496 | dev->stats.rx_dropped++; | |
1497 | dev->stats.rx_errors++; | |
d6707bec PG |
1498 | dev_kfree_skb_any(skb); |
1499 | goto next; | |
1c1008c7 FF |
1500 | } /* error packet */ |
1501 | ||
1502 | chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && | |
c91b7f66 | 1503 | priv->desc_rxchk_en; |
1c1008c7 FF |
1504 | |
1505 | skb_put(skb, len); | |
1506 | if (priv->desc_64b_en) { | |
1507 | skb_pull(skb, 64); | |
1508 | len -= 64; | |
1509 | } | |
1510 | ||
1511 | if (likely(chksum_ok)) | |
1512 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1513 | ||
1514 | /* remove hardware 2bytes added for IP alignment */ | |
1515 | skb_pull(skb, 2); | |
1516 | len -= 2; | |
1517 | ||
1518 | if (priv->crc_fwd_en) { | |
1519 | skb_trim(skb, len - ETH_FCS_LEN); | |
1520 | len -= ETH_FCS_LEN; | |
1521 | } | |
1522 | ||
1523 | /* Finish setting up the received SKB and send it to the kernel */ |
1524 | skb->protocol = eth_type_trans(skb, priv->dev); | |
1525 | dev->stats.rx_packets++; | |
1526 | dev->stats.rx_bytes += len; | |
1527 | if (dma_flag & DMA_RX_MULT) | |
1528 | dev->stats.multicast++; | |
1529 | ||
1530 | /* Notify kernel */ | |
1531 | napi_gro_receive(&priv->napi, skb); | |
1c1008c7 FF |
1532 | netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); |
1533 | ||
d6707bec | 1534 | next: |
cf377d88 | 1535 | rxpktprocessed++; |
8ac467e8 PG |
1536 | if (likely(ring->read_ptr < ring->end_ptr)) |
1537 | ring->read_ptr++; | |
1538 | else | |
1539 | ring->read_ptr = ring->cb_ptr; | |
1540 | ||
1541 | ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; | |
1542 | bcmgenet_rdma_ring_writel(priv, index, ring->c_index, RDMA_CONS_INDEX); | |
1c1008c7 FF |
1543 | } |
1544 | ||
1545 | return rxpktprocessed; | |
1546 | } | |
1547 | ||
1548 | /* Assign skb to RX DMA descriptor. */ | |
8ac467e8 PG |
1549 | static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, |
1550 | struct bcmgenet_rx_ring *ring) | |
1c1008c7 FF |
1551 | { |
1552 | struct enet_cb *cb; | |
d6707bec | 1553 | struct sk_buff *skb; |
1c1008c7 FF |
1554 | int i; |
1555 | ||
8ac467e8 | 1556 | netif_dbg(priv, hw, priv->dev, "%s\n", __func__); |
1c1008c7 FF |
1557 | |
1558 | /* loop here for each buffer needing assignment */ |
8ac467e8 PG |
1559 | for (i = 0; i < ring->size; i++) { |
1560 | cb = ring->cbs + i; | |
d6707bec PG |
1561 | skb = bcmgenet_rx_refill(priv, cb); |
1562 | if (skb) | |
1563 | dev_kfree_skb_any(skb); | |
1564 | if (!cb->skb) | |
1565 | return -ENOMEM; | |
1c1008c7 FF |
1566 | } |
1567 | ||
d6707bec | 1568 | return 0; |
1c1008c7 FF |
1569 | } |
1570 | ||
1571 | static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) | |
1572 | { | |
1573 | struct enet_cb *cb; | |
1574 | int i; | |
1575 | ||
1576 | for (i = 0; i < priv->num_rx_bds; i++) { | |
1577 | cb = &priv->rx_cbs[i]; | |
1578 | ||
1579 | if (dma_unmap_addr(cb, dma_addr)) { | |
1580 | dma_unmap_single(&priv->dev->dev, | |
c91b7f66 FF |
1581 | dma_unmap_addr(cb, dma_addr), |
1582 | priv->rx_buf_len, DMA_FROM_DEVICE); | |
1c1008c7 FF |
1583 | dma_unmap_addr_set(cb, dma_addr, 0); |
1584 | } | |
1585 | ||
1586 | if (cb->skb) | |
1587 | bcmgenet_free_cb(cb); | |
1588 | } | |
1589 | } | |
1590 | ||
c91b7f66 | 1591 | static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) |
e29585b8 FF |
1592 | { |
1593 | u32 reg; | |
1594 | ||
1595 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
1596 | if (enable) | |
1597 | reg |= mask; | |
1598 | else | |
1599 | reg &= ~mask; | |
1600 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | |
1601 | ||
1602 | /* UniMAC stops on a packet boundary, wait for a full-size packet | |
1603 | * to be processed | |
1604 | */ | |
1605 | if (enable == 0) | |
1606 | usleep_range(1000, 2000); | |
1607 | } | |
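/* Typical call patterns, as used further down in this file: the mask is
 * any combination of UMAC_CMD enable bits and is applied without
 * disturbing the other bits in the register.
 *
 *	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);	start both paths
 *	umac_enable_set(priv, CMD_RX_EN, false);		gate receive only
 */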
1608 | ||
1c1008c7 FF |
1609 | static int reset_umac(struct bcmgenet_priv *priv) |
1610 | { | |
1611 | struct device *kdev = &priv->pdev->dev; | |
1612 | unsigned int timeout = 0; | |
1613 | u32 reg; | |
1614 | ||
1615 | /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ | |
1616 | bcmgenet_rbuf_ctrl_set(priv, 0); | |
1617 | udelay(10); | |
1618 | ||
1619 | /* disable MAC while updating its registers */ | |
1620 | bcmgenet_umac_writel(priv, 0, UMAC_CMD); | |
1621 | ||
1622 | /* issue soft reset, wait for it to complete */ | |
1623 | bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); | |
1624 | while (timeout++ < 1000) { | |
1625 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
1626 | if (!(reg & CMD_SW_RESET)) | |
1627 | return 0; | |
1628 | ||
1629 | udelay(1); | |
1630 | } | |
1631 | ||
1632 | if (timeout > 1000) { |
1633 | dev_err(kdev, | |
7fc527f9 | 1634 | "timeout waiting for MAC to come out of reset\n"); |
1c1008c7 FF |
1635 | return -ETIMEDOUT; |
1636 | } | |
1637 | ||
1638 | return 0; | |
1639 | } | |
1640 | ||
909ff5ef FF |
1641 | static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) |
1642 | { | |
1643 | /* Mask all interrupts. */ |
1644 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); | |
1645 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); | |
1646 | bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); | |
1647 | bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); | |
1648 | bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); | |
1649 | bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); | |
1650 | } | |
1651 | ||
1c1008c7 FF |
1652 | static int init_umac(struct bcmgenet_priv *priv) |
1653 | { | |
1654 | struct device *kdev = &priv->pdev->dev; | |
1655 | int ret; | |
1656 | u32 reg, cpu_mask_clear; | |
4092e6ac | 1657 | int index; |
1c1008c7 FF |
1658 | |
1659 | dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); | |
1660 | ||
1661 | ret = reset_umac(priv); | |
1662 | if (ret) | |
1663 | return ret; | |
1664 | ||
1665 | bcmgenet_umac_writel(priv, 0, UMAC_CMD); | |
1666 | /* clear tx/rx counter */ | |
1667 | bcmgenet_umac_writel(priv, | |
c91b7f66 FF |
1668 | MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, |
1669 | UMAC_MIB_CTRL); | |
1c1008c7 FF |
1670 | bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); |
1671 | ||
1672 | bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); | |
1673 | ||
1674 | /* init rx registers, enable ip header optimization */ | |
1675 | reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); | |
1676 | reg |= RBUF_ALIGN_2B; | |
1677 | bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); | |
1678 | ||
1679 | if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) | |
1680 | bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); | |
1681 | ||
909ff5ef | 1682 | bcmgenet_intr_disable(priv); |
1c1008c7 | 1683 | |
4092e6ac | 1684 | cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE; |
1c1008c7 FF |
1685 | |
1686 | dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); | |
1687 | ||
7fc527f9 | 1688 | /* Monitor cable plug/unplug events for internal PHY */ |
8900ea57 | 1689 | if (phy_is_internal(priv->phydev)) { |
1c1008c7 | 1690 | cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); |
8900ea57 | 1691 | } else if (priv->ext_phy) { |
1c1008c7 | 1692 | cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); |
8900ea57 | 1693 | } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { |
1c1008c7 FF |
1694 | reg = bcmgenet_bp_mc_get(priv); |
1695 | reg |= BIT(priv->hw_params->bp_in_en_shift); | |
1696 | ||
1697 | /* bp_mask: back pressure mask */ | |
1698 | if (netif_is_multiqueue(priv->dev)) | |
1699 | reg |= priv->hw_params->bp_in_mask; | |
1700 | else | |
1701 | reg &= ~priv->hw_params->bp_in_mask; | |
1702 | bcmgenet_bp_mc_set(priv, reg); | |
1703 | } | |
1704 | ||
1705 | /* Enable MDIO interrupts on GENET v3+ */ | |
1706 | if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) | |
1707 | cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR; | |
1708 | ||
c91b7f66 | 1709 | bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); |
1c1008c7 | 1710 | |
4092e6ac JS |
1711 | for (index = 0; index < priv->hw_params->tx_queues; index++) |
1712 | bcmgenet_intrl2_1_writel(priv, (1 << index), | |
1713 | INTRL2_CPU_MASK_CLEAR); | |
1714 | ||
1c1008c7 FF |
1715 | /* Enable rx/tx engine. */ |
1716 | dev_dbg(kdev, "done init umac\n"); | |
1717 | ||
1718 | return 0; | |
1719 | } | |
1720 | ||
4f8b2d7d | 1721 | /* Initialize a Tx ring along with corresponding hardware registers */ |
1c1008c7 FF |
1722 | static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, |
1723 | unsigned int index, unsigned int size, | |
4f8b2d7d | 1724 | unsigned int start_ptr, unsigned int end_ptr) |
1c1008c7 FF |
1725 | { |
1726 | struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; | |
1727 | u32 words_per_bd = WORDS_PER_BD(priv); | |
1728 | u32 flow_period_val = 0; | |
1c1008c7 FF |
1729 | |
1730 | spin_lock_init(&ring->lock); | |
4092e6ac JS |
1731 | ring->priv = priv; |
1732 | netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); | |
1c1008c7 FF |
1733 | ring->index = index; |
1734 | if (index == DESC_INDEX) { | |
1735 | ring->queue = 0; | |
1736 | ring->int_enable = bcmgenet_tx_ring16_int_enable; | |
1737 | ring->int_disable = bcmgenet_tx_ring16_int_disable; | |
1738 | } else { | |
1739 | ring->queue = index + 1; | |
1740 | ring->int_enable = bcmgenet_tx_ring_int_enable; | |
1741 | ring->int_disable = bcmgenet_tx_ring_int_disable; | |
1742 | } | |
4f8b2d7d | 1743 | ring->cbs = priv->tx_cbs + start_ptr; |
1c1008c7 | 1744 | ring->size = size; |
66d06757 | 1745 | ring->clean_ptr = start_ptr; |
1c1008c7 FF |
1746 | ring->c_index = 0; |
1747 | ring->free_bds = size; | |
4f8b2d7d PG |
1748 | ring->write_ptr = start_ptr; |
1749 | ring->cb_ptr = start_ptr; | |
1c1008c7 FF |
1750 | ring->end_ptr = end_ptr - 1; |
1751 | ring->prod_index = 0; | |
1752 | ||
1753 | /* Set flow period for ring != 16 */ | |
1754 | if (index != DESC_INDEX) | |
1755 | flow_period_val = ENET_MAX_MTU_SIZE << 16; | |
1756 | ||
1757 | bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); | |
1758 | bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); | |
1759 | bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); | |
1760 | /* Disable rate control for now */ | |
1761 | bcmgenet_tdma_ring_writel(priv, index, flow_period_val, | |
c91b7f66 | 1762 | TDMA_FLOW_PERIOD); |
1c1008c7 | 1763 | bcmgenet_tdma_ring_writel(priv, index, |
c91b7f66 FF |
1764 | ((size << DMA_RING_SIZE_SHIFT) | |
1765 | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); | |
1c1008c7 | 1766 | |
1c1008c7 | 1767 | /* Set start and end address, read and write pointers */ |
4f8b2d7d | 1768 | bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, |
c91b7f66 | 1769 | DMA_START_ADDR); |
4f8b2d7d | 1770 | bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, |
c91b7f66 | 1771 | TDMA_READ_PTR); |
4f8b2d7d | 1772 | bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, |
c91b7f66 | 1773 | TDMA_WRITE_PTR); |
1c1008c7 | 1774 | bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, |
c91b7f66 | 1775 | DMA_END_ADDR); |
4092e6ac JS |
1776 | |
1777 | napi_enable(&ring->napi); | |
1778 | } | |
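/* Worked example of the address arithmetic above: with words_per_bd = 2
 * (GENET v1-v3 in bcmgenet_hw_params below), a ring covering descriptors
 * 32..63 gets DMA_START_ADDR = 32 * 2 = 64 and
 * DMA_END_ADDR = 64 * 2 - 1 = 127, i.e. the ring registers are expressed
 * in 32-bit words rather than in descriptor indices.
 */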
1779 | ||
1780 | static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv, | |
1781 | unsigned int index) | |
1782 | { | |
1783 | struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; | |
1784 | ||
1785 | napi_disable(&ring->napi); | |
1786 | netif_napi_del(&ring->napi); | |
1c1008c7 FF |
1787 | } |
1788 | ||
1789 | /* Initialize a RDMA ring */ | |
1790 | static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, | |
8ac467e8 PG |
1791 | unsigned int index, unsigned int size, |
1792 | unsigned int start_ptr, unsigned int end_ptr) | |
1c1008c7 | 1793 | { |
8ac467e8 | 1794 | struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; |
1c1008c7 FF |
1795 | u32 words_per_bd = WORDS_PER_BD(priv); |
1796 | int ret; | |
1797 | ||
8ac467e8 PG |
1798 | ring->index = index; |
1799 | ring->cbs = priv->rx_cbs + start_ptr; | |
1800 | ring->size = size; | |
1801 | ring->c_index = 0; | |
1802 | ring->read_ptr = start_ptr; | |
1803 | ring->cb_ptr = start_ptr; | |
1804 | ring->end_ptr = end_ptr - 1; | |
1c1008c7 | 1805 | |
8ac467e8 PG |
1806 | ret = bcmgenet_alloc_rx_buffers(priv, ring); |
1807 | if (ret) | |
1c1008c7 | 1808 | return ret; |
1c1008c7 | 1809 | |
1c1008c7 FF |
1810 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); |
1811 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); | |
6f5a272c | 1812 | bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); |
1c1008c7 | 1813 | bcmgenet_rdma_ring_writel(priv, index, |
c91b7f66 FF |
1814 | ((size << DMA_RING_SIZE_SHIFT) | |
1815 | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); | |
1c1008c7 | 1816 | bcmgenet_rdma_ring_writel(priv, index, |
c91b7f66 FF |
1817 | (DMA_FC_THRESH_LO << |
1818 | DMA_XOFF_THRESHOLD_SHIFT) | | |
1819 | DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); | |
6f5a272c PG |
1820 | |
1821 | /* Set start and end address, read and write pointers */ | |
8ac467e8 PG |
1822 | bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, |
1823 | DMA_START_ADDR); | |
1824 | bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, | |
1825 | RDMA_READ_PTR); | |
1826 | bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, | |
1827 | RDMA_WRITE_PTR); | |
1828 | bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, | |
6f5a272c | 1829 | DMA_END_ADDR); |
1c1008c7 FF |
1830 | |
1831 | return ret; | |
1832 | } | |
1833 | ||
16c6d667 | 1834 | /* Initialize Tx queues |
1c1008c7 | 1835 | * |
16c6d667 | 1836 | * Queues 0-3 are priority-based, each one has 32 descriptors, |
1c1008c7 FF |
1837 | * with queue 0 being the highest priority queue. |
1838 | * | |
16c6d667 | 1839 | * Queue 16 is the default Tx queue with |
51a966a7 | 1840 | * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. |
1c1008c7 | 1841 | * |
16c6d667 PG |
1842 | * The transmit control block pool is then partitioned as follows: |
1843 | * - Tx queue 0 uses tx_cbs[0..31] | |
1844 | * - Tx queue 1 uses tx_cbs[32..63] | |
1845 | * - Tx queue 2 uses tx_cbs[64..95] | |
1846 | * - Tx queue 3 uses tx_cbs[96..127] | |
1847 | * - Tx queue 16 uses tx_cbs[128..255] | |
1c1008c7 | 1848 | */ |
16c6d667 | 1849 | static void bcmgenet_init_tx_queues(struct net_device *dev) |
1c1008c7 FF |
1850 | { |
1851 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
16c6d667 PG |
1852 | u32 i, dma_enable; |
1853 | u32 dma_ctrl, ring_cfg; | |
37742166 | 1854 | u32 dma_priority[3] = {0, 0, 0}; |
1c1008c7 | 1855 | |
1c1008c7 FF |
1856 | dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); |
1857 | dma_enable = dma_ctrl & DMA_EN; | |
1858 | dma_ctrl &= ~DMA_EN; | |
1859 | bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); | |
1860 | ||
16c6d667 PG |
1861 | dma_ctrl = 0; |
1862 | ring_cfg = 0; | |
1863 | ||
1c1008c7 FF |
1864 | /* Enable strict priority arbiter mode */ |
1865 | bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); | |
1866 | ||
16c6d667 | 1867 | /* Initialize Tx priority queues */ |
1c1008c7 | 1868 | for (i = 0; i < priv->hw_params->tx_queues; i++) { |
51a966a7 PG |
1869 | bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, |
1870 | i * priv->hw_params->tx_bds_per_q, | |
1871 | (i + 1) * priv->hw_params->tx_bds_per_q); | |
16c6d667 PG |
1872 | ring_cfg |= (1 << i); |
1873 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | |
37742166 PG |
1874 | dma_priority[DMA_PRIO_REG_INDEX(i)] |= |
1875 | ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); | |
1c1008c7 FF |
1876 | } |
1877 | ||
16c6d667 | 1878 | /* Initialize Tx default queue 16 */ |
51a966a7 | 1879 | bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, |
16c6d667 | 1880 | priv->hw_params->tx_queues * |
51a966a7 | 1881 | priv->hw_params->tx_bds_per_q, |
16c6d667 PG |
1882 | TOTAL_DESC); |
1883 | ring_cfg |= (1 << DESC_INDEX); | |
1884 | dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); | |
37742166 PG |
1885 | dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= |
1886 | ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << | |
1887 | DMA_PRIO_REG_SHIFT(DESC_INDEX)); | |
16c6d667 PG |
1888 | |
1889 | /* Set Tx queue priorities */ | |
37742166 PG |
1890 | bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); |
1891 | bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); | |
1892 | bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); | |
1893 | ||
16c6d667 PG |
1894 | /* Enable Tx queues */ |
1895 | bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); | |
1c1008c7 | 1896 | |
16c6d667 | 1897 | /* Enable Tx DMA */ |
1c1008c7 | 1898 | if (dma_enable) |
16c6d667 PG |
1899 | dma_ctrl |= DMA_EN; |
1900 | bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); | |
1c1008c7 FF |
1901 | } |
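/* For illustration, with the default hw_params (tx_queues = 4): the loop
 * above gives queue i priority GENET_Q0_PRIORITY + i and the default
 * queue 16 gets GENET_Q0_PRIORITY + 4, the numerically largest value.
 * Since queue 0 is the highest-priority queue, the strict-priority
 * arbiter serves the four hardware queues ahead of ring 16.
 */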
1902 | ||
8ac467e8 PG |
1903 | /* Initialize Rx queues |
1904 | * | |
1905 | * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be | |
1906 | * used to direct traffic to these queues. | |
1907 | * | |
1908 | * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. | |
1909 | */ | |
1910 | static int bcmgenet_init_rx_queues(struct net_device *dev) | |
1911 | { | |
1912 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
1913 | u32 i; | |
1914 | u32 dma_enable; | |
1915 | u32 dma_ctrl; | |
1916 | u32 ring_cfg; | |
1917 | int ret; | |
1918 | ||
1919 | dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
1920 | dma_enable = dma_ctrl & DMA_EN; | |
1921 | dma_ctrl &= ~DMA_EN; | |
1922 | bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); | |
1923 | ||
1924 | dma_ctrl = 0; | |
1925 | ring_cfg = 0; | |
1926 | ||
1927 | /* Initialize Rx priority queues */ | |
1928 | for (i = 0; i < priv->hw_params->rx_queues; i++) { | |
1929 | ret = bcmgenet_init_rx_ring(priv, i, | |
1930 | priv->hw_params->rx_bds_per_q, | |
1931 | i * priv->hw_params->rx_bds_per_q, | |
1932 | (i + 1) * | |
1933 | priv->hw_params->rx_bds_per_q); | |
1934 | if (ret) | |
1935 | return ret; | |
1936 | ||
1937 | ring_cfg |= (1 << i); | |
1938 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | |
1939 | } | |
1940 | ||
1941 | /* Initialize Rx default queue 16 */ | |
1942 | ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, | |
1943 | priv->hw_params->rx_queues * | |
1944 | priv->hw_params->rx_bds_per_q, | |
1945 | TOTAL_DESC); | |
1946 | if (ret) | |
1947 | return ret; | |
1948 | ||
1949 | ring_cfg |= (1 << DESC_INDEX); | |
1950 | dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); | |
1951 | ||
1952 | /* Enable rings */ | |
1953 | bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); | |
1954 | ||
1955 | /* Configure ring as descriptor ring and re-enable DMA if enabled */ | |
1956 | if (dma_enable) | |
1957 | dma_ctrl |= DMA_EN; | |
1958 | bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); | |
1959 | ||
1960 | return 0; | |
1961 | } | |
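/* Note on sizing: GENET_Q16_RX_BD_CNT is simply whatever remains of the
 * TOTAL_DESC Rx descriptors once the priority queues have taken
 * rx_queues * rx_bds_per_q of them. With the hw_params below
 * (rx_queues = 0 for every listed version) the default queue 16 owns the
 * whole Rx descriptor pool.
 */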
1962 | ||
4a0c081e FF |
1963 | static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) |
1964 | { | |
1965 | int ret = 0; | |
1966 | int timeout = 0; | |
1967 | u32 reg; | |
1968 | ||
1969 | /* Disable TDMA to stop adding more frames to TX DMA */ |
1970 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
1971 | reg &= ~DMA_EN; | |
1972 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
1973 | ||
1974 | /* Check TDMA status register to confirm TDMA is disabled */ | |
1975 | while (timeout++ < DMA_TIMEOUT_VAL) { | |
1976 | reg = bcmgenet_tdma_readl(priv, DMA_STATUS); | |
1977 | if (reg & DMA_DISABLED) | |
1978 | break; | |
1979 | ||
1980 | udelay(1); | |
1981 | } | |
1982 | ||
1983 | if (timeout > DMA_TIMEOUT_VAL) { |
1984 | netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); | |
1985 | ret = -ETIMEDOUT; | |
1986 | } | |
1987 | ||
1988 | /* Wait 10ms for packet drain in both tx and rx dma */ | |
1989 | usleep_range(10000, 20000); | |
1990 | ||
1991 | /* Disable RDMA */ | |
1992 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
1993 | reg &= ~DMA_EN; | |
1994 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
1995 | ||
1996 | timeout = 0; | |
1997 | /* Check RDMA status register to confirm RDMA is disabled */ | |
1998 | while (timeout++ < DMA_TIMEOUT_VAL) { | |
1999 | reg = bcmgenet_rdma_readl(priv, DMA_STATUS); | |
2000 | if (reg & DMA_DISABLED) | |
2001 | break; | |
2002 | ||
2003 | udelay(1); | |
2004 | } | |
2005 | ||
2006 | if (timeout > DMA_TIMEOUT_VAL) { |
2007 | netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); | |
2008 | ret = -ETIMEDOUT; | |
2009 | } | |
2010 | ||
2011 | return ret; | |
2012 | } | |
2013 | ||
4092e6ac | 2014 | static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv) |
1c1008c7 FF |
2015 | { |
2016 | int i; | |
2017 | ||
2018 | /* disable DMA */ | |
4a0c081e | 2019 | bcmgenet_dma_teardown(priv); |
1c1008c7 FF |
2020 | |
2021 | for (i = 0; i < priv->num_tx_bds; i++) { | |
2022 | if (priv->tx_cbs[i].skb != NULL) { | |
2023 | dev_kfree_skb(priv->tx_cbs[i].skb); | |
2024 | priv->tx_cbs[i].skb = NULL; | |
2025 | } | |
2026 | } | |
2027 | ||
2028 | bcmgenet_free_rx_buffers(priv); | |
2029 | kfree(priv->rx_cbs); | |
2030 | kfree(priv->tx_cbs); | |
2031 | } | |
2032 | ||
4092e6ac JS |
2033 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) |
2034 | { | |
2035 | int i; | |
2036 | ||
2037 | bcmgenet_fini_tx_ring(priv, DESC_INDEX); | |
2038 | ||
2039 | for (i = 0; i < priv->hw_params->tx_queues; i++) | |
2040 | bcmgenet_fini_tx_ring(priv, i); | |
2041 | ||
2042 | __bcmgenet_fini_dma(priv); | |
2043 | } | |
2044 | ||
1c1008c7 FF |
2045 | /* init_edma: Initialize DMA control register */ |
2046 | static int bcmgenet_init_dma(struct bcmgenet_priv *priv) | |
2047 | { | |
2048 | int ret; | |
014012a4 PG |
2049 | unsigned int i; |
2050 | struct enet_cb *cb; | |
1c1008c7 | 2051 | |
6f5a272c | 2052 | netif_dbg(priv, hw, priv->dev, "%s\n", __func__); |
1c1008c7 | 2053 | |
6f5a272c PG |
2054 | /* Init rDma */ |
2055 | bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); | |
2056 | ||
2057 | /* Initialize common Rx ring structures */ | |
2058 | priv->rx_bds = priv->base + priv->hw_params->rdma_offset; | |
2059 | priv->num_rx_bds = TOTAL_DESC; | |
2060 | priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), | |
2061 | GFP_KERNEL); | |
2062 | if (!priv->rx_cbs) | |
2063 | return -ENOMEM; | |
2064 | ||
2065 | for (i = 0; i < priv->num_rx_bds; i++) { | |
2066 | cb = priv->rx_cbs + i; | |
2067 | cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; | |
2068 | } | |
2069 | ||
8ac467e8 PG |
2070 | /* Initialize Rx queues */ |
2071 | ret = bcmgenet_init_rx_queues(priv->dev); | |
1c1008c7 | 2072 | if (ret) { |
8ac467e8 | 2073 | netdev_err(priv->dev, "failed to initialize Rx queues\n"); |
6f5a272c PG |
2074 | bcmgenet_free_rx_buffers(priv); |
2075 | kfree(priv->rx_cbs); | |
1c1008c7 FF |
2076 | return ret; |
2077 | } | |
2078 | ||
1c1008c7 FF |
2079 | /* Init tDma */ |
2080 | bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); | |
2081 | ||
7fc527f9 | 2082 | /* Initialize common TX ring structures */ |
1c1008c7 FF |
2083 | priv->tx_bds = priv->base + priv->hw_params->tdma_offset; |
2084 | priv->num_tx_bds = TOTAL_DESC; | |
c489be08 | 2085 | priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), |
c91b7f66 | 2086 | GFP_KERNEL); |
1c1008c7 | 2087 | if (!priv->tx_cbs) { |
4092e6ac | 2088 | __bcmgenet_fini_dma(priv); |
1c1008c7 FF |
2089 | return -ENOMEM; |
2090 | } | |
2091 | ||
014012a4 PG |
2092 | for (i = 0; i < priv->num_tx_bds; i++) { |
2093 | cb = priv->tx_cbs + i; | |
2094 | cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; | |
2095 | } | |
2096 | ||
16c6d667 PG |
2097 | /* Initialize Tx queues */ |
2098 | bcmgenet_init_tx_queues(priv->dev); | |
1c1008c7 FF |
2099 | |
2100 | return 0; | |
2101 | } | |
2102 | ||
2103 | /* NAPI polling method */ |
2104 | static int bcmgenet_poll(struct napi_struct *napi, int budget) | |
2105 | { | |
2106 | struct bcmgenet_priv *priv = container_of(napi, | |
2107 | struct bcmgenet_priv, napi); | |
2108 | unsigned int work_done; | |
2109 | ||
8ac467e8 | 2110 | work_done = bcmgenet_desc_rx(priv, DESC_INDEX, budget); |
1c1008c7 | 2111 | |
1c1008c7 FF |
2112 | if (work_done < budget) { |
2113 | napi_complete(napi); | |
c91b7f66 FF |
2114 | bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE, |
2115 | INTRL2_CPU_MASK_CLEAR); | |
1c1008c7 FF |
2116 | } |
2117 | ||
2118 | return work_done; | |
2119 | } | |
2120 | ||
2121 | /* Interrupt bottom half */ | |
2122 | static void bcmgenet_irq_task(struct work_struct *work) | |
2123 | { | |
2124 | struct bcmgenet_priv *priv = container_of( | |
2125 | work, struct bcmgenet_priv, bcmgenet_irq_work); | |
2126 | ||
2127 | netif_dbg(priv, intr, priv->dev, "%s\n", __func__); | |
2128 | ||
8fdb0e0f FF |
2129 | if (priv->irq0_stat & UMAC_IRQ_MPD_R) { |
2130 | priv->irq0_stat &= ~UMAC_IRQ_MPD_R; | |
2131 | netif_dbg(priv, wol, priv->dev, | |
2132 | "magic packet detected, waking up\n"); | |
2133 | bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); | |
2134 | } | |
2135 | ||
1c1008c7 FF |
2136 | /* Link UP/DOWN event */ |
2137 | if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && | |
c91b7f66 | 2138 | (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) { |
80d8e96d | 2139 | phy_mac_interrupt(priv->phydev, |
c91b7f66 | 2140 | priv->irq0_stat & UMAC_IRQ_LINK_UP); |
1c1008c7 FF |
2141 | priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN); |
2142 | } | |
2143 | } | |
2144 | ||
2145 | /* bcmgenet_isr1: interrupt handler for ring buffer. */ | |
2146 | static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) | |
2147 | { | |
2148 | struct bcmgenet_priv *priv = dev_id; | |
4092e6ac | 2149 | struct bcmgenet_tx_ring *ring; |
1c1008c7 FF |
2150 | unsigned int index; |
2151 | ||
2152 | /* Save irq status for bottom-half processing. */ | |
2153 | priv->irq1_stat = | |
2154 | bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & | |
4092e6ac | 2155 | ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); |
7fc527f9 | 2156 | /* clear interrupts */ |
1c1008c7 FF |
2157 | bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); |
2158 | ||
2159 | netif_dbg(priv, intr, priv->dev, | |
c91b7f66 | 2160 | "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); |
4092e6ac | 2161 | |
1c1008c7 FF |
2162 | /* Check the MBDONE interrupts. |
2163 | * A packet is done; reclaim its descriptors. |
2164 | */ | |
4092e6ac JS |
2165 | for (index = 0; index < priv->hw_params->tx_queues; index++) { |
2166 | if (!(priv->irq1_stat & BIT(index))) | |
2167 | continue; | |
2168 | ||
2169 | ring = &priv->tx_rings[index]; | |
2170 | ||
2171 | if (likely(napi_schedule_prep(&ring->napi))) { | |
2172 | ring->int_disable(priv, ring); | |
2173 | __napi_schedule(&ring->napi); | |
1c1008c7 FF |
2174 | } |
2175 | } | |
4092e6ac | 2176 | |
1c1008c7 FF |
2177 | return IRQ_HANDLED; |
2178 | } | |
2179 | ||
2180 | /* bcmgenet_isr0: Handle various interrupts. */ | |
2181 | static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) | |
2182 | { | |
2183 | struct bcmgenet_priv *priv = dev_id; | |
2184 | ||
2185 | /* Save irq status for bottom-half processing. */ | |
2186 | priv->irq0_stat = | |
2187 | bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & | |
2188 | ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); | |
7fc527f9 | 2189 | /* clear interrupts */ |
1c1008c7 FF |
2190 | bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); |
2191 | ||
2192 | netif_dbg(priv, intr, priv->dev, | |
c91b7f66 | 2193 | "IRQ=0x%x\n", priv->irq0_stat); |
1c1008c7 FF |
2194 | |
2195 | if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) { | |
2196 | /* We use NAPI (software interrupt throttling) if |
2197 | * Rx Descriptor throttling is not used. |
2198 | * Disable the interrupt; it will be re-enabled in the poll method. |
2199 | */ | |
2200 | if (likely(napi_schedule_prep(&priv->napi))) { | |
c91b7f66 FF |
2201 | bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE, |
2202 | INTRL2_CPU_MASK_SET); | |
1c1008c7 FF |
2203 | __napi_schedule(&priv->napi); |
2204 | } | |
2205 | } | |
2206 | if (priv->irq0_stat & | |
2207 | (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { | |
4092e6ac JS |
2208 | struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX]; |
2209 | ||
2210 | if (likely(napi_schedule_prep(&ring->napi))) { | |
2211 | ring->int_disable(priv, ring); | |
2212 | __napi_schedule(&ring->napi); | |
2213 | } | |
1c1008c7 FF |
2214 | } |
2215 | if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | | |
2216 | UMAC_IRQ_PHY_DET_F | | |
2217 | UMAC_IRQ_LINK_UP | | |
2218 | UMAC_IRQ_LINK_DOWN | | |
2219 | UMAC_IRQ_HFB_SM | | |
2220 | UMAC_IRQ_HFB_MM | | |
2221 | UMAC_IRQ_MPD_R)) { | |
2222 | /* all other interrupts of interest are handled in the bottom half */ |
2223 | schedule_work(&priv->bcmgenet_irq_work); | |
2224 | } | |
2225 | ||
2226 | if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && | |
c91b7f66 | 2227 | priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { |
1c1008c7 FF |
2228 | priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); |
2229 | wake_up(&priv->wq); | |
2230 | } | |
2231 | ||
2232 | return IRQ_HANDLED; | |
2233 | } | |
2234 | ||
8562056f FF |
2235 | static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) |
2236 | { | |
2237 | struct bcmgenet_priv *priv = dev_id; | |
2238 | ||
2239 | pm_wakeup_event(&priv->pdev->dev, 0); | |
2240 | ||
2241 | return IRQ_HANDLED; | |
2242 | } | |
2243 | ||
1c1008c7 FF |
2244 | static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) |
2245 | { | |
2246 | u32 reg; | |
2247 | ||
2248 | reg = bcmgenet_rbuf_ctrl_get(priv); | |
2249 | reg |= BIT(1); | |
2250 | bcmgenet_rbuf_ctrl_set(priv, reg); | |
2251 | udelay(10); | |
2252 | ||
2253 | reg &= ~BIT(1); | |
2254 | bcmgenet_rbuf_ctrl_set(priv, reg); | |
2255 | udelay(10); | |
2256 | } | |
2257 | ||
2258 | static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, | |
c91b7f66 | 2259 | unsigned char *addr) |
1c1008c7 FF |
2260 | { |
2261 | bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | | |
2262 | (addr[2] << 8) | addr[3], UMAC_MAC0); | |
2263 | bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); | |
2264 | } | |
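/* Packing example (hypothetical address 00:10:18:aa:bb:cc): UMAC_MAC0 is
 * written with 0x001018aa and UMAC_MAC1 with 0x0000bbcc, i.e. the first
 * four octets go into MAC0 and the last two into the low half of MAC1.
 */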
2265 | ||
1c1008c7 FF |
2266 | /* Returns a reusable dma control register value */ |
2267 | static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) | |
2268 | { | |
2269 | u32 reg; | |
2270 | u32 dma_ctrl; | |
2271 | ||
2272 | /* disable DMA */ | |
2273 | dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; | |
2274 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
2275 | reg &= ~dma_ctrl; | |
2276 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2277 | ||
2278 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2279 | reg &= ~dma_ctrl; | |
2280 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
2281 | ||
2282 | bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); | |
2283 | udelay(10); | |
2284 | bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); | |
2285 | ||
2286 | return dma_ctrl; | |
2287 | } | |
2288 | ||
2289 | static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) | |
2290 | { | |
2291 | u32 reg; | |
2292 | ||
2293 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2294 | reg |= dma_ctrl; | |
2295 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
2296 | ||
2297 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
2298 | reg |= dma_ctrl; | |
2299 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2300 | } | |
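/* These two helpers are meant to be used as a pair, as bcmgenet_open()
 * does further down; a sketch of that flow, not a separate code path:
 *
 *	dma_ctrl = bcmgenet_dma_disable(priv);	stop DMA, keep the ring bits
 *	... reprogram the TDMA/RDMA rings ...
 *	bcmgenet_enable_dma(priv, dma_ctrl);	restore the saved enables
 */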
2301 | ||
0034de41 PG |
2302 | static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv, |
2303 | u32 f_index) | |
2304 | { | |
2305 | u32 offset; | |
2306 | u32 reg; | |
2307 | ||
2308 | offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); | |
2309 | reg = bcmgenet_hfb_reg_readl(priv, offset); | |
2310 | return !!(reg & (1 << (f_index % 32))); | |
2311 | } | |
2312 | ||
2313 | static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) | |
2314 | { | |
2315 | u32 offset; | |
2316 | u32 reg; | |
2317 | ||
2318 | offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); | |
2319 | reg = bcmgenet_hfb_reg_readl(priv, offset); | |
2320 | reg |= (1 << (f_index % 32)); | |
2321 | bcmgenet_hfb_reg_writel(priv, reg, offset); | |
2322 | } | |
2323 | ||
2324 | static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv, | |
2325 | u32 f_index, u32 rx_queue) | |
2326 | { | |
2327 | u32 offset; | |
2328 | u32 reg; | |
2329 | ||
2330 | offset = f_index / 8; | |
2331 | reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset); | |
2332 | reg &= ~(0xF << (4 * (f_index % 8))); | |
2333 | reg |= ((rx_queue & 0xF) << (4 * (f_index % 8))); | |
2334 | bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset); | |
2335 | } | |
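/* Worked example of the mapping above: filter f_index = 10 lands in
 * DMA_INDEX2RING_1 (offset = 10 / 8 = 1) with its 4-bit queue field at
 * bit 4 * (10 % 8) = 8, so eight filters share each 32-bit register.
 */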
2336 | ||
2337 | static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, | |
2338 | u32 f_index, u32 f_length) | |
2339 | { | |
2340 | u32 offset; | |
2341 | u32 reg; | |
2342 | ||
2343 | offset = HFB_FLT_LEN_V3PLUS + | |
2344 | ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) * | |
2345 | sizeof(u32); | |
2346 | reg = bcmgenet_hfb_reg_readl(priv, offset); | |
2347 | reg &= ~(0xFF << (8 * (f_index % 4))); | |
2348 | reg |= ((f_length & 0xFF) << (8 * (f_index % 4))); | |
2349 | bcmgenet_hfb_reg_writel(priv, reg, offset); | |
2350 | } | |
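/* Worked example: with hfb_filter_cnt = 48 (GENET v3/v4 below), filter 0
 * keeps its 8-bit length in the word at byte offset
 * ((48 - 1 - 0) / 4) * 4 = 44 from HFB_FLT_LEN_V3PLUS, byte lane
 * 0 % 4 = 0; the length table is indexed from the highest filter down,
 * four entries per 32-bit word.
 */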
2351 | ||
2352 | static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv) | |
2353 | { | |
2354 | u32 f_index; | |
2355 | ||
2356 | for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++) | |
2357 | if (!bcmgenet_hfb_is_filter_enabled(priv, f_index)) | |
2358 | return f_index; | |
2359 | ||
2360 | return -ENOMEM; | |
2361 | } | |
2362 | ||
2363 | /* bcmgenet_hfb_add_filter | |
2364 | * | |
2365 | * Add new filter to Hardware Filter Block to match and direct Rx traffic to | |
2366 | * desired Rx queue. | |
2367 | * | |
2368 | * f_data is an array of unsigned 32-bit integers where each 32-bit integer | |
2369 | * provides filter data for 2 bytes (4 nibbles) of Rx frame: | |
2370 | * | |
2371 | * bits 31:20 - unused | |
2372 | * bit 19 - nibble 0 match enable | |
2373 | * bit 18 - nibble 1 match enable | |
2374 | * bit 17 - nibble 2 match enable | |
2375 | * bit 16 - nibble 3 match enable | |
2376 | * bits 15:12 - nibble 0 data | |
2377 | * bits 11:8 - nibble 1 data | |
2378 | * bits 7:4 - nibble 2 data | |
2379 | * bits 3:0 - nibble 3 data | |
2380 | * | |
2381 | * Example: | |
2382 | * In order to match: | |
2383 | * - Ethernet frame type = 0x0800 (IP) | |
2384 | * - IP version field = 4 | |
2385 | * - IP protocol field = 0x11 (UDP) | |
2386 | * | |
2387 | * The following filter is needed: | |
2388 | * u32 hfb_filter_ipv4_udp[] = { | |
2389 | * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000, | |
2390 | * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000, | |
2391 | * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011, | |
2392 | * }; | |
2393 | * | |
2394 | * To add the filter to HFB and direct the traffic to Rx queue 0, call: | |
2395 | * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp, | |
2396 | * ARRAY_SIZE(hfb_filter_ipv4_udp), 0); | |
2397 | */ | |
2398 | int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data, | |
2399 | u32 f_length, u32 rx_queue) | |
2400 | { | |
2401 | int f_index; | |
2402 | u32 i; | |
2403 | ||
2404 | f_index = bcmgenet_hfb_find_unused_filter(priv); | |
2405 | if (f_index < 0) | |
2406 | return -ENOMEM; | |
2407 | ||
2408 | if (f_length > priv->hw_params->hfb_filter_size) | |
2409 | return -EINVAL; | |
2410 | ||
2411 | for (i = 0; i < f_length; i++) | |
2412 | bcmgenet_hfb_writel(priv, f_data[i], | |
2413 | (f_index * priv->hw_params->hfb_filter_size + i) * | |
2414 | sizeof(u32)); | |
2415 | ||
2416 | bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length); | |
2417 | bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue); | |
2418 | bcmgenet_hfb_enable_filter(priv, f_index); | |
2419 | bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL); | |
2420 | ||
2421 | return 0; | |
2422 | } | |
2423 | ||
2424 | /* bcmgenet_hfb_clear | |
2425 | * | |
2426 | * Clear Hardware Filter Block and disable all filtering. | |
2427 | */ | |
2428 | static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) | |
2429 | { | |
2430 | u32 i; | |
2431 | ||
2432 | bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); | |
2433 | bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); | |
2434 | bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); | |
2435 | ||
2436 | for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++) | |
2437 | bcmgenet_rdma_writel(priv, 0x0, i); | |
2438 | ||
2439 | for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) | |
2440 | bcmgenet_hfb_reg_writel(priv, 0x0, | |
2441 | HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); | |
2442 | ||
2443 | for (i = 0; i < priv->hw_params->hfb_filter_cnt * | |
2444 | priv->hw_params->hfb_filter_size; i++) | |
2445 | bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32)); | |
2446 | } | |
2447 | ||
2448 | static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) | |
2449 | { | |
2450 | if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) | |
2451 | return; | |
2452 | ||
2453 | bcmgenet_hfb_clear(priv); | |
2454 | } | |
2455 | ||
909ff5ef FF |
2456 | static void bcmgenet_netif_start(struct net_device *dev) |
2457 | { | |
2458 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2459 | ||
2460 | /* Start the network engine */ | |
2461 | napi_enable(&priv->napi); | |
2462 | ||
2463 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); | |
2464 | ||
2465 | if (phy_is_internal(priv->phydev)) | |
2466 | bcmgenet_power_up(priv, GENET_POWER_PASSIVE); | |
2467 | ||
2468 | netif_tx_start_all_queues(dev); | |
2469 | ||
2470 | phy_start(priv->phydev); | |
2471 | } | |
2472 | ||
1c1008c7 FF |
2473 | static int bcmgenet_open(struct net_device *dev) |
2474 | { | |
2475 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2476 | unsigned long dma_ctrl; | |
2477 | u32 reg; | |
2478 | int ret; | |
2479 | ||
2480 | netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); | |
2481 | ||
2482 | /* Turn on the clock */ | |
2483 | if (!IS_ERR(priv->clk)) | |
2484 | clk_prepare_enable(priv->clk); | |
2485 | ||
2486 | /* take MAC out of reset */ | |
2487 | bcmgenet_umac_reset(priv); | |
2488 | ||
2489 | ret = init_umac(priv); | |
2490 | if (ret) | |
2491 | goto err_clk_disable; | |
2492 | ||
2493 | /* disable ethernet MAC while updating its registers */ | |
e29585b8 | 2494 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); |
1c1008c7 | 2495 | |
909ff5ef FF |
2496 | /* Make sure we reflect the value of CRC_CMD_FWD */ |
2497 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
2498 | priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); | |
2499 | ||
1c1008c7 FF |
2500 | bcmgenet_set_hw_addr(priv, dev->dev_addr); |
2501 | ||
1c1008c7 FF |
2502 | if (phy_is_internal(priv->phydev)) { |
2503 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | |
2504 | reg |= EXT_ENERGY_DET_MASK; | |
2505 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | |
2506 | } | |
2507 | ||
2508 | /* Disable RX/TX DMA and flush TX queues */ | |
2509 | dma_ctrl = bcmgenet_dma_disable(priv); | |
2510 | ||
2511 | /* Reinitialize TDMA and RDMA and SW housekeeping */ | |
2512 | ret = bcmgenet_init_dma(priv); | |
2513 | if (ret) { | |
2514 | netdev_err(dev, "failed to initialize DMA\n"); | |
2515 | goto err_fini_dma; | |
2516 | } | |
2517 | ||
2518 | /* Always enable ring 16 - descriptor ring */ | |
2519 | bcmgenet_enable_dma(priv, dma_ctrl); | |
2520 | ||
0034de41 PG |
2521 | /* HFB init */ |
2522 | bcmgenet_hfb_init(priv); | |
2523 | ||
1c1008c7 | 2524 | ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, |
c91b7f66 | 2525 | dev->name, priv); |
1c1008c7 FF |
2526 | if (ret < 0) { |
2527 | netdev_err(dev, "can't request IRQ %d\n", priv->irq0); | |
2528 | goto err_fini_dma; | |
2529 | } | |
2530 | ||
2531 | ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, | |
c91b7f66 | 2532 | dev->name, priv); |
1c1008c7 FF |
2533 | if (ret < 0) { |
2534 | netdev_err(dev, "can't request IRQ %d\n", priv->irq1); | |
2535 | goto err_irq0; | |
2536 | } | |
2537 | ||
dbd479db FF |
2538 | /* Re-configure the port multiplexer towards the PHY device */ |
2539 | bcmgenet_mii_config(priv->dev, false); | |
2540 | ||
c96e731c FF |
2541 | phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup, |
2542 | priv->phy_interface); | |
2543 | ||
909ff5ef | 2544 | bcmgenet_netif_start(dev); |
1c1008c7 FF |
2545 | |
2546 | return 0; | |
2547 | ||
2548 | err_irq0: | |
2549 | free_irq(priv->irq0, dev); | |
2550 | err_fini_dma: | |
2551 | bcmgenet_fini_dma(priv); | |
2552 | err_clk_disable: | |
2553 | if (!IS_ERR(priv->clk)) | |
2554 | clk_disable_unprepare(priv->clk); | |
2555 | return ret; | |
2556 | } | |
2557 | ||
909ff5ef FF |
2558 | static void bcmgenet_netif_stop(struct net_device *dev) |
2559 | { | |
2560 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2561 | ||
2562 | netif_tx_stop_all_queues(dev); | |
2563 | napi_disable(&priv->napi); | |
2564 | phy_stop(priv->phydev); | |
2565 | ||
2566 | bcmgenet_intr_disable(priv); | |
2567 | ||
2568 | /* Wait for pending work items to complete. Since interrupts are | |
2569 | * disabled, no new work will be scheduled. |
2570 | */ | |
2571 | cancel_work_sync(&priv->bcmgenet_irq_work); | |
cc013fb4 | 2572 | |
cc013fb4 | 2573 | priv->old_link = -1; |
5ad6e6c5 | 2574 | priv->old_speed = -1; |
cc013fb4 | 2575 | priv->old_duplex = -1; |
5ad6e6c5 | 2576 | priv->old_pause = -1; |
909ff5ef FF |
2577 | } |
2578 | ||
1c1008c7 FF |
2579 | static int bcmgenet_close(struct net_device *dev) |
2580 | { | |
2581 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2582 | int ret; | |
1c1008c7 FF |
2583 | |
2584 | netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); | |
2585 | ||
909ff5ef | 2586 | bcmgenet_netif_stop(dev); |
1c1008c7 | 2587 | |
c96e731c FF |
2588 | /* Really kill the PHY state machine and disconnect from it */ |
2589 | phy_disconnect(priv->phydev); | |
2590 | ||
1c1008c7 | 2591 | /* Disable MAC receive */ |
e29585b8 | 2592 | umac_enable_set(priv, CMD_RX_EN, false); |
1c1008c7 | 2593 | |
1c1008c7 FF |
2594 | ret = bcmgenet_dma_teardown(priv); |
2595 | if (ret) | |
2596 | return ret; | |
2597 | ||
2598 | /* Disable MAC transmit. TX DMA must be disabled before this. */ |
e29585b8 | 2599 | umac_enable_set(priv, CMD_TX_EN, false); |
1c1008c7 | 2600 | |
1c1008c7 FF |
2601 | /* tx reclaim */ |
2602 | bcmgenet_tx_reclaim_all(dev); | |
2603 | bcmgenet_fini_dma(priv); | |
2604 | ||
2605 | free_irq(priv->irq0, priv); | |
2606 | free_irq(priv->irq1, priv); | |
2607 | ||
1c1008c7 FF |
2608 | if (phy_is_internal(priv->phydev)) |
2609 | bcmgenet_power_down(priv, GENET_POWER_PASSIVE); | |
2610 | ||
1c1008c7 FF |
2611 | if (!IS_ERR(priv->clk)) |
2612 | clk_disable_unprepare(priv->clk); | |
2613 | ||
2614 | return 0; | |
2615 | } | |
2616 | ||
2617 | static void bcmgenet_timeout(struct net_device *dev) | |
2618 | { | |
2619 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2620 | ||
2621 | netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); | |
2622 | ||
2623 | dev->trans_start = jiffies; | |
2624 | ||
2625 | dev->stats.tx_errors++; | |
2626 | ||
2627 | netif_tx_wake_all_queues(dev); | |
2628 | } | |
2629 | ||
2630 | #define MAX_MC_COUNT 16 | |
2631 | ||
2632 | static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, | |
2633 | unsigned char *addr, | |
2634 | int *i, | |
2635 | int *mc) | |
2636 | { | |
2637 | u32 reg; | |
2638 | ||
c91b7f66 FF |
2639 | bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], |
2640 | UMAC_MDF_ADDR + (*i * 4)); | |
2641 | bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | | |
2642 | addr[4] << 8 | addr[5], | |
2643 | UMAC_MDF_ADDR + ((*i + 1) * 4)); | |
1c1008c7 FF |
2644 | reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL); |
2645 | reg |= (1 << (MAX_MC_COUNT - *mc)); | |
2646 | bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); | |
2647 | *i += 2; | |
2648 | (*mc)++; | |
2649 | } | |
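/* Slot accounting, as used by bcmgenet_set_rx_mode() below: each address
 * consumes two UMAC_MDF_ADDR words and one of the MAX_MC_COUNT (16) match
 * slots. The broadcast and own addresses are always programmed first,
 * leaving at most 14 slots for the unicast and multicast lists before
 * bcmgenet_set_rx_mode() stops adding entries.
 */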
2650 | ||
2651 | static void bcmgenet_set_rx_mode(struct net_device *dev) | |
2652 | { | |
2653 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2654 | struct netdev_hw_addr *ha; | |
2655 | int i, mc; | |
2656 | u32 reg; | |
2657 | ||
2658 | netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); | |
2659 | ||
7fc527f9 | 2660 | /* Promiscuous mode */ |
1c1008c7 FF |
2661 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); |
2662 | if (dev->flags & IFF_PROMISC) { | |
2663 | reg |= CMD_PROMISC; | |
2664 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | |
2665 | bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); | |
2666 | return; | |
2667 | } else { | |
2668 | reg &= ~CMD_PROMISC; | |
2669 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | |
2670 | } | |
2671 | ||
2672 | /* UniMAC doesn't support ALLMULTI */ |
2673 | if (dev->flags & IFF_ALLMULTI) { | |
2674 | netdev_warn(dev, "ALLMULTI is not supported\n"); | |
2675 | return; | |
2676 | } | |
2677 | ||
2678 | /* update MDF filter */ | |
2679 | i = 0; | |
2680 | mc = 0; | |
2681 | /* Broadcast */ | |
2682 | bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc); | |
2683 | /* my own address */ |
2684 | bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc); | |
2685 | /* Unicast list */ |
2686 | if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc)) | |
2687 | return; | |
2688 | ||
2689 | if (!netdev_uc_empty(dev)) | |
2690 | netdev_for_each_uc_addr(ha, dev) | |
2691 | bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); | |
2692 | /* Multicast */ | |
2693 | if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc)) | |
2694 | return; | |
2695 | ||
2696 | netdev_for_each_mc_addr(ha, dev) | |
2697 | bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); | |
2698 | } | |
2699 | ||
2700 | /* Set the hardware MAC address. */ | |
2701 | static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) | |
2702 | { | |
2703 | struct sockaddr *addr = p; | |
2704 | ||
2705 | /* Setting the MAC address at the hardware level is not possible | |
2706 | * without disabling the UniMAC RX/TX enable bits. | |
2707 | */ | |
2708 | if (netif_running(dev)) | |
2709 | return -EBUSY; | |
2710 | ||
2711 | ether_addr_copy(dev->dev_addr, addr->sa_data); | |
2712 | ||
2713 | return 0; | |
2714 | } | |
2715 | ||
1c1008c7 FF |
2716 | static const struct net_device_ops bcmgenet_netdev_ops = { |
2717 | .ndo_open = bcmgenet_open, | |
2718 | .ndo_stop = bcmgenet_close, | |
2719 | .ndo_start_xmit = bcmgenet_xmit, | |
1c1008c7 FF |
2720 | .ndo_tx_timeout = bcmgenet_timeout, |
2721 | .ndo_set_rx_mode = bcmgenet_set_rx_mode, | |
2722 | .ndo_set_mac_address = bcmgenet_set_mac_addr, | |
2723 | .ndo_do_ioctl = bcmgenet_ioctl, | |
2724 | .ndo_set_features = bcmgenet_set_features, | |
2725 | }; | |
2726 | ||
2727 | /* Array of GENET hardware parameters/characteristics */ | |
2728 | static struct bcmgenet_hw_params bcmgenet_hw_params[] = { | |
2729 | [GENET_V1] = { | |
2730 | .tx_queues = 0, | |
51a966a7 | 2731 | .tx_bds_per_q = 0, |
1c1008c7 | 2732 | .rx_queues = 0, |
3feafa02 | 2733 | .rx_bds_per_q = 0, |
1c1008c7 FF |
2734 | .bp_in_en_shift = 16, |
2735 | .bp_in_mask = 0xffff, | |
2736 | .hfb_filter_cnt = 16, | |
2737 | .qtag_mask = 0x1F, | |
2738 | .hfb_offset = 0x1000, | |
2739 | .rdma_offset = 0x2000, | |
2740 | .tdma_offset = 0x3000, | |
2741 | .words_per_bd = 2, | |
2742 | }, | |
2743 | [GENET_V2] = { | |
2744 | .tx_queues = 4, | |
51a966a7 | 2745 | .tx_bds_per_q = 32, |
7e906e02 | 2746 | .rx_queues = 0, |
3feafa02 | 2747 | .rx_bds_per_q = 0, |
1c1008c7 FF |
2748 | .bp_in_en_shift = 16, |
2749 | .bp_in_mask = 0xffff, | |
2750 | .hfb_filter_cnt = 16, | |
2751 | .qtag_mask = 0x1F, | |
2752 | .tbuf_offset = 0x0600, | |
2753 | .hfb_offset = 0x1000, | |
2754 | .hfb_reg_offset = 0x2000, | |
2755 | .rdma_offset = 0x3000, | |
2756 | .tdma_offset = 0x4000, | |
2757 | .words_per_bd = 2, | |
2758 | .flags = GENET_HAS_EXT, | |
2759 | }, | |
2760 | [GENET_V3] = { | |
2761 | .tx_queues = 4, | |
51a966a7 | 2762 | .tx_bds_per_q = 32, |
7e906e02 | 2763 | .rx_queues = 0, |
3feafa02 | 2764 | .rx_bds_per_q = 0, |
1c1008c7 FF |
2765 | .bp_in_en_shift = 17, |
2766 | .bp_in_mask = 0x1ffff, | |
2767 | .hfb_filter_cnt = 48, | |
0034de41 | 2768 | .hfb_filter_size = 128, |
1c1008c7 FF |
2769 | .qtag_mask = 0x3F, |
2770 | .tbuf_offset = 0x0600, | |
2771 | .hfb_offset = 0x8000, | |
2772 | .hfb_reg_offset = 0xfc00, | |
2773 | .rdma_offset = 0x10000, | |
2774 | .tdma_offset = 0x11000, | |
2775 | .words_per_bd = 2, | |
2776 | .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR, | |
2777 | }, | |
2778 | [GENET_V4] = { | |
2779 | .tx_queues = 4, | |
51a966a7 | 2780 | .tx_bds_per_q = 32, |
7e906e02 | 2781 | .rx_queues = 0, |
3feafa02 | 2782 | .rx_bds_per_q = 0, |
1c1008c7 FF |
2783 | .bp_in_en_shift = 17, |
2784 | .bp_in_mask = 0x1ffff, | |
2785 | .hfb_filter_cnt = 48, | |
0034de41 | 2786 | .hfb_filter_size = 128, |
1c1008c7 FF |
2787 | .qtag_mask = 0x3F, |
2788 | .tbuf_offset = 0x0600, | |
2789 | .hfb_offset = 0x8000, | |
2790 | .hfb_reg_offset = 0xfc00, | |
2791 | .rdma_offset = 0x2000, | |
2792 | .tdma_offset = 0x4000, | |
2793 | .words_per_bd = 3, | |
2794 | .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR, | |
2795 | }, | |
2796 | }; | |
2797 | ||
2798 | /* Infer hardware parameters from the detected GENET version */ | |
2799 | static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) | |
2800 | { | |
2801 | struct bcmgenet_hw_params *params; | |
2802 | u32 reg; | |
2803 | u8 major; | |
b04a2f5b | 2804 | u16 gphy_rev; |
1c1008c7 FF |
2805 | |
2806 | if (GENET_IS_V4(priv)) { | |
2807 | bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; | |
2808 | genet_dma_ring_regs = genet_dma_ring_regs_v4; | |
2809 | priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; | |
2810 | priv->version = GENET_V4; | |
2811 | } else if (GENET_IS_V3(priv)) { | |
2812 | bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; | |
2813 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | |
2814 | priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; | |
2815 | priv->version = GENET_V3; | |
2816 | } else if (GENET_IS_V2(priv)) { | |
2817 | bcmgenet_dma_regs = bcmgenet_dma_regs_v2; | |
2818 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | |
2819 | priv->dma_rx_chk_bit = DMA_RX_CHK_V12; | |
2820 | priv->version = GENET_V2; | |
2821 | } else if (GENET_IS_V1(priv)) { | |
2822 | bcmgenet_dma_regs = bcmgenet_dma_regs_v1; | |
2823 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | |
2824 | priv->dma_rx_chk_bit = DMA_RX_CHK_V12; | |
2825 | priv->version = GENET_V1; | |
2826 | } | |
2827 | ||
2828 | /* enum genet_version starts at 1 */ | |
2829 | priv->hw_params = &bcmgenet_hw_params[priv->version]; | |
2830 | params = priv->hw_params; | |
2831 | ||
2832 | /* Read GENET HW version */ | |
2833 | reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); | |
2834 | major = (reg >> 24 & 0x0f); | |
2835 | if (major == 5) | |
2836 | major = 4; | |
2837 | else if (major == 0) | |
2838 | major = 1; | |
2839 | if (major != priv->version) { | |
2840 | dev_err(&priv->pdev->dev, | |
2841 | "GENET version mismatch, got: %d, configured for: %d\n", | |
2842 | major, priv->version); | |
2843 | } | |
2844 | ||
2845 | /* Print the GENET core version */ | |
2846 | dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, | |
c91b7f66 | 2847 | major, (reg >> 16) & 0x0f, reg & 0xffff); |
1c1008c7 | 2848 | |
487320c5 FF |
2849 | /* Store the integrated PHY revision for the MDIO probing function |
2850 | * to pass this information to the PHY driver. The PHY driver expects | |
2851 | * to find the PHY major revision in bits 15:8 while the GENET register | |
2852 | * stores that information in bits 7:0, account for that. | |
b04a2f5b FF |
2853 | * |
2854 | * On newer chips, starting with PHY revision G0, a new scheme is | |
2855 | * deployed similar to the Starfighter 2 switch with GPHY major | |
2856 | * revision in bits 15:8 and patch level in bits 7:0. Major revision 0 | |
2857 | * is reserved as well as special value 0x01ff, we have a small | |
2858 | * heuristic to check for the new GPHY revision and re-arrange things | |
2859 | * so the GPHY driver is happy. | |
487320c5 | 2860 | */ |
b04a2f5b FF |
2861 | gphy_rev = reg & 0xffff; |
2862 | ||
2863 | /* This is the good old scheme, just GPHY major, no minor nor patch */ | |
2864 | if ((gphy_rev & 0xf0) != 0) | |
2865 | priv->gphy_rev = gphy_rev << 8; | |
2866 | ||
2867 | /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */ | |
2868 | else if ((gphy_rev & 0xff00) != 0) | |
2869 | priv->gphy_rev = gphy_rev; | |
2870 | ||
2871 | /* This is reserved so should require special treatment */ | |
2872 | else if (gphy_rev == 0 || gphy_rev == 0x01ff) { | |
2873 | pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); | |
2874 | return; | |
2875 | } | |
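/* Two hypothetical readings to illustrate the heuristic above: 0x00a0
 * matches the old scheme (major in the high nibble of the low byte) and
 * is stored as priv->gphy_rev = 0xa000, while 0x1000 has an empty low
 * byte, matches the new scheme and is stored unchanged as rev G0.
 */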
487320c5 | 2876 | |
1c1008c7 FF |
2877 | #ifdef CONFIG_PHYS_ADDR_T_64BIT |
2878 | if (!(params->flags & GENET_HAS_40BITS)) | |
2879 | pr_warn("GENET does not support 40-bits PA\n"); | |
2880 | #endif | |
2881 | ||
2882 | pr_debug("Configuration for version: %d\n" | |
3feafa02 | 2883 | "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n" |
1c1008c7 FF |
2884 | "BP << en: %2d, BP msk: 0x%05x\n" |
2885 | "HFB count: %2d, QTAQ msk: 0x%05x\n" | |
2886 | "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" | |
2887 | "RDMA: 0x%05x, TDMA: 0x%05x\n" | |
2888 | "Words/BD: %d\n", | |
2889 | priv->version, | |
51a966a7 | 2890 | params->tx_queues, params->tx_bds_per_q, |
3feafa02 | 2891 | params->rx_queues, params->rx_bds_per_q, |
1c1008c7 FF |
2892 | params->bp_in_en_shift, params->bp_in_mask, |
2893 | params->hfb_filter_cnt, params->qtag_mask, | |
2894 | params->tbuf_offset, params->hfb_offset, | |
2895 | params->hfb_reg_offset, | |
2896 | params->rdma_offset, params->tdma_offset, | |
2897 | params->words_per_bd); | |
2898 | } | |
2899 | ||
2900 | static const struct of_device_id bcmgenet_match[] = { | |
2901 | { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 }, | |
2902 | { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 }, | |
2903 | { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 }, | |
2904 | { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, | |
2905 | { }, | |
2906 | }; | |
2907 | ||
2908 | static int bcmgenet_probe(struct platform_device *pdev) | |
2909 | { | |
b0ba512e | 2910 | struct bcmgenet_platform_data *pd = pdev->dev.platform_data; |
1c1008c7 | 2911 | struct device_node *dn = pdev->dev.of_node; |
b0ba512e | 2912 | const struct of_device_id *of_id = NULL; |
1c1008c7 FF |
2913 | struct bcmgenet_priv *priv; |
2914 | struct net_device *dev; | |
2915 | const void *macaddr; | |
2916 | struct resource *r; | |
2917 | int err = -EIO; | |
2918 | ||
3feafeed PG |
2919 | /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ |
2920 | dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, | |
2921 | GENET_MAX_MQ_CNT + 1); | |
1c1008c7 FF |
2922 | if (!dev) { |
2923 | dev_err(&pdev->dev, "can't allocate net device\n"); | |
2924 | return -ENOMEM; | |
2925 | } | |
2926 | ||
b0ba512e PG |
2927 | if (dn) { |
2928 | of_id = of_match_node(bcmgenet_match, dn); | |
2929 | if (!of_id) | |
2930 | return -EINVAL; | |
2931 | } | |
1c1008c7 FF |
2932 | |
2933 | priv = netdev_priv(dev); | |
2934 | priv->irq0 = platform_get_irq(pdev, 0); | |
2935 | priv->irq1 = platform_get_irq(pdev, 1); | |
8562056f | 2936 | priv->wol_irq = platform_get_irq(pdev, 2); |
1c1008c7 FF |
2937 | if (!priv->irq0 || !priv->irq1) { |
2938 | dev_err(&pdev->dev, "can't find IRQs\n"); | |
2939 | err = -EINVAL; | |
2940 | goto err; | |
2941 | } | |
2942 | ||
b0ba512e PG |
2943 | if (dn) { |
2944 | macaddr = of_get_mac_address(dn); | |
2945 | if (!macaddr) { | |
2946 | dev_err(&pdev->dev, "can't find MAC address\n"); | |
2947 | err = -EINVAL; | |
2948 | goto err; | |
2949 | } | |
2950 | } else { | |
2951 | macaddr = pd->mac_address; | |
1c1008c7 FF |
2952 | } |
2953 | ||
2954 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
5343a10d FE |
2955 | priv->base = devm_ioremap_resource(&pdev->dev, r); |
2956 | if (IS_ERR(priv->base)) { | |
2957 | err = PTR_ERR(priv->base); | |
1c1008c7 FF |
2958 | goto err; |
2959 | } | |
2960 | ||
2961 | SET_NETDEV_DEV(dev, &pdev->dev); | |
2962 | dev_set_drvdata(&pdev->dev, dev); | |
2963 | ether_addr_copy(dev->dev_addr, macaddr); | |
2964 | dev->watchdog_timeo = 2 * HZ; | |
7ad24ea4 | 2965 | dev->ethtool_ops = &bcmgenet_ethtool_ops; |
1c1008c7 FF |
2966 | dev->netdev_ops = &bcmgenet_netdev_ops; |
2967 | netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64); | |
2968 | ||
2969 | priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); | |
2970 | ||
2971 | /* Set hardware features */ | |
2972 | dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | | |
2973 | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; | |
2974 | ||
8562056f FF |
2975 | /* Request the WOL interrupt and advertise suspend if available */ |
2976 | priv->wol_irq_disabled = true; | |
2977 | err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0, | |
2978 | dev->name, priv); | |
2979 | if (!err) | |
2980 | device_set_wakeup_capable(&pdev->dev, 1); | |
2981 | ||
1c1008c7 FF |
2982 | /* Set the needed headroom to account for any possible |
2983 | * features being enabled or disabled at runtime |
2984 | */ | |
2985 | dev->needed_headroom += 64; | |
2986 | ||
2987 | netdev_boot_setup_check(dev); | |
2988 | ||
2989 | priv->dev = dev; | |
2990 | priv->pdev = pdev; | |
b0ba512e PG |
2991 | if (of_id) |
2992 | priv->version = (enum bcmgenet_version)of_id->data; | |
2993 | else | |
2994 | priv->version = pd->genet_version; | |
1c1008c7 | 2995 | |
e4a60a93 FF |
2996 | priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); |
2997 | if (IS_ERR(priv->clk)) | |
2998 | dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); | |
2999 | ||
3000 | if (!IS_ERR(priv->clk)) | |
3001 | clk_prepare_enable(priv->clk); | |
3002 | ||
1c1008c7 FF |
3003 | bcmgenet_set_hw_params(priv); |
3004 | ||
1c1008c7 FF |
3005 | /* MII wait queue */ | |
3006 | init_waitqueue_head(&priv->wq); | |
3007 | /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ | |
3008 | priv->rx_buf_len = RX_BUF_LENGTH; | |
3009 | INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); | |
3010 | ||
1c1008c7 FF |
3011 | priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); |
3012 | if (IS_ERR(priv->clk_wol)) | |
3013 | dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); | |
3014 | ||
6ef398ea FF |
3015 | priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee"); |
3016 | if (IS_ERR(priv->clk_eee)) { | |
3017 | dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n"); | |
3018 | priv->clk_eee = NULL; | |
3019 | } | |
3020 | ||
1c1008c7 FF |
3021 | err = reset_umac(priv); |
3022 | if (err) | |
3023 | goto err_clk_disable; | |
3024 | ||
3025 | err = bcmgenet_mii_init(dev); | |
3026 | if (err) | |
3027 | goto err_clk_disable; | |
3028 | ||
3029 | /* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
3030 | * queues, just the ring 16 descriptor-based TX queue) | |
3031 | */ | |
3032 | netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); | |
3033 | netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); | |
3034 | ||
219575eb FF |
3035 | /* libphy will determine the link state */ |
3036 | netif_carrier_off(dev); | |
3037 | ||
1c1008c7 FF |
3038 | /* Turn off the main clock; the WOL clock is handled separately */ | |
3039 | if (!IS_ERR(priv->clk)) | |
3040 | clk_disable_unprepare(priv->clk); | |
3041 | ||
0f50ce96 FF |
3042 | err = register_netdev(dev); |
3043 | if (err) | |
3044 | goto err; | |
3045 | ||
1c1008c7 FF |
3046 | return err; |
3047 | ||
3048 | err_clk_disable: | |
3049 | if (!IS_ERR(priv->clk)) | |
3050 | clk_disable_unprepare(priv->clk); | |
3051 | err: | |
3052 | free_netdev(dev); | |
3053 | return err; | |
3054 | } | |
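/*
 * Illustrative sketch only (not part of the driver): on a non-DT platform
 * the board code supplies the data consumed by the pd->mac_address and
 * pd->genet_version fallbacks in bcmgenet_probe() above. The MAC address
 * and GENET_V4 below are placeholder values, and "res" stands in for the
 * board's MMIO/IRQ resources.
 */
static struct bcmgenet_platform_data bcmgenet_example_pd = {
	.mac_address	= { 0x00, 0x10, 0x18, 0x00, 0x00, 0x01 },
	.genet_version	= GENET_V4,
};

/* e.g. registered as:
 *	platform_device_register_resndata(NULL, "bcmgenet", -1,
 *					  res, ARRAY_SIZE(res),
 *					  &bcmgenet_example_pd,
 *					  sizeof(bcmgenet_example_pd));
 */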
3055 | ||
3056 | static int bcmgenet_remove(struct platform_device *pdev) | |
3057 | { | |
3058 | struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); | |
3059 | ||
3060 | dev_set_drvdata(&pdev->dev, NULL); | |
3061 | unregister_netdev(priv->dev); | |
3062 | bcmgenet_mii_exit(priv->dev); | |
3063 | free_netdev(priv->dev); | |
3064 | ||
3065 | return 0; | |
3066 | } | |
3067 | ||
b6e978e5 FF |
3068 | #ifdef CONFIG_PM_SLEEP |
3069 | static int bcmgenet_suspend(struct device *d) | |
3070 | { | |
3071 | struct net_device *dev = dev_get_drvdata(d); | |
3072 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
3073 | int ret; | |
3074 | ||
3075 | if (!netif_running(dev)) | |
3076 | return 0; | |
3077 | ||
3078 | bcmgenet_netif_stop(dev); | |
3079 | ||
cc013fb4 FF |
3080 | phy_suspend(priv->phydev); |
3081 | ||
b6e978e5 FF |
3082 | netif_device_detach(dev); |
3083 | ||
3084 | /* Disable MAC receive */ | |
3085 | umac_enable_set(priv, CMD_RX_EN, false); | |
3086 | ||
3087 | ret = bcmgenet_dma_teardown(priv); | |
3088 | if (ret) | |
3089 | return ret; | |
3090 | ||
3091 | /* Disable MAC transmit. TX DMA must be disabled before this point */ | |
3092 | umac_enable_set(priv, CMD_TX_EN, false); | |
3093 | ||
3094 | /* tx reclaim */ | |
3095 | bcmgenet_tx_reclaim_all(dev); | |
3096 | bcmgenet_fini_dma(priv); | |
3097 | ||
8c90db72 FF |
3098 | /* Prepare the device for Wake-on-LAN and switch to the slow clock */ |
3099 | if (device_may_wakeup(d) && priv->wolopts) { | |
3100 | bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); | |
3101 | clk_prepare_enable(priv->clk_wol); | |
3102 | } | |
3103 | ||
b6e978e5 FF |
3104 | /* Turn off the clocks */ |
3105 | clk_disable_unprepare(priv->clk); | |
3106 | ||
3107 | return 0; | |
3108 | } | |
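/*
 * Ordering note for bcmgenet_suspend() above: the MAC receiver is stopped
 * before the DMA teardown, and the MAC transmitter only after TX DMA has
 * been torn down and the remaining packets reclaimed. priv->wolopts is
 * expected to have been set earlier through the driver's ethtool WoL
 * handler, e.g. with "ethtool -s <iface> wol g" for magic-packet wake
 * (illustrative command line, not output of this driver).
 */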
3109 | ||
3110 | static int bcmgenet_resume(struct device *d) | |
3111 | { | |
3112 | struct net_device *dev = dev_get_drvdata(d); | |
3113 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
3114 | unsigned long dma_ctrl; | |
3115 | int ret; | |
3116 | u32 reg; | |
3117 | ||
3118 | if (!netif_running(dev)) | |
3119 | return 0; | |
3120 | ||
3121 | /* Turn on the clock */ | |
3122 | ret = clk_prepare_enable(priv->clk); | |
3123 | if (ret) | |
3124 | return ret; | |
3125 | ||
3126 | bcmgenet_umac_reset(priv); | |
3127 | ||
3128 | ret = init_umac(priv); | |
3129 | if (ret) | |
3130 | goto out_clk_disable; | |
3131 | ||
0a29b3da TK |
3132 | /* From WOL-enabled suspend, switch to regular clock */ |
3133 | if (priv->wolopts) | |
3134 | clk_disable_unprepare(priv->clk_wol); | |
3135 | ||
3136 | phy_init_hw(priv->phydev); | |
3137 | /* Speed settings must be restored */ | |
dbd479db | 3138 | bcmgenet_mii_config(priv->dev, false); |
8c90db72 | 3139 | |
b6e978e5 FF |
3140 | /* Disable the Ethernet MAC while updating its registers */ | |
3141 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); | |
3142 | ||
3143 | bcmgenet_set_hw_addr(priv, dev->dev_addr); | |
3144 | ||
3145 | if (phy_is_internal(priv->phydev)) { | |
3146 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | |
3147 | reg |= EXT_ENERGY_DET_MASK; | |
3148 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | |
3149 | } | |
3150 | ||
98bb7399 FF |
3151 | if (priv->wolopts) |
3152 | bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); | |
3153 | ||
b6e978e5 FF |
3154 | /* Disable RX/TX DMA and flush TX queues */ |
3155 | dma_ctrl = bcmgenet_dma_disable(priv); | |
3156 | ||
3157 | /* Reinitialize TDMA, RDMA, and the SW housekeeping state */ | |
3158 | ret = bcmgenet_init_dma(priv); | |
3159 | if (ret) { | |
3160 | netdev_err(dev, "failed to initialize DMA\n"); | |
3161 | goto out_clk_disable; | |
3162 | } | |
3163 | ||
3164 | /* Always enable ring 16, the default descriptor ring */ | |
3165 | bcmgenet_enable_dma(priv, dma_ctrl); | |
3166 | ||
3167 | netif_device_attach(dev); | |
3168 | ||
cc013fb4 FF |
3169 | phy_resume(priv->phydev); |
3170 | ||
6ef398ea FF |
3171 | if (priv->eee.eee_enabled) |
3172 | bcmgenet_eee_enable_set(dev, true); | |
3173 | ||
b6e978e5 FF |
3174 | bcmgenet_netif_start(dev); |
3175 | ||
3176 | return 0; | |
3177 | ||
3178 | out_clk_disable: | |
3179 | clk_disable_unprepare(priv->clk); | |
3180 | return ret; | |
3181 | } | |
3182 | #endif /* CONFIG_PM_SLEEP */ | |
3183 | ||
3184 | static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume); | |
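/*
 * For reference, a rough sketch (not the verbatim macro expansion): the
 * SIMPLE_DEV_PM_OPS() line above produces a dev_pm_ops that routes every
 * system sleep transition to the two callbacks, approximately:
 *
 *	static const struct dev_pm_ops bcmgenet_pm_ops = {
 *		.suspend  = bcmgenet_suspend,	.resume  = bcmgenet_resume,
 *		.freeze   = bcmgenet_suspend,	.thaw    = bcmgenet_resume,
 *		.poweroff = bcmgenet_suspend,	.restore = bcmgenet_resume,
 *	};
 *
 * with the sleep callbacks compiled out when CONFIG_PM_SLEEP is not set.
 */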
3185 | ||
1c1008c7 FF |
3186 | static struct platform_driver bcmgenet_driver = { |
3187 | .probe = bcmgenet_probe, | |
3188 | .remove = bcmgenet_remove, | |
3189 | .driver = { | |
3190 | .name = "bcmgenet", | |
1c1008c7 | 3191 | .of_match_table = bcmgenet_match, |
b6e978e5 | 3192 | .pm = &bcmgenet_pm_ops, |
1c1008c7 FF |
3193 | }, |
3194 | }; | |
3195 | module_platform_driver(bcmgenet_driver); | |
3196 | ||
3197 | MODULE_AUTHOR("Broadcom Corporation"); | |
3198 | MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver"); | |
3199 | MODULE_ALIAS("platform:bcmgenet"); | |
3200 | MODULE_LICENSE("GPL"); |