1 | /* |
2 | * Broadcom GENET (Gigabit Ethernet) controller driver | |
3 | * | |
4 | * Copyright (c) 2014 Broadcom Corporation | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | */ |
10 | ||
11 | #define pr_fmt(fmt) "bcmgenet: " fmt | |
12 | ||
13 | #include <linux/kernel.h> | |
14 | #include <linux/module.h> | |
15 | #include <linux/sched.h> | |
16 | #include <linux/types.h> | |
17 | #include <linux/fcntl.h> | |
18 | #include <linux/interrupt.h> | |
19 | #include <linux/string.h> | |
20 | #include <linux/if_ether.h> | |
21 | #include <linux/init.h> | |
22 | #include <linux/errno.h> | |
23 | #include <linux/delay.h> | |
24 | #include <linux/platform_device.h> | |
25 | #include <linux/dma-mapping.h> | |
26 | #include <linux/pm.h> | |
27 | #include <linux/clk.h> | |
28 | #include <linux/of.h> |
29 | #include <linux/of_address.h> | |
30 | #include <linux/of_irq.h> | |
31 | #include <linux/of_net.h> | |
32 | #include <linux/of_platform.h> | |
33 | #include <net/arp.h> | |
34 | ||
35 | #include <linux/mii.h> | |
36 | #include <linux/ethtool.h> | |
37 | #include <linux/netdevice.h> | |
38 | #include <linux/inetdevice.h> | |
39 | #include <linux/etherdevice.h> | |
40 | #include <linux/skbuff.h> | |
41 | #include <linux/in.h> | |
42 | #include <linux/ip.h> | |
43 | #include <linux/ipv6.h> | |
44 | #include <linux/phy.h> | |
45 | ||
46 | #include <asm/unaligned.h> | |
47 | ||
48 | #include "bcmgenet.h" | |
49 | ||
50 | /* Maximum number of hardware queues, downsized if needed */ | |
51 | #define GENET_MAX_MQ_CNT 4 | |
52 | ||
53 | /* Default highest priority queue for multi queue support */ | |
54 | #define GENET_Q0_PRIORITY 0 | |
55 | ||
56 | #define GENET_DEFAULT_BD_CNT \ | |
57 | (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt) | |
58 | ||
59 | #define RX_BUF_LENGTH 2048 | |
60 | #define SKB_ALIGNMENT 32 | |
61 | ||
62 | /* Tx/Rx DMA register offset, skip 256 descriptors */ | |
63 | #define WORDS_PER_BD(p) (p->hw_params->words_per_bd) | |
64 | #define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32)) | |
65 | ||
66 | #define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \ | |
67 | TOTAL_DESC * DMA_DESC_SIZE) | |
68 | ||
69 | #define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ | |
70 | TOTAL_DESC * DMA_DESC_SIZE) | |
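/* Worked example (illustrative, not from the driver): GENET v4 descriptors
 * carry the extra HI address word, i.e. three 32-bit words per buffer
 * descriptor, so DMA_DESC_SIZE is 12 bytes there; assuming TOTAL_DESC is 256
 * (as defined in bcmgenet.h), the TDMA/RDMA control registers then start
 * 256 * 12 = 3072 bytes past the respective tdma_offset/rdma_offset, right
 * after the descriptor array.
 */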
71 | ||
72 | static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, | |
73 | void __iomem *d, u32 value) |
74 | { |
75 | __raw_writel(value, d + DMA_DESC_LENGTH_STATUS); | |
76 | } | |
77 | ||
78 | static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv, | |
79 | void __iomem *d) |
80 | { |
81 | return __raw_readl(d + DMA_DESC_LENGTH_STATUS); | |
82 | } | |
83 | ||
84 | static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, | |
85 | void __iomem *d, | |
86 | dma_addr_t addr) | |
87 | { | |
88 | __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); | |
89 | ||
90 | /* Register writes to the GISB bus can take a couple hundred nanoseconds |
91 | * and are done for each packet; save these expensive writes unless |
92 | * the platform is explicitly configured for 64-bit/LPAE. |
93 | */ | |
94 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | |
95 | if (priv->hw_params->flags & GENET_HAS_40BITS) | |
96 | __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); | |
97 | #endif | |
98 | } | |
99 | ||
100 | /* Combined address + length/status setter */ | |
101 | static inline void dmadesc_set(struct bcmgenet_priv *priv, | |
102 | void __iomem *d, dma_addr_t addr, u32 val) |
103 | { |
104 | dmadesc_set_length_status(priv, d, val); | |
105 | dmadesc_set_addr(priv, d, addr); | |
106 | } | |
107 | ||
108 | static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, | |
109 | void __iomem *d) | |
110 | { | |
111 | dma_addr_t addr; | |
112 | ||
113 | addr = __raw_readl(d + DMA_DESC_ADDRESS_LO); | |
114 | ||
115 | /* Register reads from the GISB bus can take a couple hundred nanoseconds |
116 | * and are done for each packet; save these expensive reads unless |
117 | * the platform is explicitly configured for 64-bit/LPAE. |
118 | */ | |
119 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | |
120 | if (priv->hw_params->flags & GENET_HAS_40BITS) | |
121 | addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32; | |
122 | #endif | |
123 | return addr; | |
124 | } | |
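/* Illustrative sketch (not part of the driver): how the accessors above are
 * typically combined to hand one mapped buffer to the hardware, using the
 * flag names that appear later in this file. 'd' points at a descriptor in
 * the on-chip descriptor space and 'mapping'/'len' come from dma_map_single():
 *
 *	u32 len_stat = (len << DMA_BUFLENGTH_SHIFT) |
 *		       DMA_SOP | DMA_EOP | DMA_TX_APPEND_CRC;
 *
 *	dmadesc_set(priv, d, mapping, len_stat);
 *
 * and the address can later be read back with dmadesc_get_addr(priv, d).
 */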
125 | ||
126 | #define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x" | |
127 | ||
128 | #define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ | |
129 | NETIF_MSG_LINK) | |
130 | ||
131 | static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv) | |
132 | { | |
133 | if (GENET_IS_V1(priv)) | |
134 | return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1); | |
135 | else | |
136 | return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL); | |
137 | } | |
138 | ||
139 | static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) | |
140 | { | |
141 | if (GENET_IS_V1(priv)) | |
142 | bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1); | |
143 | else | |
144 | bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL); | |
145 | } | |
146 | ||
147 | /* These accessors are defined to deal with the register map change |
148 | * between GENET 1.1 and GENET 2. Only those currently being used |
149 | * by the driver are defined. |
150 | */ | |
151 | static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) | |
152 | { | |
153 | if (GENET_IS_V1(priv)) | |
154 | return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); | |
155 | else | |
156 | return __raw_readl(priv->base + | |
157 | priv->hw_params->tbuf_offset + TBUF_CTRL); | |
158 | } | |
159 | ||
160 | static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) | |
161 | { | |
162 | if (GENET_IS_V1(priv)) | |
163 | bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); | |
164 | else | |
165 | __raw_writel(val, priv->base + | |
166 | priv->hw_params->tbuf_offset + TBUF_CTRL); | |
167 | } | |
168 | ||
169 | static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) | |
170 | { | |
171 | if (GENET_IS_V1(priv)) | |
172 | return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); | |
173 | else | |
174 | return __raw_readl(priv->base + | |
175 | priv->hw_params->tbuf_offset + TBUF_BP_MC); | |
176 | } | |
177 | ||
178 | static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) | |
179 | { | |
180 | if (GENET_IS_V1(priv)) | |
181 | bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); | |
182 | else | |
183 | __raw_writel(val, priv->base + | |
184 | priv->hw_params->tbuf_offset + TBUF_BP_MC); | |
185 | } | |
186 | ||
187 | /* RX/TX DMA register accessors */ | |
188 | enum dma_reg { | |
189 | DMA_RING_CFG = 0, | |
190 | DMA_CTRL, | |
191 | DMA_STATUS, | |
192 | DMA_SCB_BURST_SIZE, | |
193 | DMA_ARB_CTRL, | |
194 | DMA_PRIORITY, | |
195 | DMA_RING_PRIORITY, | |
196 | }; | |
197 | ||
198 | static const u8 bcmgenet_dma_regs_v3plus[] = { | |
199 | [DMA_RING_CFG] = 0x00, | |
200 | [DMA_CTRL] = 0x04, | |
201 | [DMA_STATUS] = 0x08, | |
202 | [DMA_SCB_BURST_SIZE] = 0x0C, | |
203 | [DMA_ARB_CTRL] = 0x2C, | |
204 | [DMA_PRIORITY] = 0x30, | |
205 | [DMA_RING_PRIORITY] = 0x38, | |
206 | }; | |
207 | ||
208 | static const u8 bcmgenet_dma_regs_v2[] = { | |
209 | [DMA_RING_CFG] = 0x00, | |
210 | [DMA_CTRL] = 0x04, | |
211 | [DMA_STATUS] = 0x08, | |
212 | [DMA_SCB_BURST_SIZE] = 0x0C, | |
213 | [DMA_ARB_CTRL] = 0x30, | |
214 | [DMA_PRIORITY] = 0x34, | |
215 | [DMA_RING_PRIORITY] = 0x3C, | |
216 | }; | |
217 | ||
218 | static const u8 bcmgenet_dma_regs_v1[] = { | |
219 | [DMA_CTRL] = 0x00, | |
220 | [DMA_STATUS] = 0x04, | |
221 | [DMA_SCB_BURST_SIZE] = 0x0C, | |
222 | [DMA_ARB_CTRL] = 0x30, | |
223 | [DMA_PRIORITY] = 0x34, | |
224 | [DMA_RING_PRIORITY] = 0x3C, | |
225 | }; | |
226 | ||
227 | /* Set at runtime once bcmgenet version is known */ | |
228 | static const u8 *bcmgenet_dma_regs; | |
229 | ||
230 | static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) | |
231 | { | |
232 | return netdev_priv(dev_get_drvdata(dev)); | |
233 | } | |
234 | ||
235 | static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, | |
236 | enum dma_reg r) |
237 | { |
238 | return __raw_readl(priv->base + GENET_TDMA_REG_OFF + | |
239 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); | |
240 | } | |
241 | ||
242 | static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, | |
243 | u32 val, enum dma_reg r) | |
244 | { | |
245 | __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + | |
246 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); | |
247 | } | |
248 | ||
249 | static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, | |
250 | enum dma_reg r) |
251 | { |
252 | return __raw_readl(priv->base + GENET_RDMA_REG_OFF + | |
253 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); | |
254 | } | |
255 | ||
256 | static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, | |
257 | u32 val, enum dma_reg r) | |
258 | { | |
259 | __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + | |
260 | DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); | |
261 | } | |
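/* Worked example (illustrative): with the GENET v3+ table above,
 * bcmgenet_tdma_writel(priv, val, DMA_SCB_BURST_SIZE) writes to
 * priv->base + GENET_TDMA_REG_OFF + DMA_RINGS_SIZE + 0x0C, i.e. the
 * TDMA burst-size register that sits just after the per-ring register blocks.
 */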
262 | ||
263 | /* RDMA/TDMA ring registers and accessors | |
264 | * We merge the common fields and prefix with T/R those registers |
265 | * whose meaning differs depending on the direction. |
266 | */ | |
267 | enum dma_ring_reg { | |
268 | TDMA_READ_PTR = 0, | |
269 | RDMA_WRITE_PTR = TDMA_READ_PTR, | |
270 | TDMA_READ_PTR_HI, | |
271 | RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI, | |
272 | TDMA_CONS_INDEX, | |
273 | RDMA_PROD_INDEX = TDMA_CONS_INDEX, | |
274 | TDMA_PROD_INDEX, | |
275 | RDMA_CONS_INDEX = TDMA_PROD_INDEX, | |
276 | DMA_RING_BUF_SIZE, | |
277 | DMA_START_ADDR, | |
278 | DMA_START_ADDR_HI, | |
279 | DMA_END_ADDR, | |
280 | DMA_END_ADDR_HI, | |
281 | DMA_MBUF_DONE_THRESH, | |
282 | TDMA_FLOW_PERIOD, | |
283 | RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD, | |
284 | TDMA_WRITE_PTR, | |
285 | RDMA_READ_PTR = TDMA_WRITE_PTR, | |
286 | TDMA_WRITE_PTR_HI, | |
287 | RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI | |
288 | }; | |
289 | ||
290 | /* GENET v4 supports 40-bit pointer addressing. |
291 | * For obvious reasons the LO and HI word parts |
292 | * are contiguous, but this offsets the other |
293 | * registers. |
294 | */ | |
295 | static const u8 genet_dma_ring_regs_v4[] = { | |
296 | [TDMA_READ_PTR] = 0x00, | |
297 | [TDMA_READ_PTR_HI] = 0x04, | |
298 | [TDMA_CONS_INDEX] = 0x08, | |
299 | [TDMA_PROD_INDEX] = 0x0C, | |
300 | [DMA_RING_BUF_SIZE] = 0x10, | |
301 | [DMA_START_ADDR] = 0x14, | |
302 | [DMA_START_ADDR_HI] = 0x18, | |
303 | [DMA_END_ADDR] = 0x1C, | |
304 | [DMA_END_ADDR_HI] = 0x20, | |
305 | [DMA_MBUF_DONE_THRESH] = 0x24, | |
306 | [TDMA_FLOW_PERIOD] = 0x28, | |
307 | [TDMA_WRITE_PTR] = 0x2C, | |
308 | [TDMA_WRITE_PTR_HI] = 0x30, | |
309 | }; | |
310 | ||
311 | static const u8 genet_dma_ring_regs_v123[] = { | |
312 | [TDMA_READ_PTR] = 0x00, | |
313 | [TDMA_CONS_INDEX] = 0x04, | |
314 | [TDMA_PROD_INDEX] = 0x08, | |
315 | [DMA_RING_BUF_SIZE] = 0x0C, | |
316 | [DMA_START_ADDR] = 0x10, | |
317 | [DMA_END_ADDR] = 0x14, | |
318 | [DMA_MBUF_DONE_THRESH] = 0x18, | |
319 | [TDMA_FLOW_PERIOD] = 0x1C, | |
320 | [TDMA_WRITE_PTR] = 0x20, | |
321 | }; | |
322 | ||
323 | /* Set at runtime once GENET version is known */ | |
324 | static const u8 *genet_dma_ring_regs; | |
325 | ||
326 | static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, | |
327 | unsigned int ring, |
328 | enum dma_ring_reg r) | |
329 | { |
330 | return __raw_readl(priv->base + GENET_TDMA_REG_OFF + | |
331 | (DMA_RING_SIZE * ring) + | |
332 | genet_dma_ring_regs[r]); | |
333 | } | |
334 | ||
335 | static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, | |
336 | unsigned int ring, u32 val, |
337 | enum dma_ring_reg r) | |
338 | { |
339 | __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + | |
340 | (DMA_RING_SIZE * ring) + | |
341 | genet_dma_ring_regs[r]); | |
342 | } | |
343 | ||
344 | static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, | |
345 | unsigned int ring, |
346 | enum dma_ring_reg r) | |
347 | { |
348 | return __raw_readl(priv->base + GENET_RDMA_REG_OFF + | |
349 | (DMA_RING_SIZE * ring) + | |
350 | genet_dma_ring_regs[r]); | |
351 | } | |
352 | ||
353 | static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, | |
354 | unsigned int ring, u32 val, |
355 | enum dma_ring_reg r) | |
356 | { |
357 | __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + | |
358 | (DMA_RING_SIZE * ring) + | |
359 | genet_dma_ring_regs[r]); | |
360 | } | |
361 | ||
362 | static int bcmgenet_get_settings(struct net_device *dev, | |
363 | struct ethtool_cmd *cmd) |
364 | { |
365 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
366 | ||
367 | if (!netif_running(dev)) | |
368 | return -EINVAL; | |
369 | ||
370 | if (!priv->phydev) | |
371 | return -ENODEV; | |
372 | ||
373 | return phy_ethtool_gset(priv->phydev, cmd); | |
374 | } | |
375 | ||
376 | static int bcmgenet_set_settings(struct net_device *dev, | |
377 | struct ethtool_cmd *cmd) |
378 | { |
379 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
380 | ||
381 | if (!netif_running(dev)) | |
382 | return -EINVAL; | |
383 | ||
384 | if (!priv->phydev) | |
385 | return -ENODEV; | |
386 | ||
387 | return phy_ethtool_sset(priv->phydev, cmd); | |
388 | } | |
389 | ||
390 | static int bcmgenet_set_rx_csum(struct net_device *dev, | |
391 | netdev_features_t wanted) | |
392 | { | |
393 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
394 | u32 rbuf_chk_ctrl; | |
395 | bool rx_csum_en; | |
396 | ||
397 | rx_csum_en = !!(wanted & NETIF_F_RXCSUM); | |
398 | ||
399 | rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); | |
400 | ||
401 | /* enable rx checksumming */ | |
402 | if (rx_csum_en) | |
403 | rbuf_chk_ctrl |= RBUF_RXCHK_EN; | |
404 | else | |
405 | rbuf_chk_ctrl &= ~RBUF_RXCHK_EN; | |
406 | priv->desc_rxchk_en = rx_csum_en; | |
407 | |
408 | /* If UniMAC forwards the CRC, we need to skip over it to get |
409 | * a valid CHK bit set in the per-packet status word |
410 | */ | |
411 | if (rx_csum_en && priv->crc_fwd_en) | |
412 | rbuf_chk_ctrl |= RBUF_SKIP_FCS; | |
413 | else | |
414 | rbuf_chk_ctrl &= ~RBUF_SKIP_FCS; | |
415 | ||
416 | bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL); |
417 | ||
418 | return 0; | |
419 | } | |
420 | ||
421 | static int bcmgenet_set_tx_csum(struct net_device *dev, | |
422 | netdev_features_t wanted) | |
423 | { | |
424 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
425 | bool desc_64b_en; | |
426 | u32 tbuf_ctrl, rbuf_ctrl; | |
427 | ||
428 | tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv); | |
429 | rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL); | |
430 | ||
431 | desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); | |
432 | ||
433 | /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */ | |
434 | if (desc_64b_en) { | |
435 | tbuf_ctrl |= RBUF_64B_EN; | |
436 | rbuf_ctrl |= RBUF_64B_EN; | |
437 | } else { | |
438 | tbuf_ctrl &= ~RBUF_64B_EN; | |
439 | rbuf_ctrl &= ~RBUF_64B_EN; | |
440 | } | |
441 | priv->desc_64b_en = desc_64b_en; | |
442 | ||
443 | bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl); | |
444 | bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL); | |
445 | ||
446 | return 0; | |
447 | } | |
448 | ||
449 | static int bcmgenet_set_features(struct net_device *dev, | |
450 | netdev_features_t features) |
451 | { |
452 | netdev_features_t changed = features ^ dev->features; | |
453 | netdev_features_t wanted = dev->wanted_features; | |
454 | int ret = 0; | |
455 | ||
456 | if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) | |
457 | ret = bcmgenet_set_tx_csum(dev, wanted); | |
458 | if (changed & (NETIF_F_RXCSUM)) | |
459 | ret = bcmgenet_set_rx_csum(dev, wanted); | |
460 | ||
461 | return ret; | |
462 | } | |
463 | ||
464 | static u32 bcmgenet_get_msglevel(struct net_device *dev) | |
465 | { | |
466 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
467 | ||
468 | return priv->msg_enable; | |
469 | } | |
470 | ||
471 | static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) | |
472 | { | |
473 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
474 | ||
475 | priv->msg_enable = level; | |
476 | } | |
477 | ||
478 | /* standard ethtool support functions. */ | |
479 | enum bcmgenet_stat_type { | |
480 | BCMGENET_STAT_NETDEV = -1, | |
481 | BCMGENET_STAT_MIB_RX, | |
482 | BCMGENET_STAT_MIB_TX, | |
483 | BCMGENET_STAT_RUNT, | |
484 | BCMGENET_STAT_MISC, | |
485 | }; | |
486 | ||
487 | struct bcmgenet_stats { | |
488 | char stat_string[ETH_GSTRING_LEN]; | |
489 | int stat_sizeof; | |
490 | int stat_offset; | |
491 | enum bcmgenet_stat_type type; | |
492 | /* reg offset from UMAC base for misc counters */ | |
493 | u16 reg_offset; | |
494 | }; | |
495 | ||
496 | #define STAT_NETDEV(m) { \ | |
497 | .stat_string = __stringify(m), \ | |
498 | .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ | |
499 | .stat_offset = offsetof(struct net_device_stats, m), \ | |
500 | .type = BCMGENET_STAT_NETDEV, \ | |
501 | } | |
502 | ||
503 | #define STAT_GENET_MIB(str, m, _type) { \ | |
504 | .stat_string = str, \ | |
505 | .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ | |
506 | .stat_offset = offsetof(struct bcmgenet_priv, m), \ | |
507 | .type = _type, \ | |
508 | } | |
509 | ||
510 | #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) | |
511 | #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) | |
512 | #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) | |
513 | ||
514 | #define STAT_GENET_MISC(str, m, offset) { \ | |
515 | .stat_string = str, \ | |
516 | .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ | |
517 | .stat_offset = offsetof(struct bcmgenet_priv, m), \ | |
518 | .type = BCMGENET_STAT_MISC, \ | |
519 | .reg_offset = offset, \ | |
520 | } | |
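/* For example (illustrative), STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt)
 * expands to an entry of type BCMGENET_STAT_MIB_RX whose stat_offset is
 * offsetof(struct bcmgenet_priv, mib.rx.pkt); the table below depends on
 * these entries appearing in the same order as the hardware counters.
 */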
521 | ||
522 | ||
523 | /* There is a 0xC gap between the end of the RX stats and the beginning of the |
524 | * TX stats, and again between the end of the TX stats and the beginning of the RX RUNT counters |
525 | */ | |
526 | #define BCMGENET_STAT_OFFSET 0xc | |
527 | ||
528 | /* Hardware counters must be kept in sync because the order/offset | |
529 | * is important here (order in structure declaration = order in hardware) | |
530 | */ | |
531 | static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { | |
532 | /* general stats */ | |
533 | STAT_NETDEV(rx_packets), | |
534 | STAT_NETDEV(tx_packets), | |
535 | STAT_NETDEV(rx_bytes), | |
536 | STAT_NETDEV(tx_bytes), | |
537 | STAT_NETDEV(rx_errors), | |
538 | STAT_NETDEV(tx_errors), | |
539 | STAT_NETDEV(rx_dropped), | |
540 | STAT_NETDEV(tx_dropped), | |
541 | STAT_NETDEV(multicast), | |
542 | /* UniMAC RSV counters */ | |
543 | STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), | |
544 | STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), | |
545 | STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), | |
546 | STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), | |
547 | STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), | |
548 | STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), | |
549 | STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), | |
550 | STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), | |
551 | STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), | |
552 | STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), | |
553 | STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt), | |
554 | STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes), | |
555 | STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca), | |
556 | STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca), | |
557 | STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs), | |
558 | STAT_GENET_MIB_RX("rx_control", mib.rx.cf), | |
559 | STAT_GENET_MIB_RX("rx_pause", mib.rx.pf), | |
560 | STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo), | |
561 | STAT_GENET_MIB_RX("rx_align", mib.rx.aln), | |
562 | STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr), | |
563 | STAT_GENET_MIB_RX("rx_code", mib.rx.cde), | |
564 | STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr), | |
565 | STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr), | |
566 | STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr), | |
567 | STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue), | |
568 | STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok), | |
569 | STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc), | |
570 | STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp), | |
571 | STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc), | |
572 | /* UniMAC TSV counters */ | |
573 | STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), | |
574 | STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), | |
575 | STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), | |
576 | STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), | |
577 | STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), | |
578 | STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), | |
579 | STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), | |
580 | STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), | |
581 | STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), | |
582 | STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), | |
583 | STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts), | |
584 | STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca), | |
585 | STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca), | |
586 | STAT_GENET_MIB_TX("tx_pause", mib.tx.pf), | |
587 | STAT_GENET_MIB_TX("tx_control", mib.tx.cf), | |
588 | STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs), | |
589 | STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr), | |
590 | STAT_GENET_MIB_TX("tx_defer", mib.tx.drf), | |
591 | STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf), | |
592 | STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl), | |
593 | STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl), | |
594 | STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl), | |
595 | STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl), | |
596 | STAT_GENET_MIB_TX("tx_frags", mib.tx.frg), | |
597 | STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl), | |
598 | STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr), | |
599 | STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes), | |
600 | STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok), | |
601 | STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc), | |
602 | /* UniMAC RUNT counters */ | |
603 | STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt), | |
604 | STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), | |
605 | STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), | |
606 | STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), | |
607 | /* Misc UniMAC counters */ | |
608 | STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, | |
609 | UMAC_RBUF_OVFL_CNT), | |
610 | STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), | |
611 | STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), | |
612 | }; | |
613 | ||
614 | #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) | |
615 | ||
616 | static void bcmgenet_get_drvinfo(struct net_device *dev, | |
617 | struct ethtool_drvinfo *info) |
618 | { |
619 | strlcpy(info->driver, "bcmgenet", sizeof(info->driver)); | |
620 | strlcpy(info->version, "v2.0", sizeof(info->version)); | |
621 | info->n_stats = BCMGENET_STATS_LEN; | |
622 | } |
623 | ||
624 | static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) | |
625 | { | |
626 | switch (string_set) { | |
627 | case ETH_SS_STATS: | |
628 | return BCMGENET_STATS_LEN; | |
629 | default: | |
630 | return -EOPNOTSUPP; | |
631 | } | |
632 | } | |
633 | ||
634 | static void bcmgenet_get_strings(struct net_device *dev, u32 stringset, |
635 | u8 *data) | |
636 | { |
637 | int i; | |
638 | ||
639 | switch (stringset) { | |
640 | case ETH_SS_STATS: | |
641 | for (i = 0; i < BCMGENET_STATS_LEN; i++) { | |
642 | memcpy(data + i * ETH_GSTRING_LEN, | |
643 | bcmgenet_gstrings_stats[i].stat_string, |
644 | ETH_GSTRING_LEN); | |
645 | } |
646 | break; | |
647 | } | |
648 | } | |
649 | ||
650 | static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) | |
651 | { | |
652 | int i, j = 0; | |
653 | ||
654 | for (i = 0; i < BCMGENET_STATS_LEN; i++) { | |
655 | const struct bcmgenet_stats *s; | |
656 | u8 offset = 0; | |
657 | u32 val = 0; | |
658 | char *p; | |
659 | ||
660 | s = &bcmgenet_gstrings_stats[i]; | |
661 | switch (s->type) { | |
662 | case BCMGENET_STAT_NETDEV: | |
663 | continue; | |
664 | case BCMGENET_STAT_MIB_RX: | |
665 | case BCMGENET_STAT_MIB_TX: | |
666 | case BCMGENET_STAT_RUNT: | |
667 | if (s->type != BCMGENET_STAT_MIB_RX) | |
668 | offset = BCMGENET_STAT_OFFSET; | |
669 | val = bcmgenet_umac_readl(priv, |
670 | UMAC_MIB_START + j + offset); | |
671 | break; |
672 | case BCMGENET_STAT_MISC: | |
673 | val = bcmgenet_umac_readl(priv, s->reg_offset); | |
674 | /* clear if overflowed */ | |
675 | if (val == ~0) | |
676 | bcmgenet_umac_writel(priv, 0, s->reg_offset); | |
677 | break; | |
678 | } | |
679 | ||
680 | j += s->stat_sizeof; | |
681 | p = (char *)priv + s->stat_offset; | |
682 | *(u32 *)p = val; | |
683 | } | |
684 | } | |
685 | ||
686 | static void bcmgenet_get_ethtool_stats(struct net_device *dev, | |
687 | struct ethtool_stats *stats, |
688 | u64 *data) | |
689 | { |
690 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
691 | int i; | |
692 | ||
693 | if (netif_running(dev)) | |
694 | bcmgenet_update_mib_counters(priv); | |
695 | ||
696 | for (i = 0; i < BCMGENET_STATS_LEN; i++) { | |
697 | const struct bcmgenet_stats *s; | |
698 | char *p; | |
699 | ||
700 | s = &bcmgenet_gstrings_stats[i]; | |
701 | if (s->type == BCMGENET_STAT_NETDEV) | |
702 | p = (char *)&dev->stats; | |
703 | else | |
704 | p = (char *)priv; | |
705 | p += s->stat_offset; | |
706 | data[i] = *(u32 *)p; | |
707 | } | |
708 | } | |
709 | ||
710 | /* standard ethtool support functions. */ | |
711 | static struct ethtool_ops bcmgenet_ethtool_ops = { | |
712 | .get_strings = bcmgenet_get_strings, | |
713 | .get_sset_count = bcmgenet_get_sset_count, | |
714 | .get_ethtool_stats = bcmgenet_get_ethtool_stats, | |
715 | .get_settings = bcmgenet_get_settings, | |
716 | .set_settings = bcmgenet_set_settings, | |
717 | .get_drvinfo = bcmgenet_get_drvinfo, | |
718 | .get_link = ethtool_op_get_link, | |
719 | .get_msglevel = bcmgenet_get_msglevel, | |
720 | .set_msglevel = bcmgenet_set_msglevel, | |
721 | .get_wol = bcmgenet_get_wol, |
722 | .set_wol = bcmgenet_set_wol, | |
723 | }; |
724 | ||
725 | /* Power down the unimac, based on mode. */ | |
726 | static void bcmgenet_power_down(struct bcmgenet_priv *priv, | |
727 | enum bcmgenet_power_mode mode) | |
728 | { | |
729 | u32 reg; | |
730 | ||
731 | switch (mode) { | |
732 | case GENET_POWER_CABLE_SENSE: | |
733 | phy_detach(priv->phydev); |
734 | break; |
735 | ||
736 | case GENET_POWER_WOL_MAGIC: |
737 | bcmgenet_wol_power_down_cfg(priv, mode); | |
738 | break; | |
739 | ||
740 | case GENET_POWER_PASSIVE: |
741 | /* Power down LED */ | |
742 | bcmgenet_mii_reset(priv->dev); | |
743 | if (priv->hw_params->flags & GENET_HAS_EXT) { | |
744 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | |
745 | reg |= (EXT_PWR_DOWN_PHY | | |
746 | EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); | |
747 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | |
748 | } | |
749 | break; | |
750 | default: | |
751 | break; | |
752 | } | |
753 | } | |
754 | ||
755 | static void bcmgenet_power_up(struct bcmgenet_priv *priv, | |
756 | enum bcmgenet_power_mode mode) |
757 | { |
758 | u32 reg; | |
759 | ||
760 | if (!(priv->hw_params->flags & GENET_HAS_EXT)) | |
761 | return; | |
762 | ||
763 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | |
764 | ||
765 | switch (mode) { | |
766 | case GENET_POWER_PASSIVE: | |
767 | reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY | | |
768 | EXT_PWR_DOWN_BIAS); | |
769 | /* fallthrough */ | |
770 | case GENET_POWER_CABLE_SENSE: | |
771 | /* enable APD */ | |
772 | reg |= EXT_PWR_DN_EN_LD; | |
773 | break; | |
774 | case GENET_POWER_WOL_MAGIC: |
775 | bcmgenet_wol_power_up_cfg(priv, mode); | |
776 | return; | |
777 | default: |
778 | break; | |
779 | } | |
780 | ||
781 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | |
782 | bcmgenet_mii_reset(priv->dev); | |
783 | } | |
784 | ||
785 | /* ioctl handler for special commands that are not present in ethtool. */ |
786 | static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
787 | { | |
788 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
789 | int val = 0; | |
790 | ||
791 | if (!netif_running(dev)) | |
792 | return -EINVAL; | |
793 | ||
794 | switch (cmd) { | |
795 | case SIOCGMIIPHY: | |
796 | case SIOCGMIIREG: | |
797 | case SIOCSMIIREG: | |
798 | if (!priv->phydev) | |
799 | val = -ENODEV; | |
800 | else | |
801 | val = phy_mii_ioctl(priv->phydev, rq, cmd); | |
802 | break; | |
803 | ||
804 | default: | |
805 | val = -EINVAL; | |
806 | break; | |
807 | } | |
808 | ||
809 | return val; | |
810 | } | |
811 | ||
812 | static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, | |
813 | struct bcmgenet_tx_ring *ring) | |
814 | { | |
815 | struct enet_cb *tx_cb_ptr; | |
816 | ||
817 | tx_cb_ptr = ring->cbs; | |
818 | tx_cb_ptr += ring->write_ptr - ring->cb_ptr; | |
819 | tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE; | |
820 | /* Advancing local write pointer */ | |
821 | if (ring->write_ptr == ring->end_ptr) | |
822 | ring->write_ptr = ring->cb_ptr; | |
823 | else | |
824 | ring->write_ptr++; | |
825 | ||
826 | return tx_cb_ptr; | |
827 | } | |
828 | ||
829 | /* Simple helper to free a control block's resources */ | |
830 | static void bcmgenet_free_cb(struct enet_cb *cb) | |
831 | { | |
832 | dev_kfree_skb_any(cb->skb); | |
833 | cb->skb = NULL; | |
834 | dma_unmap_addr_set(cb, dma_addr, 0); | |
835 | } | |
836 | ||
837 | static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv, | |
838 | struct bcmgenet_tx_ring *ring) | |
839 | { | |
840 | bcmgenet_intrl2_0_writel(priv, | |
841 | UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, |
842 | INTRL2_CPU_MASK_SET); | |
843 | } |
844 | ||
845 | static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv, | |
846 | struct bcmgenet_tx_ring *ring) | |
847 | { | |
848 | bcmgenet_intrl2_0_writel(priv, | |
849 | UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, |
850 | INTRL2_CPU_MASK_CLEAR); | |
851 | } |
852 | ||
853 | static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv, | |
854 | struct bcmgenet_tx_ring *ring) |
855 | { |
856 | bcmgenet_intrl2_1_writel(priv, (1 << ring->index), |
857 | INTRL2_CPU_MASK_CLEAR); | |
858 | priv->int1_mask &= ~(1 << ring->index); |
859 | } | |
860 | ||
861 | static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv, | |
862 | struct bcmgenet_tx_ring *ring) | |
863 | { | |
864 | bcmgenet_intrl2_1_writel(priv, (1 << ring->index), |
865 | INTRL2_CPU_MASK_SET); | |
866 | priv->int1_mask |= (1 << ring->index); |
867 | } | |
868 | ||
869 | /* Unlocked version of the reclaim routine */ | |
870 | static void __bcmgenet_tx_reclaim(struct net_device *dev, | |
871 | struct bcmgenet_tx_ring *ring) |
872 | { |
873 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
874 | int last_tx_cn, last_c_index, num_tx_bds; | |
875 | struct enet_cb *tx_cb_ptr; | |
876 | struct netdev_queue *txq; |
877 | unsigned int c_index; |
878 | ||
879 | /* Compute how many buffers have been transmitted since the last xmit call */ |
880 | c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); | |
881 | txq = netdev_get_tx_queue(dev, ring->queue); |
882 | |
883 | last_c_index = ring->c_index; | |
884 | num_tx_bds = ring->size; | |
885 | ||
886 | c_index &= (num_tx_bds - 1); | |
887 | ||
888 | if (c_index >= last_c_index) | |
889 | last_tx_cn = c_index - last_c_index; | |
890 | else | |
891 | last_tx_cn = num_tx_bds - last_c_index + c_index; | |
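/* e.g. (illustrative): with num_tx_bds = 256, a previous c_index of 250 and
 * a new hardware c_index of 3, the wrap-around case above yields
 * last_tx_cn = 256 - 250 + 3 = 9 buffers to reclaim.
 */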
892 | ||
893 | netif_dbg(priv, tx_done, dev, | |
894 | "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n", |
895 | __func__, ring->index, | |
896 | c_index, last_tx_cn, last_c_index); | |
897 | |
898 | /* Reclaim transmitted buffers */ | |
899 | while (last_tx_cn-- > 0) { | |
900 | tx_cb_ptr = ring->cbs + last_c_index; | |
901 | if (tx_cb_ptr->skb) { | |
902 | dev->stats.tx_bytes += tx_cb_ptr->skb->len; | |
903 | dma_unmap_single(&dev->dev, | |
904 | dma_unmap_addr(tx_cb_ptr, dma_addr), |
905 | tx_cb_ptr->skb->len, | |
906 | DMA_TO_DEVICE); | |
907 | bcmgenet_free_cb(tx_cb_ptr); |
908 | } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { | |
909 | dev->stats.tx_bytes += | |
910 | dma_unmap_len(tx_cb_ptr, dma_len); | |
911 | dma_unmap_page(&dev->dev, | |
912 | dma_unmap_addr(tx_cb_ptr, dma_addr), |
913 | dma_unmap_len(tx_cb_ptr, dma_len), | |
914 | DMA_TO_DEVICE); | |
915 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); |
916 | } | |
917 | dev->stats.tx_packets++; | |
918 | ring->free_bds += 1; | |
919 | ||
920 | last_c_index++; | |
921 | last_c_index &= (num_tx_bds - 1); | |
922 | } | |
923 | ||
924 | if (ring->free_bds > (MAX_SKB_FRAGS + 1)) | |
925 | ring->int_disable(priv, ring); | |
926 | ||
927 | if (netif_tx_queue_stopped(txq)) |
928 | netif_tx_wake_queue(txq); | |
929 | |
930 | ring->c_index = c_index; | |
931 | } | |
932 | ||
933 | static void bcmgenet_tx_reclaim(struct net_device *dev, | |
934 | struct bcmgenet_tx_ring *ring) |
935 | { |
936 | unsigned long flags; | |
937 | ||
938 | spin_lock_irqsave(&ring->lock, flags); | |
939 | __bcmgenet_tx_reclaim(dev, ring); | |
940 | spin_unlock_irqrestore(&ring->lock, flags); | |
941 | } | |
942 | ||
943 | static void bcmgenet_tx_reclaim_all(struct net_device *dev) | |
944 | { | |
945 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
946 | int i; | |
947 | ||
948 | if (netif_is_multiqueue(dev)) { | |
949 | for (i = 0; i < priv->hw_params->tx_queues; i++) | |
950 | bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); | |
951 | } | |
952 | ||
953 | bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); | |
954 | } | |
955 | ||
956 | /* Transmits a single SKB (either the head of a fragmented packet or a single SKB); |
957 | * the caller must hold priv->lock |
958 | */ | |
959 | static int bcmgenet_xmit_single(struct net_device *dev, | |
960 | struct sk_buff *skb, | |
961 | u16 dma_desc_flags, | |
962 | struct bcmgenet_tx_ring *ring) | |
963 | { | |
964 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
965 | struct device *kdev = &priv->pdev->dev; | |
966 | struct enet_cb *tx_cb_ptr; | |
967 | unsigned int skb_len; | |
968 | dma_addr_t mapping; | |
969 | u32 length_status; | |
970 | int ret; | |
971 | ||
972 | tx_cb_ptr = bcmgenet_get_txcb(priv, ring); | |
973 | ||
974 | if (unlikely(!tx_cb_ptr)) | |
975 | BUG(); | |
976 | ||
977 | tx_cb_ptr->skb = skb; | |
978 | ||
979 | skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb); | |
980 | ||
981 | mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); | |
982 | ret = dma_mapping_error(kdev, mapping); | |
983 | if (ret) { | |
984 | netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); | |
985 | dev_kfree_skb(skb); | |
986 | return ret; | |
987 | } | |
988 | ||
989 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); | |
990 | dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len); | |
991 | length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | | |
992 | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) | | |
993 | DMA_TX_APPEND_CRC; | |
994 | ||
995 | if (skb->ip_summed == CHECKSUM_PARTIAL) | |
996 | length_status |= DMA_TX_DO_CSUM; | |
997 | ||
998 | dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status); | |
999 | ||
1000 | /* Decrement total BD count and advance our write pointer */ | |
1001 | ring->free_bds -= 1; | |
1002 | ring->prod_index += 1; | |
1003 | ring->prod_index &= DMA_P_INDEX_MASK; | |
1004 | ||
1005 | return 0; | |
1006 | } | |
1007 | ||
1008 | /* Transmit an SKB fragment */ |
1009 | static int bcmgenet_xmit_frag(struct net_device *dev, | |
1010 | skb_frag_t *frag, |
1011 | u16 dma_desc_flags, | |
1012 | struct bcmgenet_tx_ring *ring) | |
1013 | { |
1014 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
1015 | struct device *kdev = &priv->pdev->dev; | |
1016 | struct enet_cb *tx_cb_ptr; | |
1017 | dma_addr_t mapping; | |
1018 | int ret; | |
1019 | ||
1020 | tx_cb_ptr = bcmgenet_get_txcb(priv, ring); | |
1021 | ||
1022 | if (unlikely(!tx_cb_ptr)) | |
1023 | BUG(); | |
1024 | tx_cb_ptr->skb = NULL; | |
1025 | ||
1026 | mapping = skb_frag_dma_map(kdev, frag, 0, | |
1027 | skb_frag_size(frag), DMA_TO_DEVICE); |
1028 | ret = dma_mapping_error(kdev, mapping); |
1029 | if (ret) { | |
1030 | netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n", | |
1031 | __func__); |
1032 | return ret; |
1033 | } | |
1034 | ||
1035 | dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); | |
1036 | dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size); | |
1037 | ||
1038 | dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, | |
1039 | (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | |
1040 | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); | |
1041 | |
1042 | ||
1043 | ring->free_bds -= 1; | |
1044 | ring->prod_index += 1; | |
1045 | ring->prod_index &= DMA_P_INDEX_MASK; | |
1046 | ||
1047 | return 0; | |
1048 | } | |
1049 | ||
1050 | /* Reallocate the SKB to put enough headroom in front of it and insert | |
1051 | * the transmit checksum offsets in the descriptors | |
1052 | */ | |
1053 | static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb) | |
1054 | { | |
1055 | struct status_64 *status = NULL; | |
1056 | struct sk_buff *new_skb; | |
1057 | u16 offset; | |
1058 | u8 ip_proto; | |
1059 | u16 ip_ver; | |
1060 | u32 tx_csum_info; | |
1061 | ||
1062 | if (unlikely(skb_headroom(skb) < sizeof(*status))) { | |
1063 | /* If 64 byte status block enabled, must make sure skb has | |
1064 | * enough headroom for us to insert 64B status block. | |
1065 | */ | |
1066 | new_skb = skb_realloc_headroom(skb, sizeof(*status)); | |
1067 | dev_kfree_skb(skb); | |
1068 | if (!new_skb) { | |
1069 | dev->stats.tx_errors++; | |
1070 | dev->stats.tx_dropped++; | |
1071 | return -ENOMEM; | |
1072 | } | |
1073 | skb = new_skb; | |
1074 | } | |
1075 | ||
1076 | skb_push(skb, sizeof(*status)); | |
1077 | status = (struct status_64 *)skb->data; | |
1078 | ||
1079 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | |
1080 | ip_ver = htons(skb->protocol); | |
1081 | switch (ip_ver) { | |
1082 | case ETH_P_IP: | |
1083 | ip_proto = ip_hdr(skb)->protocol; | |
1084 | break; | |
1085 | case ETH_P_IPV6: | |
1086 | ip_proto = ipv6_hdr(skb)->nexthdr; | |
1087 | break; | |
1088 | default: | |
1089 | return 0; | |
1090 | } | |
1091 | ||
1092 | offset = skb_checksum_start_offset(skb) - sizeof(*status); | |
1093 | tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | | |
1094 | (offset + skb->csum_offset); | |
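/* e.g. (illustrative): for a plain TCP/IPv4 frame the checksum start lies
 * 34 bytes past the status block (14 bytes Ethernet + 20 bytes IP header),
 * and the TCP checksum field 16 bytes further, so this encodes start 34 and
 * offset 34 + 16 = 50.
 */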
1095 | ||
1096 | /* Set the checksum "length valid" bit for TCP and UDP, and also set |
1097 | * the special UDP flag for UDP over IPv4; otherwise leave it at 0. |
1098 | */ | |
1099 | if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { | |
1100 | tx_csum_info |= STATUS_TX_CSUM_LV; | |
1101 | if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) | |
1102 | tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; | |
1103 | } else | |
1104 | tx_csum_info = 0; | |
1105 | ||
1106 | status->tx_csum_info = tx_csum_info; | |
1107 | } | |
1108 | ||
1109 | return 0; | |
1110 | } | |
1111 | ||
1112 | static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) | |
1113 | { | |
1114 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
1115 | struct bcmgenet_tx_ring *ring = NULL; | |
1116 | struct netdev_queue *txq; |
1117 | unsigned long flags = 0; |
1118 | int nr_frags, index; | |
1119 | u16 dma_desc_flags; | |
1120 | int ret; | |
1121 | int i; | |
1122 | ||
1123 | index = skb_get_queue_mapping(skb); | |
1124 | /* Mapping strategy: | |
1125 | * queue_mapping = 0, unclassified, packet transmitted through ring 16 |
1126 | * queue_mapping = 1, goes to ring 0 (highest priority queue) |
1127 | * queue_mapping = 2, goes to ring 1. | |
1128 | * queue_mapping = 3, goes to ring 2. | |
1129 | * queue_mapping = 4, goes to ring 3. | |
1130 | */ | |
1131 | if (index == 0) | |
1132 | index = DESC_INDEX; | |
1133 | else | |
1134 | index -= 1; | |
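/* For example (illustrative): a packet with queue_mapping 2 is sent on
 * hardware ring 1, while unclassified traffic (queue_mapping 0) uses the
 * default ring, DESC_INDEX (ring 16).
 */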
1135 | ||
1136 | nr_frags = skb_shinfo(skb)->nr_frags; |
1137 | ring = &priv->tx_rings[index]; | |
1138 | txq = netdev_get_tx_queue(dev, ring->queue); |
1139 | |
1140 | spin_lock_irqsave(&ring->lock, flags); | |
1141 | if (ring->free_bds <= nr_frags + 1) { | |
1142 | netif_tx_stop_queue(txq); |
1143 | netdev_err(dev, "%s: tx ring %d full when queue %d awake\n", |
1144 | __func__, index, ring->queue); |
1145 | ret = NETDEV_TX_BUSY; |
1146 | goto out; | |
1147 | } | |
1148 | ||
1149 | /* set the SKB transmit checksum */ |
1150 | if (priv->desc_64b_en) { | |
1151 | ret = bcmgenet_put_tx_csum(dev, skb); | |
1152 | if (ret) { | |
1153 | ret = NETDEV_TX_OK; | |
1154 | goto out; | |
1155 | } | |
1156 | } | |
1157 | ||
1158 | dma_desc_flags = DMA_SOP; | |
1159 | if (nr_frags == 0) | |
1160 | dma_desc_flags |= DMA_EOP; | |
1161 | ||
1162 | /* Transmit single SKB or head of fragment list */ | |
1163 | ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring); | |
1164 | if (ret) { | |
1165 | ret = NETDEV_TX_OK; | |
1166 | goto out; | |
1167 | } | |
1168 | ||
1169 | /* xmit fragment */ | |
1170 | for (i = 0; i < nr_frags; i++) { | |
1171 | ret = bcmgenet_xmit_frag(dev, | |
1172 | &skb_shinfo(skb)->frags[i], |
1173 | (i == nr_frags - 1) ? DMA_EOP : 0, | |
1174 | ring); | |
1175 | if (ret) { |
1176 | ret = NETDEV_TX_OK; | |
1177 | goto out; | |
1178 | } | |
1179 | } | |
1180 | ||
1181 | skb_tx_timestamp(skb); |
1182 | ||
1183 | /* we kept a software copy of how much we should advance the TDMA |
1184 | * producer index, now write it down to the hardware | |
1185 | */ | |
1186 | bcmgenet_tdma_ring_writel(priv, ring->index, | |
1187 | ring->prod_index, TDMA_PROD_INDEX); |
1188 | |
1189 | if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { | |
1190 | netif_tx_stop_queue(txq); |
1191 | ring->int_enable(priv, ring); |
1192 | } | |
1193 | ||
1194 | out: | |
1195 | spin_unlock_irqrestore(&ring->lock, flags); | |
1196 | ||
1197 | return ret; | |
1198 | } | |
1199 | ||
1200 | ||
1201 | static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb) |
1202 | { |
1203 | struct device *kdev = &priv->pdev->dev; | |
1204 | struct sk_buff *skb; | |
1205 | dma_addr_t mapping; | |
1206 | int ret; | |
1207 | ||
1208 | skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT); |
1209 | if (!skb) |
1210 | return -ENOMEM; | |
1211 | ||
1212 | /* a caller did not release this control block */ | |
1213 | WARN_ON(cb->skb != NULL); | |
1214 | cb->skb = skb; | |
1215 | mapping = dma_map_single(kdev, skb->data, | |
1216 | priv->rx_buf_len, DMA_FROM_DEVICE); |
1217 | ret = dma_mapping_error(kdev, mapping); |
1218 | if (ret) { | |
1219 | bcmgenet_free_cb(cb); | |
1220 | netif_err(priv, rx_err, priv->dev, | |
1221 | "%s DMA map failed\n", __func__); |
1222 | return ret; |
1223 | } | |
1224 | ||
1225 | dma_unmap_addr_set(cb, dma_addr, mapping); | |
1226 | /* assign packet, prepare descriptor, and advance pointer */ | |
1227 | ||
1228 | dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping); | |
1229 | ||
1230 | /* turn on the newly assigned BD for DMA to use */ | |
1231 | priv->rx_bd_assign_index++; | |
1232 | priv->rx_bd_assign_index &= (priv->num_rx_bds - 1); | |
1233 | ||
1234 | priv->rx_bd_assign_ptr = priv->rx_bds + | |
1235 | (priv->rx_bd_assign_index * DMA_DESC_SIZE); | |
1236 | ||
1237 | return 0; | |
1238 | } | |
1239 | ||
1240 | /* bcmgenet_desc_rx - descriptor-based RX processing. |
1241 | * This can be called from the bottom half or from the NAPI polling method. |
1242 | */ | |
1243 | static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, | |
1244 | unsigned int budget) | |
1245 | { | |
1246 | struct net_device *dev = priv->dev; | |
1247 | struct enet_cb *cb; | |
1248 | struct sk_buff *skb; | |
1249 | u32 dma_length_status; | |
1250 | unsigned long dma_flag; | |
1251 | int len, err; | |
1252 | unsigned int rxpktprocessed = 0, rxpkttoprocess; | |
1253 | unsigned int p_index; | |
1254 | unsigned int chksum_ok = 0; | |
1255 | ||
1256 | p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX); |
1257 | p_index &= DMA_P_INDEX_MASK; |
1258 | ||
1259 | if (p_index < priv->rx_c_index) | |
1260 | rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - | |
1261 | priv->rx_c_index + p_index; | |
1262 | else | |
1263 | rxpkttoprocess = p_index - priv->rx_c_index; | |
1264 | ||
1265 | netif_dbg(priv, rx_status, dev, | |
1266 | "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); |
1267 | |
1268 | while ((rxpktprocessed < rxpkttoprocess) && | |
1269 | (rxpktprocessed < budget)) { |
1270 | /* Unmap the packet contents such that we can use the |
1271 | * RSV from the 64-byte descriptor when enabled and save |
1272 | * a 32-bit register read |
1273 | */ | |
1274 | cb = &priv->rx_cbs[priv->rx_read_ptr]; | |
1275 | skb = cb->skb; | |
1276 | dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), | |
1277 | priv->rx_buf_len, DMA_FROM_DEVICE); |
1278 | |
1279 | if (!priv->desc_64b_en) { | |
1280 | dma_length_status = |
1281 | dmadesc_get_length_status(priv, | |
1282 | priv->rx_bds + | |
1283 | (priv->rx_read_ptr * | |
1284 | DMA_DESC_SIZE)); | |
1285 | } else { |
1286 | struct status_64 *status; | |
1287 | |
1288 | status = (struct status_64 *)skb->data; |
1289 | dma_length_status = status->length_status; | |
1290 | } | |
1291 | ||
1292 | /* DMA flags and length are still valid no matter how | |
1293 | * we got the Receive Status Vector (64B RSB or register) | |
1294 | */ | |
1295 | dma_flag = dma_length_status & 0xffff; | |
1296 | len = dma_length_status >> DMA_BUFLENGTH_SHIFT; | |
1297 | ||
1298 | netif_dbg(priv, rx_status, dev, | |
1299 | "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", |
1300 | __func__, p_index, priv->rx_c_index, | |
1301 | priv->rx_read_ptr, dma_length_status); | |
1302 | |
1303 | rxpktprocessed++; | |
1304 | ||
1305 | priv->rx_read_ptr++; | |
1306 | priv->rx_read_ptr &= (priv->num_rx_bds - 1); | |
1307 | ||
1308 | /* out of memory, just drop packets at the hardware level */ | |
1309 | if (unlikely(!skb)) { | |
1310 | dev->stats.rx_dropped++; | |
1311 | dev->stats.rx_errors++; | |
1312 | goto refill; | |
1313 | } | |
1314 | ||
1315 | if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { | |
1316 | netif_err(priv, rx_status, dev, | |
1317 | "dropping fragmented packet!\n"); |
1318 | dev->stats.rx_dropped++; |
1319 | dev->stats.rx_errors++; | |
1320 | dev_kfree_skb_any(cb->skb); | |
1321 | cb->skb = NULL; | |
1322 | goto refill; | |
1323 | } | |
1324 | /* report errors */ | |
1325 | if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | | |
1326 | DMA_RX_OV | | |
1327 | DMA_RX_NO | | |
1328 | DMA_RX_LG | | |
1329 | DMA_RX_RXER))) { | |
1330 | netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", | |
1331 | (unsigned int)dma_flag); |
1332 | if (dma_flag & DMA_RX_CRC_ERROR) |
1333 | dev->stats.rx_crc_errors++; | |
1334 | if (dma_flag & DMA_RX_OV) | |
1335 | dev->stats.rx_over_errors++; | |
1336 | if (dma_flag & DMA_RX_NO) | |
1337 | dev->stats.rx_frame_errors++; | |
1338 | if (dma_flag & DMA_RX_LG) | |
1339 | dev->stats.rx_length_errors++; | |
1340 | dev->stats.rx_dropped++; | |
1341 | dev->stats.rx_errors++; | |
1342 | ||
1343 | /* discard the packet and advance consumer index.*/ | |
1344 | dev_kfree_skb_any(cb->skb); | |
1345 | cb->skb = NULL; | |
1346 | goto refill; | |
1347 | } /* error packet */ | |
1348 | ||
1349 | chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && | |
1350 | priv->desc_rxchk_en; |
1351 | |
1352 | skb_put(skb, len); | |
1353 | if (priv->desc_64b_en) { | |
1354 | skb_pull(skb, 64); | |
1355 | len -= 64; | |
1356 | } | |
1357 | ||
1358 | if (likely(chksum_ok)) | |
1359 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1360 | ||
1361 | /* remove the 2 bytes the hardware added for IP alignment */ |
1362 | skb_pull(skb, 2); | |
1363 | len -= 2; | |
1364 | ||
1365 | if (priv->crc_fwd_en) { | |
1366 | skb_trim(skb, len - ETH_FCS_LEN); | |
1367 | len -= ETH_FCS_LEN; | |
1368 | } | |
1369 | ||
1370 | /* Finish setting up the received SKB and send it to the kernel */ |
1371 | skb->protocol = eth_type_trans(skb, priv->dev); | |
1372 | dev->stats.rx_packets++; | |
1373 | dev->stats.rx_bytes += len; | |
1374 | if (dma_flag & DMA_RX_MULT) | |
1375 | dev->stats.multicast++; | |
1376 | ||
1377 | /* Notify kernel */ | |
1378 | napi_gro_receive(&priv->napi, skb); | |
1379 | cb->skb = NULL; | |
1380 | netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); | |
1381 | ||
1382 | /* refill RX path on the current control block */ | |
1383 | refill: | |
1384 | err = bcmgenet_rx_refill(priv, cb); | |
1385 | if (err) | |
1386 | netif_err(priv, rx_err, dev, "Rx refill failed\n"); | |
1387 | } | |
1388 | ||
1389 | return rxpktprocessed; | |
1390 | } | |
1391 | ||
1392 | /* Assign skb to RX DMA descriptor. */ | |
1393 | static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv) | |
1394 | { | |
1395 | struct enet_cb *cb; | |
1396 | int ret = 0; | |
1397 | int i; | |
1398 | ||
1399 | netif_dbg(priv, hw, priv->dev, "%s:\n", __func__); | |
1400 | ||
1401 | /* loop here for each buffer needing assign */ | |
1402 | for (i = 0; i < priv->num_rx_bds; i++) { | |
1403 | cb = &priv->rx_cbs[priv->rx_bd_assign_index]; | |
1404 | if (cb->skb) | |
1405 | continue; | |
1406 | ||
1407 | ret = bcmgenet_rx_refill(priv, cb); |
1408 | if (ret) | |
1409 | break; | |
1410 | } |
1411 | ||
1412 | return ret; | |
1413 | } | |
1414 | ||
1415 | static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) | |
1416 | { | |
1417 | struct enet_cb *cb; | |
1418 | int i; | |
1419 | ||
1420 | for (i = 0; i < priv->num_rx_bds; i++) { | |
1421 | cb = &priv->rx_cbs[i]; | |
1422 | ||
1423 | if (dma_unmap_addr(cb, dma_addr)) { | |
1424 | dma_unmap_single(&priv->dev->dev, | |
1425 | dma_unmap_addr(cb, dma_addr), |
1426 | priv->rx_buf_len, DMA_FROM_DEVICE); | |
1427 | dma_unmap_addr_set(cb, dma_addr, 0); |
1428 | } | |
1429 | ||
1430 | if (cb->skb) | |
1431 | bcmgenet_free_cb(cb); | |
1432 | } | |
1433 | } | |
1434 | ||
1435 | static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) |
1436 | { |
1437 | u32 reg; | |
1438 | ||
1439 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
1440 | if (enable) | |
1441 | reg |= mask; | |
1442 | else | |
1443 | reg &= ~mask; | |
1444 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | |
1445 | ||
1446 | /* UniMAC stops on a packet boundary, wait for a full-size packet | |
1447 | * to be processed | |
1448 | */ | |
1449 | if (enable == 0) | |
1450 | usleep_range(1000, 2000); | |
1451 | } | |
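/* Typical use (illustrative), assuming the CMD_TX_EN/CMD_RX_EN bit
 * definitions from bcmgenet.h: the interface-start path enables both
 * directions with umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true),
 * and the stop path clears them again with the same helper and false.
 */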
1452 | ||
1453 | static int reset_umac(struct bcmgenet_priv *priv) |
1454 | { | |
1455 | struct device *kdev = &priv->pdev->dev; | |
1456 | unsigned int timeout = 0; | |
1457 | u32 reg; | |
1458 | ||
1459 | /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ | |
1460 | bcmgenet_rbuf_ctrl_set(priv, 0); | |
1461 | udelay(10); | |
1462 | ||
1463 | /* disable MAC while updating its registers */ | |
1464 | bcmgenet_umac_writel(priv, 0, UMAC_CMD); | |
1465 | ||
1466 | /* issue soft reset, wait for it to complete */ | |
1467 | bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); | |
1468 | while (timeout++ < 1000) { | |
1469 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
1470 | if (!(reg & CMD_SW_RESET)) | |
1471 | return 0; | |
1472 | ||
1473 | udelay(1); | |
1474 | } | |
1475 | ||
1476 | if (timeout == 1000) { | |
1477 | dev_err(kdev, | |
1478 | "timeout waiting for MAC to come out of resetn\n"); | |
1479 | return -ETIMEDOUT; | |
1480 | } | |
1481 | ||
1482 | return 0; | |
1483 | } | |
1484 | ||
1485 | static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) |
1486 | { | |
1487 | /* Mask all interrupts.*/ | |
1488 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); | |
1489 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); | |
1490 | bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); | |
1491 | bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); | |
1492 | bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); | |
1493 | bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); | |
1494 | } | |
1495 | ||
1496 | static int init_umac(struct bcmgenet_priv *priv) |
1497 | { | |
1498 | struct device *kdev = &priv->pdev->dev; | |
1499 | int ret; | |
1500 | u32 reg, cpu_mask_clear; | |
1501 | ||
1502 | dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); | |
1503 | ||
1504 | ret = reset_umac(priv); | |
1505 | if (ret) | |
1506 | return ret; | |
1507 | ||
1508 | bcmgenet_umac_writel(priv, 0, UMAC_CMD); | |
1509 | /* clear tx/rx counter */ | |
1510 | bcmgenet_umac_writel(priv, | |
1511 | MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, |
1512 | UMAC_MIB_CTRL); | |
1513 | bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); |
1514 | ||
1515 | bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); | |
1516 | ||
1517 | /* init rx registers, enable ip header optimization */ | |
1518 | reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); | |
1519 | reg |= RBUF_ALIGN_2B; | |
1520 | bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); | |
1521 | ||
1522 | if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) | |
1523 | bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); | |
1524 | ||
1525 | bcmgenet_intr_disable(priv); |
1526 | |
1527 | cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; | |
1528 | ||
1529 | dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); | |
1530 | ||
1531 | /* Monitor cable plug/unplug events for internal PHY */ |
1532 | if (phy_is_internal(priv->phydev)) | |
1533 | cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); | |
1534 | else if (priv->ext_phy) | |
1535 | cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); | |
1536 | else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { | |
1537 | reg = bcmgenet_bp_mc_get(priv); | |
1538 | reg |= BIT(priv->hw_params->bp_in_en_shift); | |
1539 | ||
1540 | /* bp_mask: back pressure mask */ | |
1541 | if (netif_is_multiqueue(priv->dev)) | |
1542 | reg |= priv->hw_params->bp_in_mask; | |
1543 | else | |
1544 | reg &= ~priv->hw_params->bp_in_mask; | |
1545 | bcmgenet_bp_mc_set(priv, reg); | |
1546 | } | |
1547 | ||
1548 | /* Enable MDIO interrupts on GENET v3+ */ | |
1549 | if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) | |
1550 | cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR; | |
1551 | ||
1552 | bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); |
1553 | |
1554 | /* Enable rx/tx engine.*/ | |
1555 | dev_dbg(kdev, "done init umac\n"); | |
1556 | ||
1557 | return 0; | |
1558 | } | |
1559 | ||
1560 | /* Initialize all house-keeping variables for a TX ring, along | |
1561 | * with corresponding hardware registers | |
1562 | */ | |
1563 | static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, | |
1564 | unsigned int index, unsigned int size, | |
1565 | unsigned int write_ptr, unsigned int end_ptr) | |
1566 | { | |
1567 | struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; | |
1568 | u32 words_per_bd = WORDS_PER_BD(priv); | |
1569 | u32 flow_period_val = 0; | |
1570 | unsigned int first_bd; | |
1571 | ||
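/* write_ptr and end_ptr are descriptor indices into the shared BD pool;
 * they are converted to word offsets below when programming the
 * DMA_START_ADDR, TDMA_READ_PTR and DMA_END_ADDR registers.
 */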
1572 | spin_lock_init(&ring->lock); | |
1573 | ring->index = index; | |
1574 | if (index == DESC_INDEX) { | |
1575 | ring->queue = 0; | |
1576 | ring->int_enable = bcmgenet_tx_ring16_int_enable; | |
1577 | ring->int_disable = bcmgenet_tx_ring16_int_disable; | |
1578 | } else { | |
1579 | ring->queue = index + 1; | |
1580 | ring->int_enable = bcmgenet_tx_ring_int_enable; | |
1581 | ring->int_disable = bcmgenet_tx_ring_int_disable; | |
1582 | } | |
1583 | ring->cbs = priv->tx_cbs + write_ptr; | |
1584 | ring->size = size; | |
1585 | ring->c_index = 0; | |
1586 | ring->free_bds = size; | |
1587 | ring->write_ptr = write_ptr; | |
1588 | ring->cb_ptr = write_ptr; | |
1589 | ring->end_ptr = end_ptr - 1; | |
1590 | ring->prod_index = 0; | |
1591 | ||
1592 | /* Set flow period for ring != 16 */ | |
1593 | if (index != DESC_INDEX) | |
1594 | flow_period_val = ENET_MAX_MTU_SIZE << 16; | |
1595 | ||
1596 | bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); | |
1597 | bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); | |
1598 | bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); | |
1599 | /* Disable rate control for now */ | |
1600 | bcmgenet_tdma_ring_writel(priv, index, flow_period_val, | |
c91b7f66 | 1601 | TDMA_FLOW_PERIOD); |
1c1008c7 FF |
1602 | /* Unclassified traffic goes to ring 16 */ |
1603 | bcmgenet_tdma_ring_writel(priv, index, | |
c91b7f66 FF |
1604 | ((size << DMA_RING_SIZE_SHIFT) | |
1605 | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); | |
1c1008c7 FF |
1606 | |
1607 | first_bd = write_ptr; | |
1608 | ||
1609 | /* Set start and end address, read and write pointers */ | |
1610 | bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, | |
c91b7f66 | 1611 | DMA_START_ADDR); |
1c1008c7 | 1612 | bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, |
c91b7f66 | 1613 | TDMA_READ_PTR); |
1c1008c7 | 1614 | bcmgenet_tdma_ring_writel(priv, index, first_bd, |
c91b7f66 | 1615 | TDMA_WRITE_PTR); |
1c1008c7 | 1616 | bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, |
c91b7f66 | 1617 | DMA_END_ADDR); |
1c1008c7 FF |
1618 | } |
1619 | ||
1620 | /* Initialize a RDMA ring */ | |
1621 | static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, | |
c91b7f66 | 1622 | unsigned int index, unsigned int size) |
1c1008c7 FF |
1623 | { |
1624 | u32 words_per_bd = WORDS_PER_BD(priv); | |
1625 | int ret; | |
1626 | ||
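/* A single RX ring backed by the whole descriptor pool: allocate the
 * control blocks and RX buffers, then program the ring geometry and the
 * XON/XOFF flow-control thresholds.
 */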
1627 | priv->num_rx_bds = TOTAL_DESC; | |
1628 | priv->rx_bds = priv->base + priv->hw_params->rdma_offset; | |
1629 | priv->rx_bd_assign_ptr = priv->rx_bds; | |
1630 | priv->rx_bd_assign_index = 0; | |
1631 | priv->rx_c_index = 0; | |
1632 | priv->rx_read_ptr = 0; | |
1633 | priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb), | |
1634 | GFP_KERNEL); | |
1635 | if (!priv->rx_cbs) | |
1636 | return -ENOMEM; | |
1637 | ||
1638 | ret = bcmgenet_alloc_rx_buffers(priv); | |
1639 | if (ret) { | |
1640 | kfree(priv->rx_cbs); | |
1641 | return ret; | |
1642 | } | |
1643 | ||
1644 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR); | |
1645 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); | |
1646 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); | |
1647 | bcmgenet_rdma_ring_writel(priv, index, | |
c91b7f66 FF |
1648 | ((size << DMA_RING_SIZE_SHIFT) | |
1649 | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); | |
1c1008c7 FF |
1650 | bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR); |
1651 | bcmgenet_rdma_ring_writel(priv, index, | |
c91b7f66 | 1652 | words_per_bd * size - 1, DMA_END_ADDR); |
1c1008c7 | 1653 | bcmgenet_rdma_ring_writel(priv, index, |
c91b7f66 FF |
1654 | (DMA_FC_THRESH_LO << |
1655 | DMA_XOFF_THRESHOLD_SHIFT) | | |
1656 | DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); | |
1c1008c7 FF |
1657 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR); |
1658 | ||
1659 | return ret; | |
1660 | } | |
1661 | ||
1662 | /* init multi xmit queues, only available for GENET2+ | |
1663 | * the queue is partitioned as follows: | |
1664 | * | |
1665 | * queues 0 - 3 are priority based, each one has 32 descriptors, | |
1666 | * with queue 0 being the highest priority queue. | |
1667 | * | |
1668 | * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT | |
1669 | * descriptors: 256 - (number of tx queues * BDs per queue) = 128 | |
1670 | * descriptors. | |
1671 | * | |
1672 | * The transmit control block pool is then partitioned as follows: | |
1673 | * - tx_rings[0].cbs points to tx_cbs[0..31] | |
1674 | * - tx_rings[1].cbs points to tx_cbs[32..63] | |
1675 | * - tx_rings[2].cbs points to tx_cbs[64..95] | |
1676 | * - tx_rings[3].cbs points to tx_cbs[96..127] | |
1677 | * - tx_cbs[128..255] are for the default queue (ring 16) | |
1678 | */ | |
1679 | static void bcmgenet_init_multiq(struct net_device *dev) | |
1680 | { | |
1681 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
1682 | unsigned int i, dma_enable; | |
1683 | u32 reg, dma_ctrl, ring_cfg = 0, dma_priority = 0; | |
1684 | ||
1685 | if (!netif_is_multiqueue(dev)) { | |
1686 | netdev_warn(dev, "called with non multi queue aware HW\n"); | |
1687 | return; | |
1688 | } | |
1689 | ||
1690 | dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
1691 | dma_enable = dma_ctrl & DMA_EN; | |
1692 | dma_ctrl &= ~DMA_EN; | |
1693 | bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); | |
1694 | ||
1695 | /* Enable strict priority arbiter mode */ | |
1696 | bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); | |
1697 | ||
1698 | for (i = 0; i < priv->hw_params->tx_queues; i++) { | |
1699 | /* ring i uses tx_cbs[i * bds_cnt .. (i + 1) * bds_cnt - 1]; | |
1700 | * the remaining tx_cbs back the default tx queue (ring 16) | |
1701 | */ | |
1702 | bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt, | |
c91b7f66 FF |
1703 | i * priv->hw_params->bds_cnt, |
1704 | (i + 1) * priv->hw_params->bds_cnt); | |
1c1008c7 FF |
1705 | |
1706 | /* Configure ring as descriptor ring and set up priority */ | |
1707 | ring_cfg |= 1 << i; | |
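/* Each ring's priority field is (GENET_MAX_MQ_CNT + 1) bits wide; ring i
 * is packed at bit (GENET_MAX_MQ_CNT + 1) * i, and ring 16's priority is
 * written at bit 20 further below.
 */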
1708 | dma_priority |= ((GENET_Q0_PRIORITY + i) << | |
1709 | (GENET_MAX_MQ_CNT + 1) * i); | |
1710 | dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT); | |
1711 | } | |
1712 | ||
1713 | /* Enable rings */ | |
1714 | reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG); | |
1715 | reg |= ring_cfg; | |
1716 | bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG); | |
1717 | ||
1718 | /* Use configured ring priorities and set ring #16 priority */ | |
1719 | reg = bcmgenet_tdma_readl(priv, DMA_RING_PRIORITY); | |
1720 | reg |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 20); | |
1721 | reg |= dma_priority; | |
1722 | bcmgenet_tdma_writel(priv, reg, DMA_PRIORITY); | |
1723 | ||
1724 | /* Enable the configured rings and restore DMA_EN if DMA was enabled before */ | |
1725 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
1726 | reg |= dma_ctrl; | |
1727 | if (dma_enable) | |
1728 | reg |= DMA_EN; | |
1729 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
1730 | } | |
1731 | ||
1732 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) | |
1733 | { | |
1734 | int i; | |
1735 | ||
1736 | /* disable DMA */ | |
1737 | bcmgenet_rdma_writel(priv, 0, DMA_CTRL); | |
1738 | bcmgenet_tdma_writel(priv, 0, DMA_CTRL); | |
1739 | ||
1740 | for (i = 0; i < priv->num_tx_bds; i++) { | |
1741 | if (priv->tx_cbs[i].skb != NULL) { | |
1742 | dev_kfree_skb(priv->tx_cbs[i].skb); | |
1743 | priv->tx_cbs[i].skb = NULL; | |
1744 | } | |
1745 | } | |
1746 | ||
1747 | bcmgenet_free_rx_buffers(priv); | |
1748 | kfree(priv->rx_cbs); | |
1749 | kfree(priv->tx_cbs); | |
1750 | } | |
1751 | ||
1752 | /* bcmgenet_init_dma: Initialize DMA control and Tx/Rx ring structures */ | |
1753 | static int bcmgenet_init_dma(struct bcmgenet_priv *priv) | |
1754 | { | |
1755 | int ret; | |
1756 | ||
1757 | netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n"); | |
1758 | ||
1759 | /* by default, enable ring 16 (descriptor based) */ | |
1760 | ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC); | |
1761 | if (ret) { | |
1762 | netdev_err(priv->dev, "failed to initialize RX ring\n"); | |
1763 | return ret; | |
1764 | } | |
1765 | ||
1766 | /* init rDma */ | |
1767 | bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); | |
1768 | ||
1769 | /* Init tDma */ | |
1770 | bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); | |
1771 | ||
1772 | /* Initialize common TX ring structures */ | |
1773 | priv->tx_bds = priv->base + priv->hw_params->tdma_offset; | |
1774 | priv->num_tx_bds = TOTAL_DESC; | |
1775 | priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb), | |
c91b7f66 | 1776 | GFP_KERNEL); |
1c1008c7 FF |
1777 | if (!priv->tx_cbs) { |
1778 | bcmgenet_fini_dma(priv); | |
1779 | return -ENOMEM; | |
1780 | } | |
1781 | ||
1782 | /* initialize multi xmit queue */ | |
1783 | bcmgenet_init_multiq(priv->dev); | |
1784 | ||
1785 | /* initialize special ring 16 */ | |
1786 | bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT, | |
c91b7f66 FF |
1787 | priv->hw_params->tx_queues * |
1788 | priv->hw_params->bds_cnt, | |
1789 | TOTAL_DESC); | |
1c1008c7 FF |
1790 | |
1791 | return 0; | |
1792 | } | |
1793 | ||
1794 | /* NAPI polling method */ | |
1795 | static int bcmgenet_poll(struct napi_struct *napi, int budget) | |
1796 | { | |
1797 | struct bcmgenet_priv *priv = container_of(napi, | |
1798 | struct bcmgenet_priv, napi); | |
1799 | unsigned int work_done; | |
1800 | ||
1801 | /* tx reclaim */ | |
1802 | bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); | |
1803 | ||
1804 | work_done = bcmgenet_desc_rx(priv, budget); | |
1805 | ||
1806 | /* Advance our consumer index */ | |
1807 | priv->rx_c_index += work_done; | |
1808 | priv->rx_c_index &= DMA_C_INDEX_MASK; | |
1809 | bcmgenet_rdma_ring_writel(priv, DESC_INDEX, | |
c91b7f66 | 1810 | priv->rx_c_index, RDMA_CONS_INDEX); |
1c1008c7 FF |
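/* All pending work processed: leave polling mode and re-enable the RXDMA
 * buffer-done interrupt that was masked in bcmgenet_isr0().
 */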
1811 | if (work_done < budget) { |
1812 | napi_complete(napi); | |
c91b7f66 FF |
1813 | bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE, |
1814 | INTRL2_CPU_MASK_CLEAR); | |
1c1008c7 FF |
1815 | } |
1816 | ||
1817 | return work_done; | |
1818 | } | |
1819 | ||
1820 | /* Interrupt bottom half */ | |
1821 | static void bcmgenet_irq_task(struct work_struct *work) | |
1822 | { | |
1823 | struct bcmgenet_priv *priv = container_of( | |
1824 | work, struct bcmgenet_priv, bcmgenet_irq_work); | |
1825 | ||
1826 | netif_dbg(priv, intr, priv->dev, "%s\n", __func__); | |
1827 | ||
8fdb0e0f FF |
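/* Slow-path events flagged by bcmgenet_isr0(): Wake-on-LAN magic packet
 * detection and PHY link state changes.
 */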
1828 | if (priv->irq0_stat & UMAC_IRQ_MPD_R) { |
1829 | priv->irq0_stat &= ~UMAC_IRQ_MPD_R; | |
1830 | netif_dbg(priv, wol, priv->dev, | |
1831 | "magic packet detected, waking up\n"); | |
1832 | bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); | |
1833 | } | |
1834 | ||
1c1008c7 FF |
1835 | /* Link UP/DOWN event */ |
1836 | if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && | |
c91b7f66 | 1837 | (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) { |
80d8e96d | 1838 | phy_mac_interrupt(priv->phydev, |
c91b7f66 | 1839 | priv->irq0_stat & UMAC_IRQ_LINK_UP); |
1c1008c7 FF |
1840 | priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN); |
1841 | } | |
1842 | } | |
1843 | ||
1844 | /* bcmgenet_isr1: interrupt handler for ring buffer. */ | |
1845 | static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) | |
1846 | { | |
1847 | struct bcmgenet_priv *priv = dev_id; | |
1848 | unsigned int index; | |
1849 | ||
1850 | /* Save irq status for bottom-half processing. */ | |
1851 | priv->irq1_stat = | |
1852 | bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & | |
1853 | ~priv->int1_mask; | |
1854 | /* clear interrupts */ | |
1855 | bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); | |
1856 | ||
1857 | netif_dbg(priv, intr, priv->dev, | |
c91b7f66 | 1858 | "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); |
1c1008c7 FF |
1859 | /* Check the MBDONE interrupts: the lower 16 status bits map to | |
1860 | * TX rings 0-15; reclaim descriptors for each ring that completed | |
1861 | */ | |
1862 | if (priv->irq1_stat & 0x0000ffff) { | |
1863 | index = 0; | |
1864 | for (index = 0; index < 16; index++) { | |
1865 | if (priv->irq1_stat & (1 << index)) | |
1866 | bcmgenet_tx_reclaim(priv->dev, | |
c91b7f66 | 1867 | &priv->tx_rings[index]); |
1c1008c7 FF |
1868 | } |
1869 | } | |
1870 | return IRQ_HANDLED; | |
1871 | } | |
1872 | ||
1873 | /* bcmgenet_isr0: Handle various interrupts. */ | |
1874 | static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) | |
1875 | { | |
1876 | struct bcmgenet_priv *priv = dev_id; | |
1877 | ||
1878 | /* Save irq status for bottom-half processing. */ | |
1879 | priv->irq0_stat = | |
1880 | bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & | |
1881 | ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); | |
1882 | /* clear interrupts */ | |
1883 | bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); | |
1884 | ||
1885 | netif_dbg(priv, intr, priv->dev, | |
c91b7f66 | 1886 | "IRQ=0x%x\n", priv->irq0_stat); |
1c1008c7 FF |
1887 | |
1888 | if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) { | |
1889 | /* We use NAPI (software interrupt throttling) since | |
1890 | * Rx Descriptor throttling is not used. | |
1891 | * Disable the interrupt here; it is re-enabled in the poll method. | |
1892 | */ | |
1893 | if (likely(napi_schedule_prep(&priv->napi))) { | |
c91b7f66 FF |
1894 | bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE, |
1895 | INTRL2_CPU_MASK_SET); | |
1c1008c7 FF |
1896 | __napi_schedule(&priv->napi); |
1897 | } | |
1898 | } | |
1899 | if (priv->irq0_stat & | |
1900 | (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { | |
1901 | /* Tx reclaim */ | |
1902 | bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); | |
1903 | } | |
1904 | if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | | |
1905 | UMAC_IRQ_PHY_DET_F | | |
1906 | UMAC_IRQ_LINK_UP | | |
1907 | UMAC_IRQ_LINK_DOWN | | |
1908 | UMAC_IRQ_HFB_SM | | |
1909 | UMAC_IRQ_HFB_MM | | |
1910 | UMAC_IRQ_MPD_R)) { | |
1911 | /* all other interrupts of interest are handled in the bottom half */ | |
1912 | schedule_work(&priv->bcmgenet_irq_work); | |
1913 | } | |
1914 | ||
1915 | if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && | |
c91b7f66 | 1916 | priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { |
1c1008c7 FF |
1917 | priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); |
1918 | wake_up(&priv->wq); | |
1919 | } | |
1920 | ||
1921 | return IRQ_HANDLED; | |
1922 | } | |
1923 | ||
8562056f FF |
1924 | static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) |
1925 | { | |
1926 | struct bcmgenet_priv *priv = dev_id; | |
1927 | ||
1928 | pm_wakeup_event(&priv->pdev->dev, 0); | |
1929 | ||
1930 | return IRQ_HANDLED; | |
1931 | } | |
1932 | ||
1c1008c7 FF |
1933 | static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) |
1934 | { | |
1935 | u32 reg; | |
1936 | ||
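/* Pulse bit 1 of the RBUF control register to reset the UniMAC, waiting
 * 10us for the reset to assert and again for it to deassert.
 */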
1937 | reg = bcmgenet_rbuf_ctrl_get(priv); | |
1938 | reg |= BIT(1); | |
1939 | bcmgenet_rbuf_ctrl_set(priv, reg); | |
1940 | udelay(10); | |
1941 | ||
1942 | reg &= ~BIT(1); | |
1943 | bcmgenet_rbuf_ctrl_set(priv, reg); | |
1944 | udelay(10); | |
1945 | } | |
1946 | ||
1947 | static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, | |
c91b7f66 | 1948 | unsigned char *addr) |
1c1008c7 FF |
1949 | { |
1950 | bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | | |
1951 | (addr[2] << 8) | addr[3], UMAC_MAC0); | |
1952 | bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); | |
1953 | } | |
1954 | ||
1955 | static int bcmgenet_wol_resume(struct bcmgenet_priv *priv) | |
1956 | { | |
1c1008c7 | 1957 | /* From WOL-enabled suspend, switch to regular clock */ |
1c3c1e79 | 1958 | clk_disable_unprepare(priv->clk_wol); |
1c1008c7 | 1959 | |
80d8e96d | 1960 | phy_init_hw(priv->phydev); |
1c1008c7 FF |
1961 | /* Speed settings must be restored */ |
1962 | bcmgenet_mii_config(priv->dev); | |
1963 | ||
1964 | return 0; | |
1965 | } | |
1966 | ||
1967 | /* Returns a reusable dma control register value */ | |
1968 | static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) | |
1969 | { | |
1970 | u32 reg; | |
1971 | u32 dma_ctrl; | |
1972 | ||
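/* Clear the ring 16 buffer enable and the master DMA enable on both TDMA
 * and RDMA, then flush the UniMAC TX path. The cleared bits are returned
 * so bcmgenet_enable_dma() can restore them later.
 */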
1973 | /* disable DMA */ | |
1974 | dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; | |
1975 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
1976 | reg &= ~dma_ctrl; | |
1977 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
1978 | ||
1979 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
1980 | reg &= ~dma_ctrl; | |
1981 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
1982 | ||
1983 | bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); | |
1984 | udelay(10); | |
1985 | bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); | |
1986 | ||
1987 | return dma_ctrl; | |
1988 | } | |
1989 | ||
1990 | static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) | |
1991 | { | |
1992 | u32 reg; | |
1993 | ||
1994 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
1995 | reg |= dma_ctrl; | |
1996 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
1997 | ||
1998 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
1999 | reg |= dma_ctrl; | |
2000 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2001 | } | |
2002 | ||
909ff5ef FF |
2003 | static void bcmgenet_netif_start(struct net_device *dev) |
2004 | { | |
2005 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2006 | ||
2007 | /* Start the network engine */ | |
2008 | napi_enable(&priv->napi); | |
2009 | ||
2010 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); | |
2011 | ||
2012 | if (phy_is_internal(priv->phydev)) | |
2013 | bcmgenet_power_up(priv, GENET_POWER_PASSIVE); | |
2014 | ||
2015 | netif_tx_start_all_queues(dev); | |
2016 | ||
2017 | phy_start(priv->phydev); | |
2018 | } | |
2019 | ||
1c1008c7 FF |
2020 | static int bcmgenet_open(struct net_device *dev) |
2021 | { | |
2022 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2023 | unsigned long dma_ctrl; | |
2024 | u32 reg; | |
2025 | int ret; | |
2026 | ||
2027 | netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); | |
2028 | ||
2029 | /* Turn on the clock */ | |
2030 | if (!IS_ERR(priv->clk)) | |
2031 | clk_prepare_enable(priv->clk); | |
2032 | ||
2033 | /* take MAC out of reset */ | |
2034 | bcmgenet_umac_reset(priv); | |
2035 | ||
2036 | ret = init_umac(priv); | |
2037 | if (ret) | |
2038 | goto err_clk_disable; | |
2039 | ||
2040 | /* disable ethernet MAC while updating its registers */ | |
e29585b8 | 2041 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); |
1c1008c7 | 2042 | |
909ff5ef FF |
2043 | /* Make sure we reflect the value of CRC_CMD_FWD */ |
2044 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
2045 | priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); | |
2046 | ||
1c1008c7 FF |
2047 | bcmgenet_set_hw_addr(priv, dev->dev_addr); |
2048 | ||
1c1008c7 FF |
2049 | if (phy_is_internal(priv->phydev)) { |
2050 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | |
2051 | reg |= EXT_ENERGY_DET_MASK; | |
2052 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | |
2053 | } | |
2054 | ||
2055 | /* Disable RX/TX DMA and flush TX queues */ | |
2056 | dma_ctrl = bcmgenet_dma_disable(priv); | |
2057 | ||
2058 | /* Reinitialize TDMA and RDMA and SW housekeeping */ | |
2059 | ret = bcmgenet_init_dma(priv); | |
2060 | if (ret) { | |
2061 | netdev_err(dev, "failed to initialize DMA\n"); | |
2062 | goto err_fini_dma; | |
2063 | } | |
2064 | ||
2065 | /* Always enable ring 16 - descriptor ring */ | |
2066 | bcmgenet_enable_dma(priv, dma_ctrl); | |
2067 | ||
2068 | ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, | |
c91b7f66 | 2069 | dev->name, priv); |
1c1008c7 FF |
2070 | if (ret < 0) { |
2071 | netdev_err(dev, "can't request IRQ %d\n", priv->irq0); | |
2072 | goto err_fini_dma; | |
2073 | } | |
2074 | ||
2075 | ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, | |
c91b7f66 | 2076 | dev->name, priv); |
1c1008c7 FF |
2077 | if (ret < 0) { |
2078 | netdev_err(dev, "can't request IRQ %d\n", priv->irq1); | |
2079 | goto err_irq0; | |
2080 | } | |
2081 | ||
909ff5ef | 2082 | bcmgenet_netif_start(dev); |
1c1008c7 FF |
2083 | |
2084 | return 0; | |
2085 | ||
2086 | err_irq0: | |
2087 | free_irq(priv->irq0, priv); | |
2088 | err_fini_dma: | |
2089 | bcmgenet_fini_dma(priv); | |
2090 | err_clk_disable: | |
2091 | if (!IS_ERR(priv->clk)) | |
2092 | clk_disable_unprepare(priv->clk); | |
2093 | return ret; | |
2094 | } | |
2095 | ||
2096 | static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) | |
2097 | { | |
2098 | int ret = 0; | |
2099 | int timeout = 0; | |
2100 | u32 reg; | |
2101 | ||
2102 | /* Disable TDMA to stop adding more frames to the TX DMA */ | |
2103 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
2104 | reg &= ~DMA_EN; | |
2105 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2106 | ||
2107 | /* Check TDMA status register to confirm TDMA is disabled */ | |
2108 | while (timeout++ < DMA_TIMEOUT_VAL) { | |
2109 | reg = bcmgenet_tdma_readl(priv, DMA_STATUS); | |
2110 | if (reg & DMA_DISABLED) | |
2111 | break; | |
2112 | ||
2113 | udelay(1); | |
2114 | } | |
2115 | ||
2116 | if (timeout == DMA_TIMEOUT_VAL) { | |
c91b7f66 | 2117 | netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); |
1c1008c7 FF |
2118 | ret = -ETIMEDOUT; |
2119 | } | |
2120 | ||
2121 | /* Wait 10ms for packet drain in both tx and rx dma */ | |
2122 | usleep_range(10000, 20000); | |
2123 | ||
2124 | /* Disable RDMA */ | |
2125 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2126 | reg &= ~DMA_EN; | |
2127 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
2128 | ||
2129 | timeout = 0; | |
2130 | /* Check RDMA status register to confirm RDMA is disabled */ | |
2131 | while (timeout++ < DMA_TIMEOUT_VAL) { | |
2132 | reg = bcmgenet_rdma_readl(priv, DMA_STATUS); | |
2133 | if (reg & DMA_DISABLED) | |
2134 | break; | |
2135 | ||
2136 | udelay(1); | |
2137 | } | |
2138 | ||
2139 | if (timeout == DMA_TIMEOUT_VAL) { | |
c91b7f66 FF |
2140 | netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); |
2141 | ret = -ETIMEDOUT; | |
1c1008c7 FF |
2142 | } |
2143 | ||
2144 | return ret; | |
2145 | } | |
2146 | ||
909ff5ef FF |
2147 | static void bcmgenet_netif_stop(struct net_device *dev) |
2148 | { | |
2149 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2150 | ||
2151 | netif_tx_stop_all_queues(dev); | |
2152 | napi_disable(&priv->napi); | |
2153 | phy_stop(priv->phydev); | |
2154 | ||
2155 | bcmgenet_intr_disable(priv); | |
2156 | ||
2157 | /* Wait for pending work items to complete. Since interrupts are | |
2158 | * disabled no new work will be scheduled. | |
2159 | */ | |
2160 | cancel_work_sync(&priv->bcmgenet_irq_work); | |
2161 | } | |
2162 | ||
1c1008c7 FF |
2163 | static int bcmgenet_close(struct net_device *dev) |
2164 | { | |
2165 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2166 | int ret; | |
1c1008c7 FF |
2167 | |
2168 | netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); | |
2169 | ||
909ff5ef | 2170 | bcmgenet_netif_stop(dev); |
1c1008c7 FF |
2171 | |
2172 | /* Disable MAC receive */ | |
e29585b8 | 2173 | umac_enable_set(priv, CMD_RX_EN, false); |
1c1008c7 | 2174 | |
1c1008c7 FF |
2175 | ret = bcmgenet_dma_teardown(priv); |
2176 | if (ret) | |
2177 | return ret; | |
2178 | ||
2179 | /* Disable MAC transmit. TX DMA must be disabled before this */ | |
e29585b8 | 2180 | umac_enable_set(priv, CMD_TX_EN, false); |
1c1008c7 | 2181 | |
1c1008c7 FF |
2182 | /* tx reclaim */ |
2183 | bcmgenet_tx_reclaim_all(dev); | |
2184 | bcmgenet_fini_dma(priv); | |
2185 | ||
2186 | free_irq(priv->irq0, priv); | |
2187 | free_irq(priv->irq1, priv); | |
2188 | ||
1c1008c7 FF |
2189 | if (phy_is_internal(priv->phydev)) |
2190 | bcmgenet_power_down(priv, GENET_POWER_PASSIVE); | |
2191 | ||
1c1008c7 FF |
2192 | if (!IS_ERR(priv->clk)) |
2193 | clk_disable_unprepare(priv->clk); | |
2194 | ||
2195 | return 0; | |
2196 | } | |
2197 | ||
2198 | static void bcmgenet_timeout(struct net_device *dev) | |
2199 | { | |
2200 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2201 | ||
2202 | netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); | |
2203 | ||
2204 | dev->trans_start = jiffies; | |
2205 | ||
2206 | dev->stats.tx_errors++; | |
2207 | ||
2208 | netif_tx_wake_all_queues(dev); | |
2209 | } | |
2210 | ||
2211 | #define MAX_MC_COUNT 16 | |
2212 | ||
2213 | static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, | |
2214 | unsigned char *addr, | |
2215 | int *i, | |
2216 | int *mc) | |
2217 | { | |
2218 | u32 reg; | |
2219 | ||
c91b7f66 FF |
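/* Each filter entry spans two UMAC_MDF_ADDR words (the top two address
 * bytes, then the remaining four); *i tracks the word slot, while *mc
 * counts entries and selects the enable bit in UMAC_MDF_CTRL from the
 * top down.
 */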
2220 | bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], |
2221 | UMAC_MDF_ADDR + (*i * 4)); | |
2222 | bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | | |
2223 | addr[4] << 8 | addr[5], | |
2224 | UMAC_MDF_ADDR + ((*i + 1) * 4)); | |
1c1008c7 FF |
2225 | reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL); |
2226 | reg |= (1 << (MAX_MC_COUNT - *mc)); | |
2227 | bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); | |
2228 | *i += 2; | |
2229 | (*mc)++; | |
2230 | } | |
2231 | ||
2232 | static void bcmgenet_set_rx_mode(struct net_device *dev) | |
2233 | { | |
2234 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2235 | struct netdev_hw_addr *ha; | |
2236 | int i, mc; | |
2237 | u32 reg; | |
2238 | ||
2239 | netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); | |
2240 | ||
2241 | /* Promiscuous mode */ | |
2242 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
2243 | if (dev->flags & IFF_PROMISC) { | |
2244 | reg |= CMD_PROMISC; | |
2245 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | |
2246 | bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); | |
2247 | return; | |
2248 | } else { | |
2249 | reg &= ~CMD_PROMISC; | |
2250 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | |
2251 | } | |
2252 | ||
2253 | /* UniMAC doesn't support ALLMULTI */ | |
2254 | if (dev->flags & IFF_ALLMULTI) { | |
2255 | netdev_warn(dev, "ALLMULTI is not supported\n"); | |
2256 | return; | |
2257 | } | |
2258 | ||
2259 | /* update MDF filter */ | |
2260 | i = 0; | |
2261 | mc = 0; | |
2262 | /* Broadcast */ | |
2263 | bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc); | |
2264 | /* my own address. */ | |
2265 | bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc); | |
2266 | /* Unicast list */ | |
2267 | if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc)) | |
2268 | return; | |
2269 | ||
2270 | if (!netdev_uc_empty(dev)) | |
2271 | netdev_for_each_uc_addr(ha, dev) | |
2272 | bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); | |
2273 | /* Multicast */ | |
2274 | if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc)) | |
2275 | return; | |
2276 | ||
2277 | netdev_for_each_mc_addr(ha, dev) | |
2278 | bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); | |
2279 | } | |
2280 | ||
2281 | /* Set the hardware MAC address. */ | |
2282 | static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) | |
2283 | { | |
2284 | struct sockaddr *addr = p; | |
2285 | ||
2286 | /* Setting the MAC address at the hardware level is not possible | |
2287 | * without disabling the UniMAC RX/TX enable bits. | |
2288 | */ | |
2289 | if (netif_running(dev)) | |
2290 | return -EBUSY; | |
2291 | ||
2292 | ether_addr_copy(dev->dev_addr, addr->sa_data); | |
2293 | ||
2294 | return 0; | |
2295 | } | |
2296 | ||
1c1008c7 FF |
2297 | static const struct net_device_ops bcmgenet_netdev_ops = { |
2298 | .ndo_open = bcmgenet_open, | |
2299 | .ndo_stop = bcmgenet_close, | |
2300 | .ndo_start_xmit = bcmgenet_xmit, | |
1c1008c7 FF |
2301 | .ndo_tx_timeout = bcmgenet_timeout, |
2302 | .ndo_set_rx_mode = bcmgenet_set_rx_mode, | |
2303 | .ndo_set_mac_address = bcmgenet_set_mac_addr, | |
2304 | .ndo_do_ioctl = bcmgenet_ioctl, | |
2305 | .ndo_set_features = bcmgenet_set_features, | |
2306 | }; | |
2307 | ||
2308 | /* Array of GENET hardware parameters/characteristics */ | |
2309 | static struct bcmgenet_hw_params bcmgenet_hw_params[] = { | |
2310 | [GENET_V1] = { | |
2311 | .tx_queues = 0, | |
2312 | .rx_queues = 0, | |
2313 | .bds_cnt = 0, | |
2314 | .bp_in_en_shift = 16, | |
2315 | .bp_in_mask = 0xffff, | |
2316 | .hfb_filter_cnt = 16, | |
2317 | .qtag_mask = 0x1F, | |
2318 | .hfb_offset = 0x1000, | |
2319 | .rdma_offset = 0x2000, | |
2320 | .tdma_offset = 0x3000, | |
2321 | .words_per_bd = 2, | |
2322 | }, | |
2323 | [GENET_V2] = { | |
2324 | .tx_queues = 4, | |
2325 | .rx_queues = 4, | |
2326 | .bds_cnt = 32, | |
2327 | .bp_in_en_shift = 16, | |
2328 | .bp_in_mask = 0xffff, | |
2329 | .hfb_filter_cnt = 16, | |
2330 | .qtag_mask = 0x1F, | |
2331 | .tbuf_offset = 0x0600, | |
2332 | .hfb_offset = 0x1000, | |
2333 | .hfb_reg_offset = 0x2000, | |
2334 | .rdma_offset = 0x3000, | |
2335 | .tdma_offset = 0x4000, | |
2336 | .words_per_bd = 2, | |
2337 | .flags = GENET_HAS_EXT, | |
2338 | }, | |
2339 | [GENET_V3] = { | |
2340 | .tx_queues = 4, | |
2341 | .rx_queues = 4, | |
2342 | .bds_cnt = 32, | |
2343 | .bp_in_en_shift = 17, | |
2344 | .bp_in_mask = 0x1ffff, | |
2345 | .hfb_filter_cnt = 48, | |
2346 | .qtag_mask = 0x3F, | |
2347 | .tbuf_offset = 0x0600, | |
2348 | .hfb_offset = 0x8000, | |
2349 | .hfb_reg_offset = 0xfc00, | |
2350 | .rdma_offset = 0x10000, | |
2351 | .tdma_offset = 0x11000, | |
2352 | .words_per_bd = 2, | |
2353 | .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR, | |
2354 | }, | |
2355 | [GENET_V4] = { | |
2356 | .tx_queues = 4, | |
2357 | .rx_queues = 4, | |
2358 | .bds_cnt = 32, | |
2359 | .bp_in_en_shift = 17, | |
2360 | .bp_in_mask = 0x1ffff, | |
2361 | .hfb_filter_cnt = 48, | |
2362 | .qtag_mask = 0x3F, | |
2363 | .tbuf_offset = 0x0600, | |
2364 | .hfb_offset = 0x8000, | |
2365 | .hfb_reg_offset = 0xfc00, | |
2366 | .rdma_offset = 0x2000, | |
2367 | .tdma_offset = 0x4000, | |
2368 | .words_per_bd = 3, | |
2369 | .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR, | |
2370 | }, | |
2371 | }; | |
2372 | ||
2373 | /* Infer hardware parameters from the detected GENET version */ | |
2374 | static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) | |
2375 | { | |
2376 | struct bcmgenet_hw_params *params; | |
2377 | u32 reg; | |
2378 | u8 major; | |
2379 | ||
2380 | if (GENET_IS_V4(priv)) { | |
2381 | bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; | |
2382 | genet_dma_ring_regs = genet_dma_ring_regs_v4; | |
2383 | priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; | |
2384 | priv->version = GENET_V4; | |
2385 | } else if (GENET_IS_V3(priv)) { | |
2386 | bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; | |
2387 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | |
2388 | priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; | |
2389 | priv->version = GENET_V3; | |
2390 | } else if (GENET_IS_V2(priv)) { | |
2391 | bcmgenet_dma_regs = bcmgenet_dma_regs_v2; | |
2392 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | |
2393 | priv->dma_rx_chk_bit = DMA_RX_CHK_V12; | |
2394 | priv->version = GENET_V2; | |
2395 | } else if (GENET_IS_V1(priv)) { | |
2396 | bcmgenet_dma_regs = bcmgenet_dma_regs_v1; | |
2397 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | |
2398 | priv->dma_rx_chk_bit = DMA_RX_CHK_V12; | |
2399 | priv->version = GENET_V1; | |
2400 | } | |
2401 | ||
2402 | /* enum genet_version starts at 1 */ | |
2403 | priv->hw_params = &bcmgenet_hw_params[priv->version]; | |
2404 | params = priv->hw_params; | |
2405 | ||
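/* The major version lives in bits 27:24 of SYS_REV_CTRL; hardware
 * reporting major 5 is treated as GENET_V4 and major 0 as GENET_V1, and a
 * mismatch with the DT-configured version is reported but not fatal.
 */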
2406 | /* Read GENET HW version */ | |
2407 | reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); | |
2408 | major = (reg >> 24 & 0x0f); | |
2409 | if (major == 5) | |
2410 | major = 4; | |
2411 | else if (major == 0) | |
2412 | major = 1; | |
2413 | if (major != priv->version) { | |
2414 | dev_err(&priv->pdev->dev, | |
2415 | "GENET version mismatch, got: %d, configured for: %d\n", | |
2416 | major, priv->version); | |
2417 | } | |
2418 | ||
2419 | /* Print the GENET core version */ | |
2420 | dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, | |
c91b7f66 | 2421 | major, (reg >> 16) & 0x0f, reg & 0xffff); |
1c1008c7 FF |
2422 | |
2423 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | |
2424 | if (!(params->flags & GENET_HAS_40BITS)) | |
2425 | pr_warn("GENET does not support 40-bits PA\n"); | |
2426 | #endif | |
2427 | ||
2428 | pr_debug("Configuration for version: %d\n" | |
2429 | "TXq: %1d, RXq: %1d, BDs: %1d\n" | |
2430 | "BP << en: %2d, BP msk: 0x%05x\n" | |
2431 | "HFB count: %2d, QTAQ msk: 0x%05x\n" | |
2432 | "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" | |
2433 | "RDMA: 0x%05x, TDMA: 0x%05x\n" | |
2434 | "Words/BD: %d\n", | |
2435 | priv->version, | |
2436 | params->tx_queues, params->rx_queues, params->bds_cnt, | |
2437 | params->bp_in_en_shift, params->bp_in_mask, | |
2438 | params->hfb_filter_cnt, params->qtag_mask, | |
2439 | params->tbuf_offset, params->hfb_offset, | |
2440 | params->hfb_reg_offset, | |
2441 | params->rdma_offset, params->tdma_offset, | |
2442 | params->words_per_bd); | |
2443 | } | |
2444 | ||
2445 | static const struct of_device_id bcmgenet_match[] = { | |
2446 | { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 }, | |
2447 | { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 }, | |
2448 | { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 }, | |
2449 | { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, | |
2450 | { }, | |
2451 | }; | |
2452 | ||
2453 | static int bcmgenet_probe(struct platform_device *pdev) | |
2454 | { | |
2455 | struct device_node *dn = pdev->dev.of_node; | |
2456 | const struct of_device_id *of_id; | |
2457 | struct bcmgenet_priv *priv; | |
2458 | struct net_device *dev; | |
2459 | const void *macaddr; | |
2460 | struct resource *r; | |
2461 | int err = -EIO; | |
2462 | ||
2463 | /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */ | |
2464 | dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1); | |
2465 | if (!dev) { | |
2466 | dev_err(&pdev->dev, "can't allocate net device\n"); | |
2467 | return -ENOMEM; | |
2468 | } | |
2469 | ||
2470 | of_id = of_match_node(bcmgenet_match, dn); | |
2471 | if (!of_id) { | |
2472 | err = -EINVAL; | |
| goto err; | |
| } | |
2473 | ||
2474 | priv = netdev_priv(dev); | |
2475 | priv->irq0 = platform_get_irq(pdev, 0); | |
2476 | priv->irq1 = platform_get_irq(pdev, 1); | |
8562056f | 2477 | priv->wol_irq = platform_get_irq(pdev, 2); |
1c1008c7 FF |
2478 | if (!priv->irq0 || !priv->irq1) { |
2479 | dev_err(&pdev->dev, "can't find IRQs\n"); | |
2480 | err = -EINVAL; | |
2481 | goto err; | |
2482 | } | |
2483 | ||
2484 | macaddr = of_get_mac_address(dn); | |
2485 | if (!macaddr) { | |
2486 | dev_err(&pdev->dev, "can't find MAC address\n"); | |
2487 | err = -EINVAL; | |
2488 | goto err; | |
2489 | } | |
2490 | ||
2491 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
5343a10d FE |
2492 | priv->base = devm_ioremap_resource(&pdev->dev, r); |
2493 | if (IS_ERR(priv->base)) { | |
2494 | err = PTR_ERR(priv->base); | |
1c1008c7 FF |
2495 | goto err; |
2496 | } | |
2497 | ||
2498 | SET_NETDEV_DEV(dev, &pdev->dev); | |
2499 | dev_set_drvdata(&pdev->dev, dev); | |
2500 | ether_addr_copy(dev->dev_addr, macaddr); | |
2501 | dev->watchdog_timeo = 2 * HZ; | |
7ad24ea4 | 2502 | dev->ethtool_ops = &bcmgenet_ethtool_ops; |
1c1008c7 FF |
2503 | dev->netdev_ops = &bcmgenet_netdev_ops; |
2504 | netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64); | |
2505 | ||
2506 | priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); | |
2507 | ||
2508 | /* Set hardware features */ | |
2509 | dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | | |
2510 | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; | |
2511 | ||
8562056f FF |
2512 | /* Request the WOL interrupt and advertise suspend if available */ |
2513 | priv->wol_irq_disabled = true; | |
2514 | err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0, | |
2515 | dev->name, priv); | |
2516 | if (!err) | |
2517 | device_set_wakeup_capable(&pdev->dev, 1); | |
2518 | ||
1c1008c7 FF |
2519 | /* Set the needed headroom to account for any possible |
2520 | * features enabling/disabling at runtime | |
2521 | */ | |
2522 | dev->needed_headroom += 64; | |
2523 | ||
2524 | netdev_boot_setup_check(dev); | |
2525 | ||
2526 | priv->dev = dev; | |
2527 | priv->pdev = pdev; | |
2528 | priv->version = (enum bcmgenet_version)of_id->data; | |
2529 | ||
2530 | bcmgenet_set_hw_params(priv); | |
2531 | ||
1c1008c7 FF |
2532 | /* Mii wait queue */ |
2533 | init_waitqueue_head(&priv->wq); | |
2534 | /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ | |
2535 | priv->rx_buf_len = RX_BUF_LENGTH; | |
2536 | INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); | |
2537 | ||
2538 | priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); | |
2539 | if (IS_ERR(priv->clk)) | |
2540 | dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); | |
2541 | ||
2542 | priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); | |
2543 | if (IS_ERR(priv->clk_wol)) | |
2544 | dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); | |
2545 | ||
2546 | if (!IS_ERR(priv->clk)) | |
2547 | clk_prepare_enable(priv->clk); | |
2548 | ||
2549 | err = reset_umac(priv); | |
2550 | if (err) | |
2551 | goto err_clk_disable; | |
2552 | ||
2553 | err = bcmgenet_mii_init(dev); | |
2554 | if (err) | |
2555 | goto err_clk_disable; | |
2556 | ||
2557 | /* Set up the number of real queues + 1 (GENET_V1 has 0 hardware queues, | |
2558 | * just the ring 16 descriptor-based TX queue) | |
2559 | */ | |
2560 | netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); | |
2561 | netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); | |
2562 | ||
219575eb FF |
2563 | /* libphy will determine the link state */ |
2564 | netif_carrier_off(dev); | |
2565 | ||
1c1008c7 FF |
2566 | /* Turn off the main clock, WOL clock is handled separately */ |
2567 | if (!IS_ERR(priv->clk)) | |
2568 | clk_disable_unprepare(priv->clk); | |
2569 | ||
0f50ce96 FF |
2570 | err = register_netdev(dev); |
2571 | if (err) | |
2572 | goto err; | |
2573 | ||
1c1008c7 FF |
2574 | return err; |
2575 | ||
2576 | err_clk_disable: | |
2577 | if (!IS_ERR(priv->clk)) | |
2578 | clk_disable_unprepare(priv->clk); | |
2579 | err: | |
2580 | free_netdev(dev); | |
2581 | return err; | |
2582 | } | |
2583 | ||
2584 | static int bcmgenet_remove(struct platform_device *pdev) | |
2585 | { | |
2586 | struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); | |
2587 | ||
2588 | dev_set_drvdata(&pdev->dev, NULL); | |
2589 | unregister_netdev(priv->dev); | |
2590 | bcmgenet_mii_exit(priv->dev); | |
2591 | free_netdev(priv->dev); | |
2592 | ||
2593 | return 0; | |
2594 | } | |
2595 | ||
b6e978e5 FF |
2596 | #ifdef CONFIG_PM_SLEEP |
2597 | static int bcmgenet_suspend(struct device *d) | |
2598 | { | |
2599 | struct net_device *dev = dev_get_drvdata(d); | |
2600 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2601 | int ret; | |
2602 | ||
2603 | if (!netif_running(dev)) | |
2604 | return 0; | |
2605 | ||
2606 | bcmgenet_netif_stop(dev); | |
2607 | ||
2608 | netif_device_detach(dev); | |
2609 | ||
2610 | /* Disable MAC receive */ | |
2611 | umac_enable_set(priv, CMD_RX_EN, false); | |
2612 | ||
2613 | ret = bcmgenet_dma_teardown(priv); | |
2614 | if (ret) | |
2615 | return ret; | |
2616 | ||
2617 | /* Disable MAC transmit. TX DMA must be disabled before this */ | |
2618 | umac_enable_set(priv, CMD_TX_EN, false); | |
2619 | ||
2620 | /* tx reclaim */ | |
2621 | bcmgenet_tx_reclaim_all(dev); | |
2622 | bcmgenet_fini_dma(priv); | |
2623 | ||
8c90db72 FF |
2624 | /* Prepare the device for Wake-on-LAN and switch to the slow clock */ |
2625 | if (device_may_wakeup(d) && priv->wolopts) { | |
2626 | bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); | |
2627 | clk_prepare_enable(priv->clk_wol); | |
2628 | } | |
2629 | ||
b6e978e5 FF |
2630 | /* Turn off the clocks */ |
2631 | clk_disable_unprepare(priv->clk); | |
2632 | ||
2633 | return 0; | |
2634 | } | |
2635 | ||
2636 | static int bcmgenet_resume(struct device *d) | |
2637 | { | |
2638 | struct net_device *dev = dev_get_drvdata(d); | |
2639 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2640 | unsigned long dma_ctrl; | |
2641 | int ret; | |
2642 | u32 reg; | |
2643 | ||
2644 | if (!netif_running(dev)) | |
2645 | return 0; | |
2646 | ||
2647 | /* Turn on the clock */ | |
2648 | ret = clk_prepare_enable(priv->clk); | |
2649 | if (ret) | |
2650 | return ret; | |
2651 | ||
2652 | bcmgenet_umac_reset(priv); | |
2653 | ||
2654 | ret = init_umac(priv); | |
2655 | if (ret) | |
2656 | goto out_clk_disable; | |
2657 | ||
8c90db72 FF |
2658 | if (priv->wolopts) |
2659 | ret = bcmgenet_wol_resume(priv); | |
2660 | ||
2661 | if (ret) | |
2662 | goto out_clk_disable; | |
2663 | ||
b6e978e5 FF |
2664 | /* disable ethernet MAC while updating its registers */ |
2665 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); | |
2666 | ||
2667 | bcmgenet_set_hw_addr(priv, dev->dev_addr); | |
2668 | ||
2669 | if (phy_is_internal(priv->phydev)) { | |
2670 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | |
2671 | reg |= EXT_ENERGY_DET_MASK; | |
2672 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | |
2673 | } | |
2674 | ||
2675 | /* Disable RX/TX DMA and flush TX queues */ | |
2676 | dma_ctrl = bcmgenet_dma_disable(priv); | |
2677 | ||
2678 | /* Reinitialize TDMA and RDMA and SW housekeeping */ | |
2679 | ret = bcmgenet_init_dma(priv); | |
2680 | if (ret) { | |
2681 | netdev_err(dev, "failed to initialize DMA\n"); | |
2682 | goto out_clk_disable; | |
2683 | } | |
2684 | ||
2685 | /* Always enable ring 16 - descriptor ring */ | |
2686 | bcmgenet_enable_dma(priv, dma_ctrl); | |
2687 | ||
2688 | netif_device_attach(dev); | |
2689 | ||
2690 | bcmgenet_netif_start(dev); | |
2691 | ||
2692 | return 0; | |
2693 | ||
2694 | out_clk_disable: | |
2695 | clk_disable_unprepare(priv->clk); | |
2696 | return ret; | |
2697 | } | |
2698 | #endif /* CONFIG_PM_SLEEP */ | |
2699 | ||
2700 | static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume); | |
2701 | ||
1c1008c7 FF |
2702 | static struct platform_driver bcmgenet_driver = { |
2703 | .probe = bcmgenet_probe, | |
2704 | .remove = bcmgenet_remove, | |
2705 | .driver = { | |
2706 | .name = "bcmgenet", | |
2707 | .owner = THIS_MODULE, | |
2708 | .of_match_table = bcmgenet_match, | |
b6e978e5 | 2709 | .pm = &bcmgenet_pm_ops, |
1c1008c7 FF |
2710 | }, |
2711 | }; | |
2712 | module_platform_driver(bcmgenet_driver); | |
2713 | ||
2714 | MODULE_AUTHOR("Broadcom Corporation"); | |
2715 | MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver"); | |
2716 | MODULE_ALIAS("platform:bcmgenet"); | |
2717 | MODULE_LICENSE("GPL"); |