/* Synopsys DWC Ethernet Quality-of-Service v4.10a Linux driver
 *
 * This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC).
 * This version introduced many changes which break backward compatibility
 * with the non-QoS IP from Synopsys (used in the ST Micro drivers).
 * Some fields differ between version 4.00a and 4.10a, mainly the interrupt
 * bit fields. The driver could be made compatible with 4.00a if all relevant
 * HW errata are handled.
 *
 * The GMAC is highly configurable at synthesis time. This driver has been
 * developed for a subset of the total available feature set. Currently
 * it supports:
 * - Checksum offload for RX and TX.
 * - Energy-efficient Ethernet.
 * - GMII PHY interface.
 * - The statistics module.
 * - Single RX and TX queue.
 *
 * Copyright (C) 2015 Axis Communications AB.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */
27 #include <linux/clk.h>
28 #include <linux/module.h>
29 #include <linux/kernel.h>
30 #include <linux/init.h>
32 #include <linux/ethtool.h>
33 #include <linux/stat.h>
34 #include <linux/types.h>
37 #include <linux/slab.h>
38 #include <linux/delay.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/platform_device.h>
44 #include <linux/phy.h>
45 #include <linux/mii.h>
47 #include <linux/dma-mapping.h>
48 #include <linux/vmalloc.h>
49 #include <linux/version.h>
51 #include <linux/device.h>
52 #include <linux/bitrev.h>
53 #include <linux/crc32.h>
56 #include <linux/interrupt.h>
57 #include <linux/clocksource.h>
58 #include <linux/net_tstamp.h>
59 #include <linux/pm_runtime.h>
60 #include <linux/of_net.h>
61 #include <linux/of_address.h>
62 #include <linux/of_mdio.h>
63 #include <linux/timer.h>
64 #include <linux/tcp.h>
66 #define DRIVER_NAME "dwceqos"
67 #define DRIVER_DESCRIPTION "Synopsys DWC Ethernet QoS driver"
68 #define DRIVER_VERSION "0.9"
70 #define DWCEQOS_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
71 NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
73 #define DWCEQOS_TX_TIMEOUT 5 /* Seconds */
75 #define DWCEQOS_LPI_TIMER_MIN 8
76 #define DWCEQOS_LPI_TIMER_MAX ((1 << 20) - 1)
78 #define DWCEQOS_RX_BUF_SIZE 2048
80 #define DWCEQOS_RX_DCNT 256
81 #define DWCEQOS_TX_DCNT 256
83 #define DWCEQOS_HASH_TABLE_SIZE 64
85 /* The size field in the DMA descriptor is 14 bits */
86 #define BYTES_PER_DMA_DESC 16376
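/* 16376 = 0x3ff8: the 14-bit maximum rounded down to a multiple of 8,
 * presumably so per-descriptor buffer lengths stay bus aligned.
 */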
88 /* Hardware registers */
89 #define START_MAC_REG_OFFSET 0x0000
90 #define MAX_MAC_REG_OFFSET 0x0bd0
91 #define START_MTL_REG_OFFSET 0x0c00
92 #define MAX_MTL_REG_OFFSET 0x0d7c
93 #define START_DMA_REG_OFFSET 0x1000
94 #define MAX_DMA_REG_OFFSET 0x117C
96 #define REG_SPACE_SIZE 0x1800
99 #define REG_DWCEQOS_DMA_MODE 0x1000
100 #define REG_DWCEQOS_DMA_SYSBUS_MODE 0x1004
101 #define REG_DWCEQOS_DMA_IS 0x1008
102 #define REG_DWCEQOS_DMA_DEBUG_ST0 0x100c
104 /* DMA channel registers */
105 #define REG_DWCEQOS_DMA_CH0_CTRL 0x1100
106 #define REG_DWCEQOS_DMA_CH0_TX_CTRL 0x1104
107 #define REG_DWCEQOS_DMA_CH0_RX_CTRL 0x1108
108 #define REG_DWCEQOS_DMA_CH0_TXDESC_LIST 0x1114
109 #define REG_DWCEQOS_DMA_CH0_RXDESC_LIST 0x111c
110 #define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL 0x1120
111 #define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL 0x1128
112 #define REG_DWCEQOS_DMA_CH0_TXDESC_LEN 0x112c
113 #define REG_DWCEQOS_DMA_CH0_RXDESC_LEN 0x1130
114 #define REG_DWCEQOS_DMA_CH0_IE 0x1134
115 #define REG_DWCEQOS_DMA_CH0_CUR_TXDESC 0x1144
116 #define REG_DWCEQOS_DMA_CH0_CUR_RXDESC 0x114c
117 #define REG_DWCEQOS_DMA_CH0_CUR_TXBUF 0x1154
#define REG_DWCEQOS_DMA_CH0_CUR_RXBUF    0x115c
119 #define REG_DWCEQOS_DMA_CH0_STA 0x1160
121 #define DWCEQOS_DMA_MODE_TXPR BIT(11)
122 #define DWCEQOS_DMA_MODE_DA BIT(1)
124 #define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI BIT(31)
125 #define DWCEQOS_DMA_SYSBUS_MODE_FB BIT(0)
126 #define DWCEQOS_DMA_SYSBUS_MODE_AAL BIT(12)
128 #define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \
129 (((x) << 16) & 0x000F0000)
130 #define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT 3
131 #define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK GENMASK(19, 16)
133 #define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \
134 (((x) << 24) & 0x0F000000)
135 #define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT 3
136 #define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK GENMASK(27, 24)
138 #define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1)
139 #define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \
140 (((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK)
141 #define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT GENMASK(3, 1)
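/* Bits 7:1 of DMA_SysBus_Mode select the allowed AXI burst lengths
 * (BLEN4..BLEN256); the default above enables 4, 8 and 16 beat bursts.
 */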
143 #define DWCEQOS_DMA_CH_CTRL_PBLX8 BIT(16)
144 #define DWCEQOS_DMA_CH_CTRL_DSL(x) ((x) << 18)
146 #define DWCEQOS_DMA_CH_CTRL_PBL(x) ((x) << 16)
147 #define DWCEQOS_DMA_CH_CTRL_START BIT(0)
148 #define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x) ((x) << 1)
149 #define DWCEQOS_DMA_CH_TX_OSP BIT(4)
150 #define DWCEQOS_DMA_CH_TX_TSE BIT(12)
152 #define DWCEQOS_DMA_CH0_IE_NIE BIT(15)
153 #define DWCEQOS_DMA_CH0_IE_AIE BIT(14)
154 #define DWCEQOS_DMA_CH0_IE_RIE BIT(6)
155 #define DWCEQOS_DMA_CH0_IE_TIE BIT(0)
156 #define DWCEQOS_DMA_CH0_IE_FBEE BIT(12)
157 #define DWCEQOS_DMA_CH0_IE_RBUE BIT(7)
159 #define DWCEQOS_DMA_IS_DC0IS BIT(0)
160 #define DWCEQOS_DMA_IS_MTLIS BIT(16)
161 #define DWCEQOS_DMA_IS_MACIS BIT(17)
163 #define DWCEQOS_DMA_CH0_IS_TI BIT(0)
164 #define DWCEQOS_DMA_CH0_IS_RI BIT(6)
165 #define DWCEQOS_DMA_CH0_IS_RBU BIT(7)
166 #define DWCEQOS_DMA_CH0_IS_FBE BIT(12)
167 #define DWCEQOS_DMA_CH0_IS_CDE BIT(13)
168 #define DWCEQOS_DMA_CH0_IS_AIS BIT(14)
170 #define DWCEQOS_DMA_CH0_IS_TEB GENMASK(18, 16)
171 #define DWCEQOS_DMA_CH0_IS_TX_ERR_READ BIT(16)
172 #define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR BIT(17)
174 #define DWCEQOS_DMA_CH0_IS_REB GENMASK(21, 19)
175 #define DWCEQOS_DMA_CH0_IS_RX_ERR_READ BIT(19)
176 #define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR BIT(20)
178 /* DMA descriptor bits for RX normal descriptor (read format) */
179 #define DWCEQOS_DMA_RDES3_OWN BIT(31)
180 #define DWCEQOS_DMA_RDES3_INTE BIT(30)
181 #define DWCEQOS_DMA_RDES3_BUF2V BIT(25)
182 #define DWCEQOS_DMA_RDES3_BUF1V BIT(24)
184 /* DMA descriptor bits for RX normal descriptor (write back format) */
185 #define DWCEQOS_DMA_RDES1_IPCE BIT(7)
186 #define DWCEQOS_DMA_RDES3_ES BIT(15)
187 #define DWCEQOS_DMA_RDES3_E_JT BIT(14)
188 #define DWCEQOS_DMA_RDES3_PL(x) ((x) & 0x7fff)
189 #define DWCEQOS_DMA_RDES1_PT 0x00000007
190 #define DWCEQOS_DMA_RDES1_PT_UDP BIT(0)
191 #define DWCEQOS_DMA_RDES1_PT_TCP BIT(1)
192 #define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003
194 /* DMA descriptor bits for TX normal descriptor (read format) */
195 #define DWCEQOS_DMA_TDES2_IOC BIT(31)
196 #define DWCEQOS_DMA_TDES3_OWN BIT(31)
197 #define DWCEQOS_DMA_TDES3_CTXT BIT(30)
198 #define DWCEQOS_DMA_TDES3_FD BIT(29)
199 #define DWCEQOS_DMA_TDES3_LD BIT(28)
200 #define DWCEQOS_DMA_TDES3_CIPH BIT(16)
201 #define DWCEQOS_DMA_TDES3_CIPP BIT(17)
202 #define DWCEQOS_DMA_TDES3_CA 0x00030000
203 #define DWCEQOS_DMA_TDES3_TSE BIT(18)
204 #define DWCEQOS_DMA_DES3_THL(x) ((x) << 19)
205 #define DWCEQOS_DMA_DES2_B2L(x) ((x) << 16)
207 #define DWCEQOS_DMA_TDES3_TCMSSV BIT(26)
209 /* DMA channel states */
210 #define DMA_TX_CH_STOPPED 0
211 #define DMA_TX_CH_SUSPENDED 6
213 #define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12)
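/* Extract the channel 0 TX DMA state from DMA_Debug_Status0; used when
 * draining the TX path to wait for the DMA_TX_CH_SUSPENDED state.
 */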
216 #define REG_DWCEQOS_MTL_OPER 0x0c00
217 #define REG_DWCEQOS_MTL_DEBUG_ST 0x0c0c
218 #define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST 0x0d08
219 #define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST 0x0d38
221 #define REG_DWCEQOS_MTL_IS 0x0c20
222 #define REG_DWCEQOS_MTL_TXQ0_OPER 0x0d00
223 #define REG_DWCEQOS_MTL_RXQ0_OPER 0x0d30
224 #define REG_DWCEQOS_MTL_RXQ0_MIS_CNT 0x0d34
225 #define REG_DWCEQOS_MTL_RXQ0_CTRL 0x0d3c
227 #define REG_DWCEQOS_MTL_Q0_ISCTRL 0x0d2c
229 #define DWCEQOS_MTL_SCHALG_STRICT 0x00000060
231 #define DWCEQOS_MTL_TXQ_TXQEN BIT(3)
232 #define DWCEQOS_MTL_TXQ_TSF BIT(1)
233 #define DWCEQOS_MTL_TXQ_FTQ BIT(0)
234 #define DWCEQOS_MTL_TXQ_TTC512 0x00000070
236 #define DWCEQOS_MTL_TXQ_SIZE(x) ((((x) - 256) & 0xff00) << 8)
238 #define DWCEQOS_MTL_RXQ_SIZE(x) ((((x) - 256) & 0xff00) << 12)
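/* Queue sizes are programmed as (bytes / 256) - 1; these macros take the
 * FIFO size in bytes (as reported by MAC_HW_Feature1) and place the encoded
 * value in the TQS/RQS fields of the MTL queue operation mode registers.
 */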
239 #define DWCEQOS_MTL_RXQ_EHFC BIT(7)
240 #define DWCEQOS_MTL_RXQ_DIS_TCP_EF BIT(6)
241 #define DWCEQOS_MTL_RXQ_FEP BIT(4)
242 #define DWCEQOS_MTL_RXQ_FUP BIT(3)
243 #define DWCEQOS_MTL_RXQ_RSF BIT(5)
244 #define DWCEQOS_MTL_RXQ_RTC32 BIT(0)
247 #define REG_DWCEQOS_MAC_CFG 0x0000
248 #define REG_DWCEQOS_MAC_EXT_CFG 0x0004
249 #define REG_DWCEQOS_MAC_PKT_FILT 0x0008
250 #define REG_DWCEQOS_MAC_WD_TO 0x000c
251 #define REG_DWCEQOS_HASTABLE_LO 0x0010
252 #define REG_DWCEQOS_HASTABLE_HI 0x0014
253 #define REG_DWCEQOS_MAC_IS 0x00b0
254 #define REG_DWCEQOS_MAC_IE 0x00b4
255 #define REG_DWCEQOS_MAC_STAT 0x00b8
256 #define REG_DWCEQOS_MAC_MDIO_ADDR 0x0200
257 #define REG_DWCEQOS_MAC_MDIO_DATA 0x0204
258 #define REG_DWCEQOS_MAC_MAC_ADDR0_HI 0x0300
259 #define REG_DWCEQOS_MAC_MAC_ADDR0_LO 0x0304
260 #define REG_DWCEQOS_MAC_RXQ0_CTRL0 0x00a0
261 #define REG_DWCEQOS_MAC_HW_FEATURE0 0x011c
262 #define REG_DWCEQOS_MAC_HW_FEATURE1 0x0120
263 #define REG_DWCEQOS_MAC_HW_FEATURE2 0x0124
264 #define REG_DWCEQOS_MAC_HASHTABLE_LO 0x0010
265 #define REG_DWCEQOS_MAC_HASHTABLE_HI 0x0014
266 #define REG_DWCEQOS_MAC_LPI_CTRL_STATUS 0x00d0
267 #define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL 0x00d4
268 #define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER 0x00d8
269 #define REG_DWCEQOS_MAC_1US_TIC_COUNTER 0x00dc
270 #define REG_DWCEQOS_MAC_RX_FLOW_CTRL 0x0090
271 #define REG_DWCEQOS_MAC_Q0_TX_FLOW 0x0070
273 #define DWCEQOS_MAC_CFG_ACS BIT(20)
274 #define DWCEQOS_MAC_CFG_JD BIT(17)
275 #define DWCEQOS_MAC_CFG_JE BIT(16)
276 #define DWCEQOS_MAC_CFG_PS BIT(15)
277 #define DWCEQOS_MAC_CFG_FES BIT(14)
278 #define DWCEQOS_MAC_CFG_DM BIT(13)
279 #define DWCEQOS_MAC_CFG_DO BIT(10)
280 #define DWCEQOS_MAC_CFG_TE BIT(1)
281 #define DWCEQOS_MAC_CFG_IPC BIT(27)
282 #define DWCEQOS_MAC_CFG_RE BIT(0)
284 #define DWCEQOS_ADDR_HIGH(reg) (0x00000300 + (reg * 8))
285 #define DWCEQOS_ADDR_LOW(reg) (0x00000304 + (reg * 8))
287 #define DWCEQOS_MAC_IS_LPI_INT BIT(5)
288 #define DWCEQOS_MAC_IS_MMC_INT BIT(8)
290 #define DWCEQOS_MAC_RXQ_EN BIT(1)
291 #define DWCEQOS_MAC_MAC_ADDR_HI_EN BIT(31)
292 #define DWCEQOS_MAC_PKT_FILT_RA BIT(31)
293 #define DWCEQOS_MAC_PKT_FILT_HPF BIT(10)
294 #define DWCEQOS_MAC_PKT_FILT_SAF BIT(9)
295 #define DWCEQOS_MAC_PKT_FILT_SAIF BIT(8)
296 #define DWCEQOS_MAC_PKT_FILT_DBF BIT(5)
297 #define DWCEQOS_MAC_PKT_FILT_PM BIT(4)
298 #define DWCEQOS_MAC_PKT_FILT_DAIF BIT(3)
299 #define DWCEQOS_MAC_PKT_FILT_HMC BIT(2)
300 #define DWCEQOS_MAC_PKT_FILT_HUC BIT(1)
301 #define DWCEQOS_MAC_PKT_FILT_PR BIT(0)
303 #define DWCEQOS_MAC_MDIO_ADDR_CR(x) (((x & 15)) << 8)
304 #define DWCEQOS_MAC_MDIO_ADDR_CR_20 2
305 #define DWCEQOS_MAC_MDIO_ADDR_CR_35 3
306 #define DWCEQOS_MAC_MDIO_ADDR_CR_60 0
307 #define DWCEQOS_MAC_MDIO_ADDR_CR_100 1
308 #define DWCEQOS_MAC_MDIO_ADDR_CR_150 4
309 #define DWCEQOS_MAC_MDIO_ADDR_CR_250 5
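/* CR values select the MDC clock divider; the suffix is the upper bound of
 * the CSR clock range in MHz (see dwceqos_mdio_set_csr()).
 */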
310 #define DWCEQOS_MAC_MDIO_ADDR_GOC_READ 0x0000000c
311 #define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE BIT(2)
312 #define DWCEQOS_MAC_MDIO_ADDR_GB BIT(0)
314 #define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN BIT(0)
315 #define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX BIT(1)
316 #define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN BIT(2)
317 #define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX BIT(3)
318 #define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST BIT(8)
319 #define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST BIT(9)
320 #define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN BIT(16)
321 #define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS BIT(17)
322 #define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN BIT(18)
323 #define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA BIT(19)
324 #define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE BIT(20)
325 #define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21)
327 #define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x) ((x) & GENMASK(11, 0))
329 #define DWCEQOS_LPI_CTRL_ENABLE_EEE (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \
330 DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \
331 DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN)
333 #define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)
335 #define DWCEQOS_MAC_Q0_TX_FLOW_TFE BIT(1)
336 #define DWCEQOS_MAC_Q0_TX_FLOW_PT(time) ((time) << 16)
337 #define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4)
340 #define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16)
341 #define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14)
342 #define DWCEQOS_MAC_HW_FEATURE0_HDSEL BIT(2)
343 #define DWCEQOS_MAC_HW_FEATURE0_EEESEL BIT(13)
344 #define DWCEQOS_MAC_HW_FEATURE0_GMIISEL BIT(1)
345 #define DWCEQOS_MAC_HW_FEATURE0_MIISEL BIT(0)
347 #define DWCEQOS_MAC_HW_FEATURE1_TSOEN BIT(18)
348 #define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) ((128 << ((x) & 0x7c0)) >> 6)
349 #define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x) (128 << ((x) & 0x1f))
351 #define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \
352 (1 + (((feature1) & 0x1fc0000) >> 18))
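/* MAC_HW_Feature1[24:18] encodes the number of additional MAC address
 * registers; the +1 accounts for MAC address 0.
 */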
354 #define DWCEQOS_MDIO_PHYADDR(x) (((x) & 0x1f) << 21)
355 #define DWCEQOS_MDIO_PHYREG(x) (((x) & 0x1f) << 16)
357 #define DWCEQOS_DMA_MODE_SWR BIT(0)
359 #define DWCEQOS_DWCEQOS_RX_BUF_SIZE 2048
361 /* Mac Management Counters */
362 #define REG_DWCEQOS_MMC_CTRL 0x0700
363 #define REG_DWCEQOS_MMC_RXIRQ 0x0704
364 #define REG_DWCEQOS_MMC_TXIRQ 0x0708
365 #define REG_DWCEQOS_MMC_RXIRQMASK 0x070c
366 #define REG_DWCEQOS_MMC_TXIRQMASK 0x0710
368 #define DWCEQOS_MMC_CTRL_CNTRST BIT(0)
369 #define DWCEQOS_MMC_CTRL_RSTONRD BIT(2)
371 #define DWC_MMC_TXLPITRANSCNTR 0x07F0
372 #define DWC_MMC_TXLPIUSCNTR 0x07EC
373 #define DWC_MMC_TXOVERSIZE_G 0x0778
374 #define DWC_MMC_TXVLANPACKETS_G 0x0774
375 #define DWC_MMC_TXPAUSEPACKETS 0x0770
376 #define DWC_MMC_TXEXCESSDEF 0x076C
377 #define DWC_MMC_TXPACKETCOUNT_G 0x0768
378 #define DWC_MMC_TXOCTETCOUNT_G 0x0764
379 #define DWC_MMC_TXCARRIERERROR 0x0760
380 #define DWC_MMC_TXEXCESSCOL 0x075C
381 #define DWC_MMC_TXLATECOL 0x0758
382 #define DWC_MMC_TXDEFERRED 0x0754
383 #define DWC_MMC_TXMULTICOL_G 0x0750
384 #define DWC_MMC_TXSINGLECOL_G 0x074C
385 #define DWC_MMC_TXUNDERFLOWERROR 0x0748
386 #define DWC_MMC_TXBROADCASTPACKETS_GB 0x0744
387 #define DWC_MMC_TXMULTICASTPACKETS_GB 0x0740
388 #define DWC_MMC_TXUNICASTPACKETS_GB 0x073C
389 #define DWC_MMC_TX1024TOMAXOCTETS_GB 0x0738
390 #define DWC_MMC_TX512TO1023OCTETS_GB 0x0734
391 #define DWC_MMC_TX256TO511OCTETS_GB 0x0730
392 #define DWC_MMC_TX128TO255OCTETS_GB 0x072C
393 #define DWC_MMC_TX65TO127OCTETS_GB 0x0728
394 #define DWC_MMC_TX64OCTETS_GB 0x0724
395 #define DWC_MMC_TXMULTICASTPACKETS_G 0x0720
396 #define DWC_MMC_TXBROADCASTPACKETS_G 0x071C
397 #define DWC_MMC_TXPACKETCOUNT_GB 0x0718
398 #define DWC_MMC_TXOCTETCOUNT_GB 0x0714
400 #define DWC_MMC_RXLPITRANSCNTR 0x07F8
401 #define DWC_MMC_RXLPIUSCNTR 0x07F4
402 #define DWC_MMC_RXCTRLPACKETS_G 0x07E4
403 #define DWC_MMC_RXRCVERROR 0x07E0
404 #define DWC_MMC_RXWATCHDOG 0x07DC
405 #define DWC_MMC_RXVLANPACKETS_GB 0x07D8
406 #define DWC_MMC_RXFIFOOVERFLOW 0x07D4
407 #define DWC_MMC_RXPAUSEPACKETS 0x07D0
408 #define DWC_MMC_RXOUTOFRANGETYPE 0x07CC
409 #define DWC_MMC_RXLENGTHERROR 0x07C8
410 #define DWC_MMC_RXUNICASTPACKETS_G 0x07C4
411 #define DWC_MMC_RX1024TOMAXOCTETS_GB 0x07C0
412 #define DWC_MMC_RX512TO1023OCTETS_GB 0x07BC
413 #define DWC_MMC_RX256TO511OCTETS_GB 0x07B8
414 #define DWC_MMC_RX128TO255OCTETS_GB 0x07B4
415 #define DWC_MMC_RX65TO127OCTETS_GB 0x07B0
416 #define DWC_MMC_RX64OCTETS_GB 0x07AC
417 #define DWC_MMC_RXOVERSIZE_G 0x07A8
418 #define DWC_MMC_RXUNDERSIZE_G 0x07A4
419 #define DWC_MMC_RXJABBERERROR 0x07A0
420 #define DWC_MMC_RXRUNTERROR 0x079C
421 #define DWC_MMC_RXALIGNMENTERROR 0x0798
422 #define DWC_MMC_RXCRCERROR 0x0794
423 #define DWC_MMC_RXMULTICASTPACKETS_G 0x0790
424 #define DWC_MMC_RXBROADCASTPACKETS_G 0x078C
425 #define DWC_MMC_RXOCTETCOUNT_G 0x0788
426 #define DWC_MMC_RXOCTETCOUNT_GB 0x0784
427 #define DWC_MMC_RXPACKETCOUNT_GB 0x0780
static int debug = 3;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
/* DMA ring descriptor. These are used as support descriptors for the HW DMA */
struct ring_desc {
	struct sk_buff *skb;
	dma_addr_t mapping;
	size_t len;
};

/* DMA hardware descriptor */
struct dwceqos_dma_desc {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
} ____cacheline_aligned;
struct dwceqos_mmc_counters {
	__u64 txlpitranscntr;
	__u64 txlpiuscntr;
	__u64 txoversize_g;
	__u64 txvlanpackets_g;
	__u64 txpausepackets;
	__u64 txexcessdef;
	__u64 txpacketcount_g;
	__u64 txoctetcount_g;
	__u64 txcarriererror;
	__u64 txexcesscol;
	__u64 txlatecol;
	__u64 txdeferred;
	__u64 txmulticol_g;
	__u64 txsinglecol_g;
	__u64 txunderflowerror;
	__u64 txbroadcastpackets_gb;
	__u64 txmulticastpackets_gb;
	__u64 txunicastpackets_gb;
	__u64 tx1024tomaxoctets_gb;
	__u64 tx512to1023octets_gb;
	__u64 tx256to511octets_gb;
	__u64 tx128to255octets_gb;
	__u64 tx65to127octets_gb;
	__u64 tx64octets_gb;
	__u64 txmulticastpackets_g;
	__u64 txbroadcastpackets_g;
	__u64 txpacketcount_gb;
	__u64 txoctetcount_gb;

	__u64 rxlpitranscntr;
	__u64 rxlpiuscntr;
	__u64 rxctrlpackets_g;
	__u64 rxrcverror;
	__u64 rxwatchdog;
	__u64 rxvlanpackets_gb;
	__u64 rxfifooverflow;
	__u64 rxpausepackets;
	__u64 rxoutofrangetype;
	__u64 rxlengtherror;
	__u64 rxunicastpackets_g;
	__u64 rx1024tomaxoctets_gb;
	__u64 rx512to1023octets_gb;
	__u64 rx256to511octets_gb;
	__u64 rx128to255octets_gb;
	__u64 rx65to127octets_gb;
	__u64 rx64octets_gb;
	__u64 rxoversize_g;
	__u64 rxundersize_g;
	__u64 rxjabbererror;
	__u64 rxrunterror;
	__u64 rxalignmenterror;
	__u64 rxcrcerror;
	__u64 rxmulticastpackets_g;
	__u64 rxbroadcastpackets_g;
	__u64 rxoctetcount_g;
	__u64 rxoctetcount_gb;
	__u64 rxpacketcount_gb;
};
/* Ethtool statistics */

struct dwceqos_stat {
	const char stat_name[ETH_GSTRING_LEN];
	size_t offset;
};

#define STAT_ITEM(name, var) \
	{\
		name,\
		offsetof(struct dwceqos_mmc_counters, var),\
	}
static const struct dwceqos_stat dwceqos_ethtool_stats[] = {
	STAT_ITEM("tx_bytes", txoctetcount_gb),
	STAT_ITEM("tx_packets", txpacketcount_gb),
	STAT_ITEM("tx_unicast_packets", txunicastpackets_gb),
	STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb),
	STAT_ITEM("tx_multicast_packets", txmulticastpackets_gb),
	STAT_ITEM("tx_pause_packets", txpausepackets),
	STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb),
	STAT_ITEM("tx_65_to_127_byte_packets", tx65to127octets_gb),
	STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb),
	STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb),
	STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb),
	STAT_ITEM("tx_underflow_errors", txunderflowerror),
	STAT_ITEM("tx_lpi_count", txlpitranscntr),

	STAT_ITEM("rx_bytes", rxoctetcount_gb),
	STAT_ITEM("rx_packets", rxpacketcount_gb),
	STAT_ITEM("rx_unicast_packets", rxunicastpackets_g),
	STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g),
	STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g),
	STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb),
	STAT_ITEM("rx_pause_packets", rxpausepackets),
	STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb),
	STAT_ITEM("rx_65_to_127_byte_packets", rx65to127octets_gb),
	STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb),
	STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb),
	STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb),
	STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow),
	STAT_ITEM("rx_oversize_packets", rxoversize_g),
	STAT_ITEM("rx_undersize_packets", rxundersize_g),
	STAT_ITEM("rx_jabbers", rxjabbererror),
	STAT_ITEM("rx_align_errors", rxalignmenterror),
	STAT_ITEM("rx_crc_errors", rxcrcerror),
	STAT_ITEM("rx_lpi_count", rxlpitranscntr),
};
/* Configuration of AXI bus parameters.
 * These values depend on the parameters set on the MAC core as well
 * as the AXI interconnect.
 */
struct dwceqos_bus_cfg {
	/* Enable AXI low-power interface. */
	bool en_lpi;
	/* Limit on number of outstanding AXI write requests. */
	u32 write_requests;
	/* Limit on number of outstanding AXI read requests. */
	u32 read_requests;
	/* Bitmap of allowed AXI burst lengths, 4-256 beats. */
	u32 burst_map;
	/* DMA Programmable burst length */
	u32 tx_pbl;
	u32 rx_pbl;
};

struct dwceqos_flowcontrol {
	int autoneg;
	int rx;
	int rx_current;
	int tx;
	int tx_current;
};
struct net_local {
	void __iomem *baseaddr;
	struct clk *phy_ref_clk;
	struct clk *apb_pclk;

	struct device_node *phy_node;
	struct net_device *ndev;
	struct platform_device *pdev;

	u32 msg_enable;

	struct tasklet_struct tx_bdreclaim_tasklet;
	struct workqueue_struct *txtimeout_handler_wq;
	struct work_struct txtimeout_reinit;

	phy_interface_t phy_interface;
	struct phy_device *phy_dev;
	struct mii_bus *mii_bus;

	unsigned int link;
	unsigned int speed;
	unsigned int duplex;

	struct napi_struct napi;

	/* DMA Descriptor Areas */
	struct ring_desc *rx_skb;
	struct ring_desc *tx_skb;

	struct dwceqos_dma_desc *tx_descs;
	struct dwceqos_dma_desc *rx_descs;

	/* DMA Mapped Descriptor areas */
	dma_addr_t tx_descs_addr;
	dma_addr_t rx_descs_addr;
	dma_addr_t tx_descs_tail_addr;
	dma_addr_t rx_descs_tail_addr;

	size_t rx_cur;
	size_t tx_next;
	size_t tx_cur;
	size_t tx_free;

	u32 gso_size;

	/* Spinlocks for accessing DMA Descriptors */
	spinlock_t tx_lock;

	/* Spinlock for register read-modify-writes. */
	spinlock_t hw_lock;

	u32 feature0;
	u32 feature1;
	u32 feature2;

	struct dwceqos_bus_cfg bus_cfg;
	bool en_tx_lpi_clockgating;

	int eee_enabled;
	int eee_active;
	int csr_val;

	struct dwceqos_mmc_counters mmc_counters;
	/* Protect the mmc_counter updates. */
	spinlock_t stats_lock;
	u32 mmc_rx_counters_mask;
	u32 mmc_tx_counters_mask;

	struct dwceqos_flowcontrol flowcontrol;
};
static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
				      u32 tx_mask);

static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
				  unsigned int reg_n);
static int dwceqos_stop(struct net_device *ndev);
static int dwceqos_open(struct net_device *ndev);
static void dwceqos_tx_poll_demand(struct net_local *lp);

static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable);
static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable);

static void dwceqos_reset_state(struct net_local *lp);

#define dwceqos_read(lp, reg)						\
	readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg))
#define dwceqos_write(lp, reg, val)					\
	writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg))
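/* Relaxed MMIO accessors; ordering against descriptor memory is enforced
 * explicitly at the call sites that need it (see the "visible to the DMA"
 * barrier comments in the descriptor paths below).
 */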
674 static void dwceqos_reset_state(struct net_local
*lp
)
678 lp
->duplex
= DUPLEX_UNKNOWN
;
679 lp
->flowcontrol
.rx_current
= 0;
680 lp
->flowcontrol
.tx_current
= 0;
685 static void print_descriptor(struct net_local
*lp
, int index
, int tx
)
687 struct dwceqos_dma_desc
*dd
;
690 dd
= (struct dwceqos_dma_desc
*)&lp
->tx_descs
[index
];
692 dd
= (struct dwceqos_dma_desc
*)&lp
->rx_descs
[index
];
694 pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx
? "TX" : "RX",
696 pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd
->des0
, dd
->des1
, dd
->des2
,
700 static void print_status(struct net_local
*lp
)
704 pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp
->tx_free
,
705 lp
->tx_cur
, lp
->tx_next
);
707 print_descriptor(lp
, lp
->rx_cur
, 0);
709 for (desci
= (lp
->tx_cur
- 10) % DWCEQOS_TX_DCNT
, i
= 0;
712 print_descriptor(lp
, desci
, 1);
713 desci
= (desci
+ 1) % DWCEQOS_TX_DCNT
;
716 pr_info("DMA_Debug_Status0: 0x%08x\n",
717 dwceqos_read(lp
, REG_DWCEQOS_DMA_DEBUG_ST0
));
718 pr_info("DMA_CH0_Status: 0x%08x\n",
719 dwceqos_read(lp
, REG_DWCEQOS_DMA_IS
));
720 pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n",
721 dwceqos_read(lp
, 0x1144));
722 pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n",
723 dwceqos_read(lp
, 0x1154));
724 pr_info("MTL_Debug_Status: 0x%08x\n",
725 dwceqos_read(lp
, REG_DWCEQOS_MTL_DEBUG_ST
));
726 pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n",
727 dwceqos_read(lp
, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST
));
728 pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n",
729 dwceqos_read(lp
, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST
));
730 pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n",
731 dwceqos_read(lp
, REG_DWCEQOS_DMA_CH0_CUR_TXDESC
),
732 dwceqos_read(lp
, REG_DWCEQOS_DMA_CH0_CUR_RXDESC
));
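/* Select the MDC clock divider (the CR field of MAC_MDIO_Address) from the
 * CSR clock (apb_pclk) rate so MDC stays within its permitted range.
 */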
735 static void dwceqos_mdio_set_csr(struct net_local
*lp
)
737 int rate
= clk_get_rate(lp
->apb_pclk
);
739 if (rate
<= 20000000)
740 lp
->csr_val
= DWCEQOS_MAC_MDIO_ADDR_CR_20
;
741 else if (rate
<= 35000000)
742 lp
->csr_val
= DWCEQOS_MAC_MDIO_ADDR_CR_35
;
743 else if (rate
<= 60000000)
744 lp
->csr_val
= DWCEQOS_MAC_MDIO_ADDR_CR_60
;
745 else if (rate
<= 100000000)
746 lp
->csr_val
= DWCEQOS_MAC_MDIO_ADDR_CR_100
;
747 else if (rate
<= 150000000)
748 lp
->csr_val
= DWCEQOS_MAC_MDIO_ADDR_CR_150
;
749 else if (rate
<= 250000000)
750 lp
->csr_val
= DWCEQOS_MAC_MDIO_ADDR_CR_250
;
753 /* Simple MDIO functions implementing mii_bus */
754 static int dwceqos_mdio_read(struct mii_bus
*bus
, int mii_id
, int phyreg
)
756 struct net_local
*lp
= bus
->priv
;
761 regval
= DWCEQOS_MDIO_PHYADDR(mii_id
) |
762 DWCEQOS_MDIO_PHYREG(phyreg
) |
763 DWCEQOS_MAC_MDIO_ADDR_CR(lp
->csr_val
) |
764 DWCEQOS_MAC_MDIO_ADDR_GB
|
765 DWCEQOS_MAC_MDIO_ADDR_GOC_READ
;
766 dwceqos_write(lp
, REG_DWCEQOS_MAC_MDIO_ADDR
, regval
);
768 for (i
= 0; i
< 5; ++i
) {
769 usleep_range(64, 128);
770 if (!(dwceqos_read(lp
, REG_DWCEQOS_MAC_MDIO_ADDR
) &
771 DWCEQOS_MAC_MDIO_ADDR_GB
))
775 data
= dwceqos_read(lp
, REG_DWCEQOS_MAC_MDIO_DATA
);
777 netdev_warn(lp
->ndev
, "MDIO read timed out\n");
781 return data
& 0xffff;
784 static int dwceqos_mdio_write(struct mii_bus
*bus
, int mii_id
, int phyreg
,
787 struct net_local
*lp
= bus
->priv
;
791 dwceqos_write(lp
, REG_DWCEQOS_MAC_MDIO_DATA
, value
);
793 regval
= DWCEQOS_MDIO_PHYADDR(mii_id
) |
794 DWCEQOS_MDIO_PHYREG(phyreg
) |
795 DWCEQOS_MAC_MDIO_ADDR_CR(lp
->csr_val
) |
796 DWCEQOS_MAC_MDIO_ADDR_GB
|
797 DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE
;
798 dwceqos_write(lp
, REG_DWCEQOS_MAC_MDIO_ADDR
, regval
);
800 for (i
= 0; i
< 5; ++i
) {
801 usleep_range(64, 128);
802 if (!(dwceqos_read(lp
, REG_DWCEQOS_MAC_MDIO_ADDR
) &
803 DWCEQOS_MAC_MDIO_ADDR_GB
))
807 netdev_warn(lp
->ndev
, "MDIO write timed out\n");
811 static int dwceqos_ioctl(struct net_device
*ndev
, struct ifreq
*rq
, int cmd
)
813 struct net_local
*lp
= netdev_priv(ndev
);
814 struct phy_device
*phydev
= lp
->phy_dev
;
816 if (!netif_running(ndev
))
826 return phy_mii_ioctl(phydev
, rq
, cmd
);
828 dev_info(&lp
->pdev
->dev
, "ioctl %X not implemented.\n", cmd
);
833 static void dwceqos_link_down(struct net_local
*lp
)
838 /* Indicate link down to the LPI state machine */
839 spin_lock_irqsave(&lp
->hw_lock
, flags
);
840 regval
= dwceqos_read(lp
, REG_DWCEQOS_MAC_LPI_CTRL_STATUS
);
841 regval
&= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS
;
842 dwceqos_write(lp
, REG_DWCEQOS_MAC_LPI_CTRL_STATUS
, regval
);
843 spin_unlock_irqrestore(&lp
->hw_lock
, flags
);
846 static void dwceqos_link_up(struct net_local
*lp
)
851 /* Indicate link up to the LPI state machine */
852 spin_lock_irqsave(&lp
->hw_lock
, flags
);
853 regval
= dwceqos_read(lp
, REG_DWCEQOS_MAC_LPI_CTRL_STATUS
);
854 regval
|= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS
;
855 dwceqos_write(lp
, REG_DWCEQOS_MAC_LPI_CTRL_STATUS
, regval
);
856 spin_unlock_irqrestore(&lp
->hw_lock
, flags
);
858 lp
->eee_active
= !phy_init_eee(lp
->phy_dev
, 0);
860 /* Check for changed EEE capability */
861 if (!lp
->eee_active
&& lp
->eee_enabled
) {
864 spin_lock_irqsave(&lp
->hw_lock
, flags
);
865 regval
= dwceqos_read(lp
, REG_DWCEQOS_MAC_LPI_CTRL_STATUS
);
866 regval
&= ~DWCEQOS_LPI_CTRL_ENABLE_EEE
;
867 dwceqos_write(lp
, REG_DWCEQOS_MAC_LPI_CTRL_STATUS
, regval
);
868 spin_unlock_irqrestore(&lp
->hw_lock
, flags
);
872 static void dwceqos_set_speed(struct net_local
*lp
)
874 struct phy_device
*phydev
= lp
->phy_dev
;
877 regval
= dwceqos_read(lp
, REG_DWCEQOS_MAC_CFG
);
878 regval
&= ~(DWCEQOS_MAC_CFG_PS
| DWCEQOS_MAC_CFG_FES
|
882 regval
|= DWCEQOS_MAC_CFG_DM
;
883 if (phydev
->speed
== SPEED_10
) {
884 regval
|= DWCEQOS_MAC_CFG_PS
;
885 } else if (phydev
->speed
== SPEED_100
) {
886 regval
|= DWCEQOS_MAC_CFG_PS
|
888 } else if (phydev
->speed
!= SPEED_1000
) {
890 "unknown PHY speed %d\n",
895 dwceqos_write(lp
, REG_DWCEQOS_MAC_CFG
, regval
);
898 static void dwceqos_adjust_link(struct net_device
*ndev
)
900 struct net_local
*lp
= netdev_priv(ndev
);
901 struct phy_device
*phydev
= lp
->phy_dev
;
902 int status_change
= 0;
905 if ((lp
->speed
!= phydev
->speed
) ||
906 (lp
->duplex
!= phydev
->duplex
)) {
907 dwceqos_set_speed(lp
);
909 lp
->speed
= phydev
->speed
;
910 lp
->duplex
= phydev
->duplex
;
914 if (lp
->flowcontrol
.autoneg
) {
915 lp
->flowcontrol
.rx
= phydev
->pause
||
917 lp
->flowcontrol
.tx
= phydev
->pause
||
921 if (lp
->flowcontrol
.rx
!= lp
->flowcontrol
.rx_current
) {
922 if (netif_msg_link(lp
))
923 netdev_dbg(ndev
, "set rx flow to %d\n",
925 dwceqos_set_rx_flowcontrol(lp
, lp
->flowcontrol
.rx
);
926 lp
->flowcontrol
.rx_current
= lp
->flowcontrol
.rx
;
928 if (lp
->flowcontrol
.tx
!= lp
->flowcontrol
.tx_current
) {
929 if (netif_msg_link(lp
))
930 netdev_dbg(ndev
, "set tx flow to %d\n",
932 dwceqos_set_tx_flowcontrol(lp
, lp
->flowcontrol
.tx
);
933 lp
->flowcontrol
.tx_current
= lp
->flowcontrol
.tx
;
937 if (phydev
->link
!= lp
->link
) {
938 lp
->link
= phydev
->link
;
944 lp
->ndev
->trans_start
= jiffies
;
947 dwceqos_link_down(lp
);
949 phy_print_status(phydev
);
953 static int dwceqos_mii_probe(struct net_device
*ndev
)
955 struct net_local
*lp
= netdev_priv(ndev
);
956 struct phy_device
*phydev
= NULL
;
959 phydev
= of_phy_connect(lp
->ndev
,
961 &dwceqos_adjust_link
,
966 netdev_err(ndev
, "no PHY found\n");
970 netdev_err(ndev
, "no PHY configured\n");
974 if (netif_msg_probe(lp
))
975 phy_attached_info(phydev
);
977 phydev
->supported
&= PHY_GBIT_FEATURES
;
981 lp
->duplex
= DUPLEX_UNKNOWN
;
982 lp
->phy_dev
= phydev
;
987 static void dwceqos_alloc_rxring_desc(struct net_local
*lp
, int index
)
989 struct sk_buff
*new_skb
;
990 dma_addr_t new_skb_baddr
= 0;
992 new_skb
= netdev_alloc_skb(lp
->ndev
, DWCEQOS_RX_BUF_SIZE
);
994 netdev_err(lp
->ndev
, "alloc_skb error for desc %d\n", index
);
998 new_skb_baddr
= dma_map_single(lp
->ndev
->dev
.parent
,
999 new_skb
->data
, DWCEQOS_RX_BUF_SIZE
,
1001 if (dma_mapping_error(lp
->ndev
->dev
.parent
, new_skb_baddr
)) {
1002 netdev_err(lp
->ndev
, "DMA map error\n");
1003 dev_kfree_skb(new_skb
);
1008 lp
->rx_descs
[index
].des0
= new_skb_baddr
;
1009 lp
->rx_descs
[index
].des1
= 0;
1010 lp
->rx_descs
[index
].des2
= 0;
1011 lp
->rx_descs
[index
].des3
= DWCEQOS_DMA_RDES3_INTE
|
1012 DWCEQOS_DMA_RDES3_BUF1V
|
1013 DWCEQOS_DMA_RDES3_OWN
;
1015 lp
->rx_skb
[index
].mapping
= new_skb_baddr
;
1016 lp
->rx_skb
[index
].len
= DWCEQOS_RX_BUF_SIZE
;
1019 lp
->rx_skb
[index
].skb
= new_skb
;
1022 static void dwceqos_clean_rings(struct net_local
*lp
)
1027 for (i
= 0; i
< DWCEQOS_RX_DCNT
; i
++) {
1028 if (lp
->rx_skb
[i
].skb
) {
1029 dma_unmap_single(lp
->ndev
->dev
.parent
,
1030 lp
->rx_skb
[i
].mapping
,
1034 dev_kfree_skb(lp
->rx_skb
[i
].skb
);
1035 lp
->rx_skb
[i
].skb
= NULL
;
1036 lp
->rx_skb
[i
].mapping
= 0;
1042 for (i
= 0; i
< DWCEQOS_TX_DCNT
; i
++) {
1043 if (lp
->tx_skb
[i
].skb
) {
1044 dev_kfree_skb(lp
->tx_skb
[i
].skb
);
1045 lp
->tx_skb
[i
].skb
= NULL
;
1047 if (lp
->tx_skb
[i
].mapping
) {
1048 dma_unmap_single(lp
->ndev
->dev
.parent
,
1049 lp
->tx_skb
[i
].mapping
,
1052 lp
->tx_skb
[i
].mapping
= 0;
1058 static void dwceqos_descriptor_free(struct net_local
*lp
)
1062 dwceqos_clean_rings(lp
);
1069 size
= DWCEQOS_RX_DCNT
* sizeof(struct dwceqos_dma_desc
);
1071 dma_free_coherent(lp
->ndev
->dev
.parent
, size
,
1072 (void *)(lp
->rx_descs
), lp
->rx_descs_addr
);
1073 lp
->rx_descs
= NULL
;
1076 size
= DWCEQOS_TX_DCNT
* sizeof(struct dwceqos_dma_desc
);
1078 dma_free_coherent(lp
->ndev
->dev
.parent
, size
,
1079 (void *)(lp
->tx_descs
), lp
->tx_descs_addr
);
1080 lp
->tx_descs
= NULL
;
1084 static int dwceqos_descriptor_init(struct net_local
*lp
)
1093 lp
->rx_descs
= NULL
;
1094 lp
->tx_descs
= NULL
;
1096 /* Reset the DMA indexes */
1100 lp
->tx_free
= DWCEQOS_TX_DCNT
;
1102 /* Allocate Ring descriptors */
1103 size
= DWCEQOS_RX_DCNT
* sizeof(struct ring_desc
);
1104 lp
->rx_skb
= kzalloc(size
, GFP_KERNEL
);
1108 size
= DWCEQOS_TX_DCNT
* sizeof(struct ring_desc
);
1109 lp
->tx_skb
= kzalloc(size
, GFP_KERNEL
);
1113 /* Allocate DMA descriptors */
1114 size
= DWCEQOS_RX_DCNT
* sizeof(struct dwceqos_dma_desc
);
1115 lp
->rx_descs
= dma_alloc_coherent(lp
->ndev
->dev
.parent
, size
,
1116 &lp
->rx_descs_addr
, 0);
1119 lp
->rx_descs_tail_addr
= lp
->rx_descs_addr
+
1120 sizeof(struct dwceqos_dma_desc
) * DWCEQOS_RX_DCNT
;
1122 size
= DWCEQOS_TX_DCNT
* sizeof(struct dwceqos_dma_desc
);
1123 lp
->tx_descs
= dma_alloc_coherent(lp
->ndev
->dev
.parent
, size
,
1124 &lp
->tx_descs_addr
, 0);
1127 lp
->tx_descs_tail_addr
= lp
->tx_descs_addr
+
1128 sizeof(struct dwceqos_dma_desc
) * DWCEQOS_TX_DCNT
;
1130 /* Initialize RX Ring Descriptors and buffers */
1131 for (i
= 0; i
< DWCEQOS_RX_DCNT
; ++i
) {
1132 dwceqos_alloc_rxring_desc(lp
, i
);
1133 if (!(lp
->rx_skb
[lp
->rx_cur
].skb
))
1137 /* Initialize TX Descriptors */
1138 for (i
= 0; i
< DWCEQOS_TX_DCNT
; ++i
) {
1139 lp
->tx_descs
[i
].des0
= 0;
1140 lp
->tx_descs
[i
].des1
= 0;
1141 lp
->tx_descs
[i
].des2
= 0;
1142 lp
->tx_descs
[i
].des3
= 0;
1145 /* Make descriptor writes visible to the DMA. */
1151 dwceqos_descriptor_free(lp
);
1155 static int dwceqos_packet_avail(struct net_local
*lp
)
1157 return !(lp
->rx_descs
[lp
->rx_cur
].des3
& DWCEQOS_DMA_RDES3_OWN
);
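/* The DMA clears RDES3.OWN when it writes a descriptor back, so a cleared
 * OWN bit at rx_cur means a completed packet is waiting to be processed.
 */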
1160 static void dwceqos_get_hwfeatures(struct net_local
*lp
)
1162 lp
->feature0
= dwceqos_read(lp
, REG_DWCEQOS_MAC_HW_FEATURE0
);
1163 lp
->feature1
= dwceqos_read(lp
, REG_DWCEQOS_MAC_HW_FEATURE1
);
1164 lp
->feature2
= dwceqos_read(lp
, REG_DWCEQOS_MAC_HW_FEATURE2
);
1167 static void dwceqos_dma_enable_txirq(struct net_local
*lp
)
1170 unsigned long flags
;
1172 spin_lock_irqsave(&lp
->hw_lock
, flags
);
1173 regval
= dwceqos_read(lp
, REG_DWCEQOS_DMA_CH0_IE
);
1174 regval
|= DWCEQOS_DMA_CH0_IE_TIE
;
1175 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_IE
, regval
);
1176 spin_unlock_irqrestore(&lp
->hw_lock
, flags
);
1179 static void dwceqos_dma_disable_txirq(struct net_local
*lp
)
1182 unsigned long flags
;
1184 spin_lock_irqsave(&lp
->hw_lock
, flags
);
1185 regval
= dwceqos_read(lp
, REG_DWCEQOS_DMA_CH0_IE
);
1186 regval
&= ~DWCEQOS_DMA_CH0_IE_TIE
;
1187 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_IE
, regval
);
1188 spin_unlock_irqrestore(&lp
->hw_lock
, flags
);
1191 static void dwceqos_dma_enable_rxirq(struct net_local
*lp
)
1194 unsigned long flags
;
1196 spin_lock_irqsave(&lp
->hw_lock
, flags
);
1197 regval
= dwceqos_read(lp
, REG_DWCEQOS_DMA_CH0_IE
);
1198 regval
|= DWCEQOS_DMA_CH0_IE_RIE
;
1199 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_IE
, regval
);
1200 spin_unlock_irqrestore(&lp
->hw_lock
, flags
);
1203 static void dwceqos_dma_disable_rxirq(struct net_local
*lp
)
1206 unsigned long flags
;
1208 spin_lock_irqsave(&lp
->hw_lock
, flags
);
1209 regval
= dwceqos_read(lp
, REG_DWCEQOS_DMA_CH0_IE
);
1210 regval
&= ~DWCEQOS_DMA_CH0_IE_RIE
;
1211 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_IE
, regval
);
1212 spin_unlock_irqrestore(&lp
->hw_lock
, flags
);
1215 static void dwceqos_enable_mmc_interrupt(struct net_local
*lp
)
1217 dwceqos_write(lp
, REG_DWCEQOS_MMC_RXIRQMASK
, 0);
1218 dwceqos_write(lp
, REG_DWCEQOS_MMC_TXIRQMASK
, 0);
1221 static int dwceqos_mii_init(struct net_local
*lp
)
1224 struct resource res
;
1225 struct device_node
*mdionode
;
1227 mdionode
= of_get_child_by_name(lp
->pdev
->dev
.of_node
, "mdio");
1232 lp
->mii_bus
= mdiobus_alloc();
1238 lp
->mii_bus
->name
= "DWCEQOS MII bus";
1239 lp
->mii_bus
->read
= &dwceqos_mdio_read
;
1240 lp
->mii_bus
->write
= &dwceqos_mdio_write
;
1241 lp
->mii_bus
->priv
= lp
;
1242 lp
->mii_bus
->parent
= &lp
->ndev
->dev
;
1244 of_address_to_resource(lp
->pdev
->dev
.of_node
, 0, &res
);
1245 snprintf(lp
->mii_bus
->id
, MII_BUS_ID_SIZE
, "%.8llx",
1246 (unsigned long long)res
.start
);
1247 if (of_mdiobus_register(lp
->mii_bus
, mdionode
))
1248 goto err_out_free_mdiobus
;
1252 err_out_free_mdiobus
:
1253 mdiobus_free(lp
->mii_bus
);
1255 of_node_put(mdionode
);
1259 /* DMA reset. When issued also resets all MTL and MAC registers as well */
1260 static void dwceqos_reset_hw(struct net_local
*lp
)
1262 /* Wait (at most) 0.5 seconds for DMA reset*/
1266 /* Force gigabit to guarantee a TX clock for GMII. */
1267 reg
= dwceqos_read(lp
, REG_DWCEQOS_MAC_CFG
);
1268 reg
&= ~(DWCEQOS_MAC_CFG_PS
| DWCEQOS_MAC_CFG_FES
);
1269 reg
|= DWCEQOS_MAC_CFG_DM
;
1270 dwceqos_write(lp
, REG_DWCEQOS_MAC_CFG
, reg
);
1272 dwceqos_write(lp
, REG_DWCEQOS_DMA_MODE
, DWCEQOS_DMA_MODE_SWR
);
1277 reg
= dwceqos_read(lp
, REG_DWCEQOS_DMA_MODE
);
1278 } while ((reg
& DWCEQOS_DMA_MODE_SWR
) && i
);
1279 /* We might experience a timeout if the chip clock mux is broken */
1281 netdev_err(lp
->ndev
, "DMA reset timed out!\n");
1284 static void dwceqos_fatal_bus_error(struct net_local
*lp
, u32 dma_status
)
1286 if (dma_status
& DWCEQOS_DMA_CH0_IS_TEB
) {
1287 netdev_err(lp
->ndev
, "txdma bus error %s %s (status=%08x)\n",
1288 dma_status
& DWCEQOS_DMA_CH0_IS_TX_ERR_READ
?
1290 dma_status
& DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR
?
1296 if (dma_status
& DWCEQOS_DMA_CH0_IS_REB
) {
1297 netdev_err(lp
->ndev
, "rxdma bus error %s %s (status=%08x)\n",
1298 dma_status
& DWCEQOS_DMA_CH0_IS_RX_ERR_READ
?
1300 dma_status
& DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR
?
1308 static void dwceqos_mmc_interrupt(struct net_local
*lp
)
1310 unsigned long flags
;
1312 spin_lock_irqsave(&lp
->stats_lock
, flags
);
1314 /* A latched mmc interrupt can not be masked, we must read
1315 * all the counters with an interrupt pending.
1317 dwceqos_read_mmc_counters(lp
,
1318 dwceqos_read(lp
, REG_DWCEQOS_MMC_RXIRQ
),
1319 dwceqos_read(lp
, REG_DWCEQOS_MMC_TXIRQ
));
1321 spin_unlock_irqrestore(&lp
->stats_lock
, flags
);
1324 static void dwceqos_mac_interrupt(struct net_local
*lp
)
1328 cause
= dwceqos_read(lp
, REG_DWCEQOS_MAC_IS
);
1330 if (cause
& DWCEQOS_MAC_IS_MMC_INT
)
1331 dwceqos_mmc_interrupt(lp
);
1334 static irqreturn_t
dwceqos_interrupt(int irq
, void *dev_id
)
1336 struct net_device
*ndev
= dev_id
;
1337 struct net_local
*lp
= netdev_priv(ndev
);
1341 irqreturn_t ret
= IRQ_NONE
;
1343 cause
= dwceqos_read(lp
, REG_DWCEQOS_DMA_IS
);
1344 /* DMA Channel 0 Interrupt */
1345 if (cause
& DWCEQOS_DMA_IS_DC0IS
) {
1346 dma_status
= dwceqos_read(lp
, REG_DWCEQOS_DMA_CH0_STA
);
1348 /* Transmit Interrupt */
1349 if (dma_status
& DWCEQOS_DMA_CH0_IS_TI
) {
1350 tasklet_schedule(&lp
->tx_bdreclaim_tasklet
);
1351 dwceqos_dma_disable_txirq(lp
);
1354 /* Receive Interrupt */
1355 if (dma_status
& DWCEQOS_DMA_CH0_IS_RI
) {
1356 /* Disable RX IRQs */
1357 dwceqos_dma_disable_rxirq(lp
);
1358 napi_schedule(&lp
->napi
);
1361 /* Fatal Bus Error interrupt */
1362 if (unlikely(dma_status
& DWCEQOS_DMA_CH0_IS_FBE
)) {
1363 dwceqos_fatal_bus_error(lp
, dma_status
);
1365 /* errata 9000831707 */
1366 dma_status
|= DWCEQOS_DMA_CH0_IS_TEB
|
1367 DWCEQOS_DMA_CH0_IS_REB
;
1370 /* Ack all DMA Channel 0 IRQs */
1371 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_STA
, dma_status
);
1375 if (cause
& DWCEQOS_DMA_IS_MTLIS
) {
1376 u32 val
= dwceqos_read(lp
, REG_DWCEQOS_MTL_Q0_ISCTRL
);
1378 dwceqos_write(lp
, REG_DWCEQOS_MTL_Q0_ISCTRL
, val
);
1382 if (cause
& DWCEQOS_DMA_IS_MACIS
) {
1383 dwceqos_mac_interrupt(lp
);
1389 static void dwceqos_set_rx_flowcontrol(struct net_local
*lp
, bool enable
)
1392 unsigned long flags
;
1394 spin_lock_irqsave(&lp
->hw_lock
, flags
);
1396 regval
= dwceqos_read(lp
, REG_DWCEQOS_MAC_RX_FLOW_CTRL
);
1398 regval
|= DWCEQOS_MAC_RX_FLOW_CTRL_RFE
;
1400 regval
&= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE
;
1401 dwceqos_write(lp
, REG_DWCEQOS_MAC_RX_FLOW_CTRL
, regval
);
1403 spin_unlock_irqrestore(&lp
->hw_lock
, flags
);
1406 static void dwceqos_set_tx_flowcontrol(struct net_local
*lp
, bool enable
)
1409 unsigned long flags
;
1411 spin_lock_irqsave(&lp
->hw_lock
, flags
);
1413 /* MTL flow control */
1414 regval
= dwceqos_read(lp
, REG_DWCEQOS_MTL_RXQ0_OPER
);
1416 regval
|= DWCEQOS_MTL_RXQ_EHFC
;
1418 regval
&= ~DWCEQOS_MTL_RXQ_EHFC
;
1420 dwceqos_write(lp
, REG_DWCEQOS_MTL_RXQ0_OPER
, regval
);
1422 /* MAC flow control */
1423 regval
= dwceqos_read(lp
, REG_DWCEQOS_MAC_Q0_TX_FLOW
);
1425 regval
|= DWCEQOS_MAC_Q0_TX_FLOW_TFE
;
1427 regval
&= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE
;
1428 dwceqos_write(lp
, REG_DWCEQOS_MAC_Q0_TX_FLOW
, regval
);
1430 spin_unlock_irqrestore(&lp
->hw_lock
, flags
);
1433 static void dwceqos_configure_flow_control(struct net_local
*lp
)
1436 unsigned long flags
;
1439 spin_lock_irqsave(&lp
->hw_lock
, flags
);
1441 regval
= dwceqos_read(lp
, REG_DWCEQOS_MTL_RXQ0_OPER
);
1443 /* The queue size is in units of 256 bytes. We want 512 bytes units for
1444 * the threshold fields.
1446 RQS
= ((regval
>> 20) & 0x3FF) + 1;
1449 /* The thresholds are relative to a full queue, with a bias
1450 * of 1 KiByte below full.
1455 regval
= (regval
& 0xFFF000FF) | (RFD
<< 14) | (RFA
<< 8);
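	/* RFD (flow-control deactivate threshold) occupies bits 19:14 and
	 * RFA (activate threshold) bits 13:8 of MTL_RxQ0_Operation_Mode,
	 * both in units of 512 bytes as noted above.
	 */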
1457 if (RFD
>= 0 && RFA
>= 0) {
1458 dwceqos_write(lp
, REG_DWCEQOS_MTL_RXQ0_OPER
, regval
);
1460 netdev_warn(lp
->ndev
,
1461 "FIFO too small for flow control.");
1464 regval
= DWCEQOS_MAC_Q0_TX_FLOW_PT(256) |
1465 DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS
;
1467 dwceqos_write(lp
, REG_DWCEQOS_MAC_Q0_TX_FLOW
, regval
);
1469 spin_unlock_irqrestore(&lp
->hw_lock
, flags
);
1472 static void dwceqos_configure_clock(struct net_local
*lp
)
1474 unsigned long rate_mhz
= clk_get_rate(lp
->apb_pclk
) / 1000000;
1479 REG_DWCEQOS_MAC_1US_TIC_COUNTER
,
1480 DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz
- 1));
1483 static void dwceqos_configure_bus(struct net_local
*lp
)
1487 /* N.B. We do not support the Fixed Burst mode because it
1488 * opens a race window by making HW access to DMA descriptors
1492 sysbus_reg
= DWCEQOS_DMA_SYSBUS_MODE_AAL
;
1494 if (lp
->bus_cfg
.en_lpi
)
1495 sysbus_reg
|= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI
;
1497 if (lp
->bus_cfg
.burst_map
)
1498 sysbus_reg
|= DWCEQOS_DMA_SYSBUS_MODE_BURST(
1499 lp
->bus_cfg
.burst_map
);
1501 sysbus_reg
|= DWCEQOS_DMA_SYSBUS_MODE_BURST(
1502 DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT
);
1504 if (lp
->bus_cfg
.read_requests
)
1505 sysbus_reg
|= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
1506 lp
->bus_cfg
.read_requests
- 1);
1508 sysbus_reg
|= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
1509 DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT
);
1511 if (lp
->bus_cfg
.write_requests
)
1512 sysbus_reg
|= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
1513 lp
->bus_cfg
.write_requests
- 1);
1515 sysbus_reg
|= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
1516 DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT
);
1518 if (netif_msg_hw(lp
))
1519 netdev_dbg(lp
->ndev
, "SysbusMode %#X\n", sysbus_reg
);
1521 dwceqos_write(lp
, REG_DWCEQOS_DMA_SYSBUS_MODE
, sysbus_reg
);
1524 static void dwceqos_init_hw(struct net_local
*lp
)
1530 /* Software reset */
1531 dwceqos_reset_hw(lp
);
1533 dwceqos_configure_bus(lp
);
1535 /* Probe data bus width, 32/64/128 bits. */
1536 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL
, 0xF);
1537 regval
= dwceqos_read(lp
, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL
);
1538 buswidth
= (regval
^ 0xF) + 1;
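	/* The tail pointer register forces the address bits below the data
	 * bus width to zero, so writing 0xF and reading it back shows which
	 * low bits stick: (readback ^ 0xF) + 1 is the bus width in bytes.
	 */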
1540 /* Cache-align dma descriptors. */
1541 dma_skip
= (sizeof(struct dwceqos_dma_desc
) - 16) / buswidth
;
1542 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_CTRL
,
1543 DWCEQOS_DMA_CH_CTRL_DSL(dma_skip
) |
1544 DWCEQOS_DMA_CH_CTRL_PBLX8
);
1546 /* Initialize DMA Channel 0 */
1547 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_TXDESC_LEN
, DWCEQOS_TX_DCNT
- 1);
1548 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_RXDESC_LEN
, DWCEQOS_RX_DCNT
- 1);
1549 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_TXDESC_LIST
,
1550 (u32
)lp
->tx_descs_addr
);
1551 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_RXDESC_LIST
,
1552 (u32
)lp
->rx_descs_addr
);
1554 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL
,
1555 lp
->tx_descs_tail_addr
);
1556 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL
,
1557 lp
->rx_descs_tail_addr
);
1559 if (lp
->bus_cfg
.tx_pbl
)
1560 regval
= DWCEQOS_DMA_CH_CTRL_PBL(lp
->bus_cfg
.tx_pbl
);
1562 regval
= DWCEQOS_DMA_CH_CTRL_PBL(2);
1564 /* Enable TSO if the HW support it */
1565 if (lp
->feature1
& DWCEQOS_MAC_HW_FEATURE1_TSOEN
)
1566 regval
|= DWCEQOS_DMA_CH_TX_TSE
;
1568 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_TX_CTRL
, regval
);
1570 if (lp
->bus_cfg
.rx_pbl
)
1571 regval
= DWCEQOS_DMA_CH_CTRL_PBL(lp
->bus_cfg
.rx_pbl
);
1573 regval
= DWCEQOS_DMA_CH_CTRL_PBL(2);
1575 regval
|= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE
);
1576 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_RX_CTRL
, regval
);
1578 regval
|= DWCEQOS_DMA_CH_CTRL_START
;
1579 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_RX_CTRL
, regval
);
1581 /* Initialize MTL Queues */
1582 regval
= DWCEQOS_MTL_SCHALG_STRICT
;
1583 dwceqos_write(lp
, REG_DWCEQOS_MTL_OPER
, regval
);
1585 regval
= DWCEQOS_MTL_TXQ_SIZE(
1586 DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp
->feature1
)) |
1587 DWCEQOS_MTL_TXQ_TXQEN
| DWCEQOS_MTL_TXQ_TSF
|
1588 DWCEQOS_MTL_TXQ_TTC512
;
1589 dwceqos_write(lp
, REG_DWCEQOS_MTL_TXQ0_OPER
, regval
);
1591 regval
= DWCEQOS_MTL_RXQ_SIZE(
1592 DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp
->feature1
)) |
1593 DWCEQOS_MTL_RXQ_FUP
| DWCEQOS_MTL_RXQ_FEP
| DWCEQOS_MTL_RXQ_RSF
;
1594 dwceqos_write(lp
, REG_DWCEQOS_MTL_RXQ0_OPER
, regval
);
1596 dwceqos_configure_flow_control(lp
);
1598 /* Initialize MAC */
1599 dwceqos_set_umac_addr(lp
, lp
->ndev
->dev_addr
, 0);
1601 lp
->eee_enabled
= 0;
1603 dwceqos_configure_clock(lp
);
1607 /* probe implemented counters */
1608 dwceqos_write(lp
, REG_DWCEQOS_MMC_RXIRQMASK
, ~0u);
1609 dwceqos_write(lp
, REG_DWCEQOS_MMC_TXIRQMASK
, ~0u);
1610 lp
->mmc_rx_counters_mask
= dwceqos_read(lp
, REG_DWCEQOS_MMC_RXIRQMASK
);
1611 lp
->mmc_tx_counters_mask
= dwceqos_read(lp
, REG_DWCEQOS_MMC_TXIRQMASK
);
1613 dwceqos_write(lp
, REG_DWCEQOS_MMC_CTRL
, DWCEQOS_MMC_CTRL_CNTRST
|
1614 DWCEQOS_MMC_CTRL_RSTONRD
);
1615 dwceqos_enable_mmc_interrupt(lp
);
1617 /* Enable Interrupts */
1618 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_IE
,
1619 DWCEQOS_DMA_CH0_IE_NIE
|
1620 DWCEQOS_DMA_CH0_IE_RIE
| DWCEQOS_DMA_CH0_IE_TIE
|
1621 DWCEQOS_DMA_CH0_IE_AIE
|
1622 DWCEQOS_DMA_CH0_IE_FBEE
);
1624 dwceqos_write(lp
, REG_DWCEQOS_MAC_IE
, 0);
1626 dwceqos_write(lp
, REG_DWCEQOS_MAC_CFG
, DWCEQOS_MAC_CFG_IPC
|
1627 DWCEQOS_MAC_CFG_DM
| DWCEQOS_MAC_CFG_TE
| DWCEQOS_MAC_CFG_RE
);
1630 regval
= dwceqos_read(lp
, REG_DWCEQOS_DMA_CH0_TX_CTRL
);
1631 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_TX_CTRL
,
1632 regval
| DWCEQOS_DMA_CH_CTRL_START
);
1634 /* Enable MAC TX/RX */
1635 regval
= dwceqos_read(lp
, REG_DWCEQOS_MAC_CFG
);
1636 dwceqos_write(lp
, REG_DWCEQOS_MAC_CFG
,
1637 regval
| DWCEQOS_MAC_CFG_TE
| DWCEQOS_MAC_CFG_RE
);
1640 static void dwceqos_tx_reclaim(unsigned long data
)
1642 struct net_device
*ndev
= (struct net_device
*)data
;
1643 struct net_local
*lp
= netdev_priv(ndev
);
1644 unsigned int tx_bytes
= 0;
1645 unsigned int tx_packets
= 0;
1647 spin_lock(&lp
->tx_lock
);
1649 while (lp
->tx_free
< DWCEQOS_TX_DCNT
) {
1650 struct dwceqos_dma_desc
*dd
= &lp
->tx_descs
[lp
->tx_cur
];
1651 struct ring_desc
*rd
= &lp
->tx_skb
[lp
->tx_cur
];
1653 /* Descriptor still being held by DMA ? */
1654 if (dd
->des3
& DWCEQOS_DMA_TDES3_OWN
)
1658 dma_unmap_single(ndev
->dev
.parent
, rd
->mapping
, rd
->len
,
1661 if (unlikely(rd
->skb
)) {
1663 tx_bytes
+= rd
->skb
->len
;
1664 dev_consume_skb_any(rd
->skb
);
1670 lp
->tx_cur
= (lp
->tx_cur
+ 1) % DWCEQOS_TX_DCNT
;
1672 if ((dd
->des3
& DWCEQOS_DMA_TDES3_LD
) &&
1673 (dd
->des3
& DWCEQOS_DMA_RDES3_ES
)) {
1674 if (netif_msg_tx_err(lp
))
1675 netdev_err(ndev
, "TX Error, TDES3 = 0x%x\n",
1677 if (netif_msg_hw(lp
))
1681 spin_unlock(&lp
->tx_lock
);
1683 netdev_completed_queue(ndev
, tx_packets
, tx_bytes
);
1685 dwceqos_dma_enable_txirq(lp
);
1686 netif_wake_queue(ndev
);
1689 static int dwceqos_rx(struct net_local
*lp
, int budget
)
1691 struct sk_buff
*skb
;
1693 unsigned int n_packets
= 0;
1694 unsigned int n_descs
= 0;
1697 struct dwceqos_dma_desc
*dd
;
1698 struct sk_buff
*new_skb
;
1699 dma_addr_t new_skb_baddr
= 0;
1701 while (n_descs
< budget
) {
1702 if (!dwceqos_packet_avail(lp
))
1705 new_skb
= netdev_alloc_skb(lp
->ndev
, DWCEQOS_RX_BUF_SIZE
);
1707 netdev_err(lp
->ndev
, "no memory for new sk_buff\n");
1711 /* Get dma handle of skb->data */
1712 new_skb_baddr
= (u32
)dma_map_single(lp
->ndev
->dev
.parent
,
1714 DWCEQOS_RX_BUF_SIZE
,
1716 if (dma_mapping_error(lp
->ndev
->dev
.parent
, new_skb_baddr
)) {
1717 netdev_err(lp
->ndev
, "DMA map error\n");
1718 dev_kfree_skb(new_skb
);
1722 /* Read descriptor data after reading owner bit. */
1725 dd
= &lp
->rx_descs
[lp
->rx_cur
];
1726 len
= DWCEQOS_DMA_RDES3_PL(dd
->des3
);
1727 skb
= lp
->rx_skb
[lp
->rx_cur
].skb
;
1729 /* Unmap old buffer */
1730 dma_unmap_single(lp
->ndev
->dev
.parent
,
1731 lp
->rx_skb
[lp
->rx_cur
].mapping
,
1732 lp
->rx_skb
[lp
->rx_cur
].len
, DMA_FROM_DEVICE
);
1734 /* Discard packet on reception error or bad checksum */
1735 if ((dd
->des3
& DWCEQOS_DMA_RDES3_ES
) ||
1736 (dd
->des1
& DWCEQOS_DMA_RDES1_IPCE
)) {
1741 skb
->protocol
= eth_type_trans(skb
, lp
->ndev
);
1742 switch (dd
->des1
& DWCEQOS_DMA_RDES1_PT
) {
1743 case DWCEQOS_DMA_RDES1_PT_UDP
:
1744 case DWCEQOS_DMA_RDES1_PT_TCP
:
1745 case DWCEQOS_DMA_RDES1_PT_ICMP
:
1746 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1749 skb
->ip_summed
= CHECKSUM_NONE
;
1754 if (unlikely(!skb
)) {
1755 if (netif_msg_rx_err(lp
))
1756 netdev_dbg(lp
->ndev
, "rx error: des3=%X\n",
1757 lp
->rx_descs
[lp
->rx_cur
].des3
);
1759 tot_size
+= skb
->len
;
1762 netif_receive_skb(skb
);
1765 lp
->rx_descs
[lp
->rx_cur
].des0
= new_skb_baddr
;
1766 lp
->rx_descs
[lp
->rx_cur
].des1
= 0;
1767 lp
->rx_descs
[lp
->rx_cur
].des2
= 0;
1768 /* The DMA must observe des0/1/2 written before des3. */
1770 lp
->rx_descs
[lp
->rx_cur
].des3
= DWCEQOS_DMA_RDES3_INTE
|
1771 DWCEQOS_DMA_RDES3_OWN
|
1772 DWCEQOS_DMA_RDES3_BUF1V
;
1774 lp
->rx_skb
[lp
->rx_cur
].mapping
= new_skb_baddr
;
1775 lp
->rx_skb
[lp
->rx_cur
].len
= DWCEQOS_RX_BUF_SIZE
;
1776 lp
->rx_skb
[lp
->rx_cur
].skb
= new_skb
;
1779 lp
->rx_cur
= (lp
->rx_cur
+ 1) % DWCEQOS_RX_DCNT
;
1782 /* Make sure any ownership update is written to the descriptors before
1787 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_STA
, DWCEQOS_DMA_CH0_IS_RI
);
1788 /* Wake up RX by writing tail pointer */
1789 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL
,
1790 lp
->rx_descs_tail_addr
);
1795 static int dwceqos_rx_poll(struct napi_struct
*napi
, int budget
)
1797 struct net_local
*lp
= container_of(napi
, struct net_local
, napi
);
1800 work_done
= dwceqos_rx(lp
, budget
- work_done
);
1802 if (!dwceqos_packet_avail(lp
) && work_done
< budget
) {
1803 napi_complete(napi
);
1804 dwceqos_dma_enable_rxirq(lp
);
1812 /* Reinitialize function if a TX timed out */
1813 static void dwceqos_reinit_for_txtimeout(struct work_struct
*data
)
1815 struct net_local
*lp
= container_of(data
, struct net_local
,
1818 netdev_err(lp
->ndev
, "transmit timeout %d s, resetting...\n",
1819 DWCEQOS_TX_TIMEOUT
);
1821 if (netif_msg_hw(lp
))
1825 dwceqos_stop(lp
->ndev
);
1826 dwceqos_open(lp
->ndev
);
1830 /* DT Probing function called by main probe */
1831 static inline int dwceqos_probe_config_dt(struct platform_device
*pdev
)
1833 struct net_device
*ndev
;
1834 struct net_local
*lp
;
1835 const void *mac_address
;
1836 struct dwceqos_bus_cfg
*bus_cfg
;
1837 struct device_node
*np
= pdev
->dev
.of_node
;
1839 ndev
= platform_get_drvdata(pdev
);
1840 lp
= netdev_priv(ndev
);
1841 bus_cfg
= &lp
->bus_cfg
;
1843 /* Set the MAC address. */
1844 mac_address
= of_get_mac_address(pdev
->dev
.of_node
);
1846 ether_addr_copy(ndev
->dev_addr
, mac_address
);
1848 /* These are all optional parameters */
1849 lp
->en_tx_lpi_clockgating
= of_property_read_bool(np
,
1850 "snps,en-tx-lpi-clockgating");
1851 bus_cfg
->en_lpi
= of_property_read_bool(np
, "snps,en-lpi");
1852 of_property_read_u32(np
, "snps,write-requests",
1853 &bus_cfg
->write_requests
);
1854 of_property_read_u32(np
, "snps,read-requests", &bus_cfg
->read_requests
);
1855 of_property_read_u32(np
, "snps,burst-map", &bus_cfg
->burst_map
);
1856 of_property_read_u32(np
, "snps,txpbl", &bus_cfg
->tx_pbl
);
1857 of_property_read_u32(np
, "snps,rxpbl", &bus_cfg
->rx_pbl
);
1859 netdev_dbg(ndev
, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n",
1861 bus_cfg
->write_requests
,
1862 bus_cfg
->read_requests
,
1870 static int dwceqos_open(struct net_device
*ndev
)
1872 struct net_local
*lp
= netdev_priv(ndev
);
1875 dwceqos_reset_state(lp
);
1876 res
= dwceqos_descriptor_init(lp
);
1878 netdev_err(ndev
, "Unable to allocate DMA memory, rc %d\n", res
);
1881 netdev_reset_queue(ndev
);
1883 dwceqos_init_hw(lp
);
1884 napi_enable(&lp
->napi
);
1885 phy_start(lp
->phy_dev
);
1887 netif_start_queue(ndev
);
1888 tasklet_enable(&lp
->tx_bdreclaim_tasklet
);
static bool dwceqos_is_tx_dma_suspended(struct net_local *lp)
1897 reg
= dwceqos_read(lp
, REG_DWCEQOS_DMA_DEBUG_ST0
);
1898 reg
= DMA_GET_TX_STATE_CH0(reg
);
1900 return reg
== DMA_TX_CH_SUSPENDED
;
1903 static void dwceqos_drain_dma(struct net_local
*lp
)
1905 /* Wait for all pending TX buffers to be sent. Upper limit based
1906 * on max frame size on a 10 Mbit link.
1908 size_t limit
= (DWCEQOS_TX_DCNT
* 1250) / 100;
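	/* A full-size frame takes roughly 1250 us on the wire at 10 Mbit/s
	 * and each poll below sleeps at least 100 us, so this bounds the
	 * wait to about one TX ring's worth of frames.
	 */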
	while (!dwceqos_is_tx_dma_suspended(lp) && limit--)
		usleep_range(100, 200);
1914 static int dwceqos_stop(struct net_device
*ndev
)
1916 struct net_local
*lp
= netdev_priv(ndev
);
1918 phy_stop(lp
->phy_dev
);
1920 tasklet_disable(&lp
->tx_bdreclaim_tasklet
);
1921 netif_stop_queue(ndev
);
1922 napi_disable(&lp
->napi
);
1924 dwceqos_drain_dma(lp
);
1926 netif_tx_lock(lp
->ndev
);
1927 dwceqos_reset_hw(lp
);
1928 dwceqos_descriptor_free(lp
);
1929 netif_tx_unlock(lp
->ndev
);
1934 static void dwceqos_dmadesc_set_ctx(struct net_local
*lp
,
1935 unsigned short gso_size
)
1937 struct dwceqos_dma_desc
*dd
= &lp
->tx_descs
[lp
->tx_next
];
1941 dd
->des2
= gso_size
;
1942 dd
->des3
= DWCEQOS_DMA_TDES3_CTXT
| DWCEQOS_DMA_TDES3_TCMSSV
;
1944 lp
->tx_next
= (lp
->tx_next
+ 1) % DWCEQOS_TX_DCNT
;
1947 static void dwceqos_tx_poll_demand(struct net_local
*lp
)
1949 dwceqos_write(lp
, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL
,
1950 lp
->tx_descs_tail_addr
);
1954 size_t nr_descriptors
;
1955 size_t initial_descriptor
;
1956 size_t last_descriptor
;
1957 size_t prev_gso_size
;
1958 size_t network_header_len
;
1961 static void dwceqos_tx_prepare(struct sk_buff
*skb
, struct net_local
*lp
,
1962 struct dwceqos_tx
*tx
)
1967 if (skb_is_gso(skb
) && skb_shinfo(skb
)->gso_size
!= lp
->gso_size
)
1970 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; ++i
) {
1971 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
1973 n
+= (skb_frag_size(frag
) + BYTES_PER_DMA_DESC
- 1) /
1977 tx
->nr_descriptors
= n
;
1978 tx
->initial_descriptor
= lp
->tx_next
;
1979 tx
->last_descriptor
= lp
->tx_next
;
1980 tx
->prev_gso_size
= lp
->gso_size
;
1982 tx
->network_header_len
= skb_transport_offset(skb
);
1983 if (skb_is_gso(skb
))
1984 tx
->network_header_len
+= tcp_hdrlen(skb
);
1987 static int dwceqos_tx_linear(struct sk_buff
*skb
, struct net_local
*lp
,
1988 struct dwceqos_tx
*tx
)
1990 struct ring_desc
*rd
;
1991 struct dwceqos_dma_desc
*dd
;
1993 dma_addr_t dma_handle
;
1995 if (skb_is_gso(skb
) && skb_shinfo(skb
)->gso_size
!= lp
->gso_size
) {
1996 dwceqos_dmadesc_set_ctx(lp
, skb_shinfo(skb
)->gso_size
);
1997 lp
->gso_size
= skb_shinfo(skb
)->gso_size
;
2000 dma_handle
= dma_map_single(lp
->ndev
->dev
.parent
, skb
->data
,
2001 skb_headlen(skb
), DMA_TO_DEVICE
);
2003 if (dma_mapping_error(lp
->ndev
->dev
.parent
, dma_handle
)) {
2004 netdev_err(lp
->ndev
, "TX DMA Mapping error\n");
2008 rd
= &lp
->tx_skb
[lp
->tx_next
];
2009 dd
= &lp
->tx_descs
[lp
->tx_next
];
2012 rd
->len
= skb_headlen(skb
);
2013 rd
->mapping
= dma_handle
;
2015 /* Set up DMA Descriptor */
2016 dd
->des0
= dma_handle
;
2018 if (skb_is_gso(skb
)) {
2019 payload_len
= skb_headlen(skb
) - tx
->network_header_len
;
2022 dd
->des1
= dma_handle
+ tx
->network_header_len
;
2023 dd
->des2
= tx
->network_header_len
|
2024 DWCEQOS_DMA_DES2_B2L(payload_len
);
2025 dd
->des3
= DWCEQOS_DMA_TDES3_TSE
|
2026 DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb
) / 4)) |
2027 (skb
->len
- tx
->network_header_len
);
2030 dd
->des2
= skb_headlen(skb
);
2031 dd
->des3
= skb
->len
;
2033 switch (skb
->ip_summed
) {
2034 case CHECKSUM_PARTIAL
:
2035 dd
->des3
|= DWCEQOS_DMA_TDES3_CA
;
2037 case CHECKSUM_UNNECESSARY
:
2038 case CHECKSUM_COMPLETE
:
2044 dd
->des3
|= DWCEQOS_DMA_TDES3_FD
;
2045 if (lp
->tx_next
!= tx
->initial_descriptor
)
2046 dd
->des3
|= DWCEQOS_DMA_TDES3_OWN
;
2048 tx
->last_descriptor
= lp
->tx_next
;
2049 lp
->tx_next
= (lp
->tx_next
+ 1) % DWCEQOS_TX_DCNT
;
static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
			    struct dwceqos_tx *tx)
{
	struct ring_desc *rd = NULL;
	struct dwceqos_dma_desc *dd;
	dma_addr_t dma_handle;
	size_t i;

	/* Setup more ring and DMA descriptor if the packet is fragmented */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		size_t frag_size;
		size_t consumed_size;

		dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0,
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
			netdev_err(lp->ndev, "DMA Mapping error\n");
			return -ENOMEM;
		}

		/* order-3 fragments span more than one descriptor. */
		frag_size = skb_frag_size(frag);
		consumed_size = 0;

		while (consumed_size < frag_size) {
			size_t dma_size = min_t(size_t, BYTES_PER_DMA_DESC,
						frag_size - consumed_size);

			rd = &lp->tx_skb[lp->tx_next];
			memset(rd, 0, sizeof(*rd));

			dd = &lp->tx_descs[lp->tx_next];

			/* Set DMA Descriptor fields */
			dd->des0 = dma_handle + consumed_size;
			dd->des1 = 0;
			dd->des2 = dma_size;

			if (skb_is_gso(skb))
				dd->des3 = (skb->len - tx->network_header_len);
			else
				dd->des3 = skb->len;

			dd->des3 |= DWCEQOS_DMA_TDES3_OWN;

			tx->last_descriptor = lp->tx_next;
			lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
			consumed_size += dma_size;
		}

		rd->len = skb_frag_size(frag);
		rd->mapping = dma_handle;
	}

	return 0;
}

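/* Complete the descriptor chain: mark the last descriptor, request an
 * interrupt on completion, and finally hand the first descriptor to the
 * hardware by setting its OWN bit before poking the tail pointer.
 */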
static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp,
				struct dwceqos_tx *tx)
{
	lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD;
	lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC;

	lp->tx_skb[tx->last_descriptor].skb = skb;

	/* Make all descriptor updates visible to the DMA before setting the
	 * owner bit.
	 */
	wmb();

	lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN;

	/* Make the owner bit visible before TX wakeup. */
	wmb();

	dwceqos_tx_poll_demand(lp);
}

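/* Undo a partially built TX chain after a mapping failure: unmap what
 * was mapped, clear the affected descriptors and restore the ring index
 * and the previous GSO size.
 */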
static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx)
{
	size_t i = tx->initial_descriptor;

	while (i != lp->tx_next) {
		if (lp->tx_skb[i].mapping)
			dma_unmap_single(lp->ndev->dev.parent,
					 lp->tx_skb[i].mapping,
					 lp->tx_skb[i].len,
					 DMA_TO_DEVICE);

		lp->tx_skb[i].mapping = 0;
		lp->tx_skb[i].skb = NULL;

		memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i]));

		i = (i + 1) % DWCEQOS_TX_DCNT;
	}

	lp->tx_next = tx->initial_descriptor;
	lp->gso_size = tx->prev_gso_size;
}

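/* ndo_start_xmit: reserve ring space, build the descriptor chain from
 * the linear part and the fragments, then publish it to the DMA. If a
 * DMA mapping fails the chain is rolled back.
 */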
static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct net_local *lp = netdev_priv(ndev);
	struct dwceqos_tx trans;
	int err;

	dwceqos_tx_prepare(skb, lp, &trans);
	if (lp->tx_free < trans.nr_descriptors) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	err = dwceqos_tx_linear(skb, lp, &trans);
	if (err)
		goto tx_error;

	err = dwceqos_tx_frags(skb, lp, &trans);
	if (err)
		goto tx_error;

	WARN_ON(lp->tx_next !=
		((trans.initial_descriptor + trans.nr_descriptors) %
		 DWCEQOS_TX_DCNT));

	dwceqos_tx_finalize(skb, lp, &trans);

	netdev_sent_queue(ndev, skb->len);

	spin_lock_bh(&lp->tx_lock);
	lp->tx_free -= trans.nr_descriptors;
	spin_unlock_bh(&lp->tx_lock);

	ndev->trans_start = jiffies;
	return NETDEV_TX_OK;

tx_error:
	dwceqos_tx_rollback(lp, &trans);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Set MAC address and then update HW accordingly */
static int dwceqos_set_mac_address(struct net_device *ndev, void *addr)
{
	struct net_local *lp = netdev_priv(ndev);
	struct sockaddr *hwaddr = (struct sockaddr *)addr;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(hwaddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len);

	dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
	return 0;
}

static void dwceqos_tx_timeout(struct net_device *ndev)
{
	struct net_local *lp = netdev_priv(ndev);

	queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
}

static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
				  unsigned int reg_n)
{
	u32 data;

	data = (addr[5] << 8) | addr[4];
	dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n),
		      data | DWCEQOS_MAC_MAC_ADDR_HI_EN);
	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data);
}

static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n)
{
	/* Do not disable MAC address 0 */
	if (reg_n != 0)
		dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0);
}

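/* Program the MAC packet filter: promiscuous and all-multi modes bypass
 * filtering, other multicast addresses are hashed into the 64-bit hash
 * table, and unicast addresses use the perfect-match address registers.
 */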
static void dwceqos_set_rx_mode(struct net_device *ndev)
{
	struct net_local *lp = netdev_priv(ndev);
	u32 regval = 0;
	u32 mc_filter[2];
	int reg = 1;
	struct netdev_hw_addr *ha;
	unsigned int max_mac_addr;

	max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1);

	if (ndev->flags & IFF_PROMISC) {
		regval = DWCEQOS_MAC_PKT_FILT_PR;
	} else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) ||
		    (ndev->flags & IFF_ALLMULTI))) {
		regval = DWCEQOS_MAC_PKT_FILT_PM;
		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff);
		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff);
	} else if (!netdev_mc_empty(ndev)) {
		regval = DWCEQOS_MAC_PKT_FILT_HMC;
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, ndev) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the contents of the hash table.
			 */
			int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;

			/* The most significant bit determines the register
			 * to use (H/L) while the other 5 bits determine
			 * the bit within the register.
			 */
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]);
		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]);
	}

	if (netdev_uc_count(ndev) > max_mac_addr) {
		regval |= DWCEQOS_MAC_PKT_FILT_PR;
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			dwceqos_set_umac_addr(lp, ha->addr, reg);
			reg++;
		}
		for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++)
			dwceqos_disable_umac_addr(lp, reg);
	}

	dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void dwceqos_poll_controller(struct net_device *ndev)
{
	disable_irq(ndev->irq);
	dwceqos_interrupt(ndev->irq, ndev);
	enable_irq(ndev->irq);
}
#endif

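/* Accumulate the hardware MMC counters selected by the given masks into
 * the software copies. This assumes the MMC block is configured to clear
 * each counter on read, so the values can simply be added.
 */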
static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
				      u32 tx_mask)
{
	if (tx_mask & BIT(27))
		lp->mmc_counters.txlpitranscntr +=
			dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR);
	if (tx_mask & BIT(26))
		lp->mmc_counters.txpiuscntr +=
			dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR);
	if (tx_mask & BIT(25))
		lp->mmc_counters.txoversize_g +=
			dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G);
	if (tx_mask & BIT(24))
		lp->mmc_counters.txvlanpackets_g +=
			dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G);
	if (tx_mask & BIT(23))
		lp->mmc_counters.txpausepackets +=
			dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS);
	if (tx_mask & BIT(22))
		lp->mmc_counters.txexcessdef +=
			dwceqos_read(lp, DWC_MMC_TXEXCESSDEF);
	if (tx_mask & BIT(21))
		lp->mmc_counters.txpacketcount_g +=
			dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G);
	if (tx_mask & BIT(20))
		lp->mmc_counters.txoctetcount_g +=
			dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G);
	if (tx_mask & BIT(19))
		lp->mmc_counters.txcarriererror +=
			dwceqos_read(lp, DWC_MMC_TXCARRIERERROR);
	if (tx_mask & BIT(18))
		lp->mmc_counters.txexcesscol +=
			dwceqos_read(lp, DWC_MMC_TXEXCESSCOL);
	if (tx_mask & BIT(17))
		lp->mmc_counters.txlatecol +=
			dwceqos_read(lp, DWC_MMC_TXLATECOL);
	if (tx_mask & BIT(16))
		lp->mmc_counters.txdeferred +=
			dwceqos_read(lp, DWC_MMC_TXDEFERRED);
	if (tx_mask & BIT(15))
		lp->mmc_counters.txmulticol_g +=
			dwceqos_read(lp, DWC_MMC_TXMULTICOL_G);
	if (tx_mask & BIT(14))
		lp->mmc_counters.txsinglecol_g +=
			dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G);
	if (tx_mask & BIT(13))
		lp->mmc_counters.txunderflowerror +=
			dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR);
	if (tx_mask & BIT(12))
		lp->mmc_counters.txbroadcastpackets_gb +=
			dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB);
	if (tx_mask & BIT(11))
		lp->mmc_counters.txmulticastpackets_gb +=
			dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB);
	if (tx_mask & BIT(10))
		lp->mmc_counters.txunicastpackets_gb +=
			dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB);
	if (tx_mask & BIT(9))
		lp->mmc_counters.tx1024tomaxoctets_gb +=
			dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB);
	if (tx_mask & BIT(8))
		lp->mmc_counters.tx512to1023octets_gb +=
			dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB);
	if (tx_mask & BIT(7))
		lp->mmc_counters.tx256to511octets_gb +=
			dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB);
	if (tx_mask & BIT(6))
		lp->mmc_counters.tx128to255octets_gb +=
			dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB);
	if (tx_mask & BIT(5))
		lp->mmc_counters.tx65to127octets_gb +=
			dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB);
	if (tx_mask & BIT(4))
		lp->mmc_counters.tx64octets_gb +=
			dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB);
	if (tx_mask & BIT(3))
		lp->mmc_counters.txmulticastpackets_g +=
			dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G);
	if (tx_mask & BIT(2))
		lp->mmc_counters.txbroadcastpackets_g +=
			dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G);
	if (tx_mask & BIT(1))
		lp->mmc_counters.txpacketcount_gb +=
			dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB);
	if (tx_mask & BIT(0))
		lp->mmc_counters.txoctetcount_gb +=
			dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB);

	if (rx_mask & BIT(27))
		lp->mmc_counters.rxlpitranscntr +=
			dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR);
	if (rx_mask & BIT(26))
		lp->mmc_counters.rxlpiuscntr +=
			dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR);
	if (rx_mask & BIT(25))
		lp->mmc_counters.rxctrlpackets_g +=
			dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G);
	if (rx_mask & BIT(24))
		lp->mmc_counters.rxrcverror +=
			dwceqos_read(lp, DWC_MMC_RXRCVERROR);
	if (rx_mask & BIT(23))
		lp->mmc_counters.rxwatchdog +=
			dwceqos_read(lp, DWC_MMC_RXWATCHDOG);
	if (rx_mask & BIT(22))
		lp->mmc_counters.rxvlanpackets_gb +=
			dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB);
	if (rx_mask & BIT(21))
		lp->mmc_counters.rxfifooverflow +=
			dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW);
	if (rx_mask & BIT(20))
		lp->mmc_counters.rxpausepackets +=
			dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS);
	if (rx_mask & BIT(19))
		lp->mmc_counters.rxoutofrangetype +=
			dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE);
	if (rx_mask & BIT(18))
		lp->mmc_counters.rxlengtherror +=
			dwceqos_read(lp, DWC_MMC_RXLENGTHERROR);
	if (rx_mask & BIT(17))
		lp->mmc_counters.rxunicastpackets_g +=
			dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G);
	if (rx_mask & BIT(16))
		lp->mmc_counters.rx1024tomaxoctets_gb +=
			dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB);
	if (rx_mask & BIT(15))
		lp->mmc_counters.rx512to1023octets_gb +=
			dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB);
	if (rx_mask & BIT(14))
		lp->mmc_counters.rx256to511octets_gb +=
			dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB);
	if (rx_mask & BIT(13))
		lp->mmc_counters.rx128to255octets_gb +=
			dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB);
	if (rx_mask & BIT(12))
		lp->mmc_counters.rx65to127octets_gb +=
			dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB);
	if (rx_mask & BIT(11))
		lp->mmc_counters.rx64octets_gb +=
			dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB);
	if (rx_mask & BIT(10))
		lp->mmc_counters.rxoversize_g +=
			dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G);
	if (rx_mask & BIT(9))
		lp->mmc_counters.rxundersize_g +=
			dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G);
	if (rx_mask & BIT(8))
		lp->mmc_counters.rxjabbererror +=
			dwceqos_read(lp, DWC_MMC_RXJABBERERROR);
	if (rx_mask & BIT(7))
		lp->mmc_counters.rxrunterror +=
			dwceqos_read(lp, DWC_MMC_RXRUNTERROR);
	if (rx_mask & BIT(6))
		lp->mmc_counters.rxalignmenterror +=
			dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR);
	if (rx_mask & BIT(5))
		lp->mmc_counters.rxcrcerror +=
			dwceqos_read(lp, DWC_MMC_RXCRCERROR);
	if (rx_mask & BIT(4))
		lp->mmc_counters.rxmulticastpackets_g +=
			dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G);
	if (rx_mask & BIT(3))
		lp->mmc_counters.rxbroadcastpackets_g +=
			dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G);
	if (rx_mask & BIT(2))
		lp->mmc_counters.rxoctetcount_g +=
			dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G);
	if (rx_mask & BIT(1))
		lp->mmc_counters.rxoctetcount_gb +=
			dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB);
	if (rx_mask & BIT(0))
		lp->mmc_counters.rxpacketcount_gb +=
			dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB);
}

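/* ndo_get_stats64: snapshot the MMC counters under the stats lock and
 * map them onto the rtnl_link_stats64 fields.
 */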
static struct rtnl_link_stats64 *
dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
{
	unsigned long flags;
	struct net_local *lp = netdev_priv(ndev);
	struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters;

	spin_lock_irqsave(&lp->stats_lock, flags);
	dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
				  lp->mmc_tx_counters_mask);
	spin_unlock_irqrestore(&lp->stats_lock, flags);

	s->rx_packets = hwstats->rxpacketcount_gb;
	s->rx_bytes = hwstats->rxoctetcount_gb;
	s->rx_errors = hwstats->rxpacketcount_gb -
		       hwstats->rxbroadcastpackets_g -
		       hwstats->rxmulticastpackets_g -
		       hwstats->rxunicastpackets_g;
	s->multicast = hwstats->rxmulticastpackets_g;
	s->rx_length_errors = hwstats->rxlengtherror;
	s->rx_crc_errors = hwstats->rxcrcerror;
	s->rx_fifo_errors = hwstats->rxfifooverflow;

	s->tx_packets = hwstats->txpacketcount_gb;
	s->tx_bytes = hwstats->txoctetcount_gb;

	if (lp->mmc_tx_counters_mask & BIT(21))
		s->tx_errors = hwstats->txpacketcount_gb -
			       hwstats->txpacketcount_g;
	else
		s->tx_errors = hwstats->txunderflowerror +
			       hwstats->txcarriererror;

	return s;
}

static int
dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct net_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_gset(phydev, ecmd);
}

static int
dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct net_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, ecmd);
}

static void
dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
{
	const struct net_local *lp = netdev_priv(ndev);

	strcpy(ed->driver, lp->pdev->dev.driver->name);
	strcpy(ed->version, DRIVER_VERSION);
}

static void dwceqos_get_pauseparam(struct net_device *ndev,
				   struct ethtool_pauseparam *pp)
{
	const struct net_local *lp = netdev_priv(ndev);

	pp->autoneg = lp->flowcontrol.autoneg;
	pp->tx_pause = lp->flowcontrol.tx;
	pp->rx_pause = lp->flowcontrol.rx;
}

static int dwceqos_set_pauseparam(struct net_device *ndev,
				  struct ethtool_pauseparam *pp)
{
	struct net_local *lp = netdev_priv(ndev);
	int ret = 0;

	lp->flowcontrol.autoneg = pp->autoneg;
	if (pp->autoneg) {
		lp->phy_dev->advertising |= ADVERTISED_Pause;
		lp->phy_dev->advertising |= ADVERTISED_Asym_Pause;
	} else {
		lp->phy_dev->advertising &= ~ADVERTISED_Pause;
		lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause;
		lp->flowcontrol.rx = pp->rx_pause;
		lp->flowcontrol.tx = pp->tx_pause;
	}

	if (netif_running(ndev))
		ret = phy_start_aneg(lp->phy_dev);

	return ret;
}

static void dwceqos_get_strings(struct net_device *ndev, u32 stringset,
				u8 *data)
{
	size_t i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
		memcpy(data, dwceqos_ethtool_stats[i].stat_name,
		       ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}
}

static void dwceqos_get_ethtool_stats(struct net_device *ndev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct net_local *lp = netdev_priv(ndev);
	unsigned long flags;
	size_t i;
	u8 *mmcstat = (u8 *)&lp->mmc_counters;

	spin_lock_irqsave(&lp->stats_lock, flags);
	dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
				  lp->mmc_tx_counters_mask);
	spin_unlock_irqrestore(&lp->stats_lock, flags);

	for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
		memcpy(data,
		       mmcstat + dwceqos_ethtool_stats[i].offset,
		       sizeof(u64));
		data++;
	}
}

static int dwceqos_get_sset_count(struct net_device *ndev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(dwceqos_ethtool_stats);

	return -EOPNOTSUPP;
}

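/* Dump the MAC, MTL and DMA register windows for ethtool register dumps. */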
static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *space)
{
	const struct net_local *lp = netdev_priv(dev);
	u32 *reg_space = (u32 *)space;
	int reg_ix = 0;
	size_t reg_offset;

	/* MAC registers */
	for (reg_offset = START_MAC_REG_OFFSET;
	     reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) {
		reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
		reg_ix++;
	}

	/* MTL registers */
	for (reg_offset = START_MTL_REG_OFFSET;
	     reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
		reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
		reg_ix++;
	}

	/* DMA registers */
	for (reg_offset = START_DMA_REG_OFFSET;
	     reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
		reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
		reg_ix++;
	}

	BUG_ON(4 * reg_ix > REG_SPACE_SIZE);
}

static int dwceqos_get_regs_len(struct net_device *dev)
{
	return REG_SPACE_SIZE;
}

static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl)
{
	return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off";
}

static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl)
{
	return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off";
}

static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct net_local *lp = netdev_priv(ndev);
	u32 lpi_status;
	u32 lpi_enabled;

	if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
		return -EOPNOTSUPP;

	edata->eee_active = lp->eee_active;
	edata->eee_enabled = lp->eee_enabled;
	edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER);
	lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
	lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA);
	edata->tx_lpi_enabled = lpi_enabled;

	if (netif_msg_hw(lp)) {
		u32 regval;

		regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);

		netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n",
			    dwceqos_get_rx_lpi_state(regval),
			    dwceqos_get_tx_lpi_state(regval));
	}

	return phy_ethtool_get_eee(lp->phy_dev, edata);
}

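/* Enable or disable EEE. The LPI entry timer is only programmed when both
 * EEE and TX LPI are requested; otherwise LPI signalling is turned off.
 */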
static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct net_local *lp = netdev_priv(ndev);
	u32 regval;
	unsigned long flags;

	if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
		return -EOPNOTSUPP;

	if (edata->eee_enabled && !lp->eee_active)
		return -EOPNOTSUPP;

	if (edata->tx_lpi_enabled) {
		if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN ||
		    edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX)
			return -EINVAL;
	}

	lp->eee_enabled = edata->eee_enabled;

	if (edata->eee_enabled && edata->tx_lpi_enabled) {
		dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER,
			      edata->tx_lpi_timer);

		spin_lock_irqsave(&lp->hw_lock, flags);
		regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
		regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE;
		if (lp->en_tx_lpi_clockgating)
			regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE;
		dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
		spin_unlock_irqrestore(&lp->hw_lock, flags);
	} else {
		spin_lock_irqsave(&lp->hw_lock, flags);
		regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
		regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
		dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
		spin_unlock_irqrestore(&lp->hw_lock, flags);
	}

	return phy_ethtool_set_eee(lp->phy_dev, edata);
}

static u32 dwceqos_get_msglevel(struct net_device *ndev)
{
	const struct net_local *lp = netdev_priv(ndev);

	return lp->msg_enable;
}

static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
{
	struct net_local *lp = netdev_priv(ndev);

	lp->msg_enable = msglevel;
}

static struct ethtool_ops dwceqos_ethtool_ops = {
	.get_settings      = dwceqos_get_settings,
	.set_settings      = dwceqos_set_settings,
	.get_drvinfo       = dwceqos_get_drvinfo,
	.get_link          = ethtool_op_get_link,
	.get_pauseparam    = dwceqos_get_pauseparam,
	.set_pauseparam    = dwceqos_set_pauseparam,
	.get_strings       = dwceqos_get_strings,
	.get_ethtool_stats = dwceqos_get_ethtool_stats,
	.get_sset_count    = dwceqos_get_sset_count,
	.get_regs          = dwceqos_get_regs,
	.get_regs_len      = dwceqos_get_regs_len,
	.get_eee           = dwceqos_get_eee,
	.set_eee           = dwceqos_set_eee,
	.get_msglevel      = dwceqos_get_msglevel,
	.set_msglevel      = dwceqos_set_msglevel,
};

static struct net_device_ops netdev_ops = {
	.ndo_open            = dwceqos_open,
	.ndo_stop            = dwceqos_stop,
	.ndo_start_xmit      = dwceqos_start_xmit,
	.ndo_set_rx_mode     = dwceqos_set_rx_mode,
	.ndo_set_mac_address = dwceqos_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = dwceqos_poll_controller,
#endif
	.ndo_do_ioctl        = dwceqos_ioctl,
	.ndo_tx_timeout      = dwceqos_tx_timeout,
	.ndo_get_stats64     = dwceqos_get_stats64,
};

static const struct of_device_id dwceq_of_match[] = {
	{ .compatible = "snps,dwc-qos-ethernet-4.10", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, dwceq_of_match);

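/* Probe: map the register space, enable clocks, read the synthesis-time
 * feature registers, and register the net_device before the PHY and MDIO
 * bus are brought up.
 */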
static int dwceqos_probe(struct platform_device *pdev)
{
	struct resource *r_mem = NULL;
	struct net_device *ndev;
	struct net_local *lp;
	int ret = -ENXIO;

	r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r_mem) {
		dev_err(&pdev->dev, "no IO resource defined.\n");
		return -ENXIO;
	}

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev) {
		dev_err(&pdev->dev, "etherdev allocation failed.\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->pdev = pdev;
	lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT);

	spin_lock_init(&lp->tx_lock);
	spin_lock_init(&lp->hw_lock);
	spin_lock_init(&lp->stats_lock);

	lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(lp->apb_pclk)) {
		dev_err(&pdev->dev, "apb_pclk clock not found.\n");
		ret = PTR_ERR(lp->apb_pclk);
		goto err_out_free_netdev;
	}

	ret = clk_prepare_enable(lp->apb_pclk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable APER clock.\n");
		goto err_out_free_netdev;
	}

	lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem);
	if (IS_ERR(lp->baseaddr)) {
		dev_err(&pdev->dev, "failed to map baseaddress.\n");
		ret = PTR_ERR(lp->baseaddr);
		goto err_out_clk_dis_aper;
	}

	ndev->irq = platform_get_irq(pdev, 0);
	ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ;
	ndev->netdev_ops = &netdev_ops;
	ndev->ethtool_ops = &dwceqos_ethtool_ops;
	ndev->base_addr = r_mem->start;

	dwceqos_get_hwfeatures(lp);
	dwceqos_mdio_set_csr(lp);

	ndev->hw_features = NETIF_F_SG;

	if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL)
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL)
		ndev->hw_features |= NETIF_F_RXCSUM;

	ndev->features = ndev->hw_features;

	netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_clk_dis_aper;
	}

	lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
	if (IS_ERR(lp->phy_ref_clk)) {
		dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
		ret = PTR_ERR(lp->phy_ref_clk);
		goto err_out_unregister_netdev;
	}

	ret = clk_prepare_enable(lp->phy_ref_clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable device clock.\n");
		goto err_out_unregister_netdev;
	}

	lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
					"phy-handle", 0);
	if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) {
		ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
		if (ret < 0) {
			dev_err(&pdev->dev, "invalid fixed-link");
			goto err_out_unregister_netdev;
		}

		lp->phy_node = of_node_get(lp->pdev->dev.of_node);
	}

	ret = of_get_phy_mode(lp->pdev->dev.of_node);
	if (ret < 0) {
		dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
		goto err_out_unregister_clk_notifier;
	}

	lp->phy_interface = ret;

	ret = dwceqos_mii_init(lp);
	if (ret) {
		dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
		goto err_out_unregister_clk_notifier;
	}

	ret = dwceqos_mii_probe(ndev);
	if (ret != 0) {
		netdev_err(ndev, "mii_probe fail.\n");
		ret = -ENXIO;
		goto err_out_unregister_clk_notifier;
	}

	dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);

	tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim,
		     (unsigned long)ndev);
	tasklet_disable(&lp->tx_bdreclaim_tasklet);

	lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME);
	INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout);

	platform_set_drvdata(pdev, ndev);
	ret = dwceqos_probe_config_dt(pdev);
	if (ret) {
		dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
			ret);
		goto err_out_unregister_clk_notifier;
	}
	dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
		 pdev->id, ndev->base_addr, ndev->irq);

	ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0,
			       ndev->name, ndev);
	if (ret) {
		dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
			ndev->irq, ret);
		goto err_out_unregister_clk_notifier;
	}

	if (netif_msg_probe(lp))
		netdev_dbg(ndev, "net_local@%p\n", lp);

	return 0;

err_out_unregister_clk_notifier:
	clk_disable_unprepare(lp->phy_ref_clk);
err_out_unregister_netdev:
	unregister_netdev(ndev);
err_out_clk_dis_aper:
	clk_disable_unprepare(lp->apb_pclk);
err_out_free_netdev:
	of_node_put(lp->phy_node);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int dwceqos_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct net_local *lp;

	if (ndev) {
		lp = netdev_priv(ndev);

		if (lp->phy_dev)
			phy_disconnect(lp->phy_dev);
		mdiobus_unregister(lp->mii_bus);
		mdiobus_free(lp->mii_bus);

		unregister_netdev(ndev);

		clk_disable_unprepare(lp->phy_ref_clk);
		clk_disable_unprepare(lp->apb_pclk);

		free_netdev(ndev);
	}

	return 0;
}

static struct platform_driver dwceqos_driver = {
	.probe   = dwceqos_probe,
	.remove  = dwceqos_remove,
	.driver  = {
		.name           = DRIVER_NAME,
		.of_match_table = dwceq_of_match,
	},
};

module_platform_driver(dwceqos_driver);

MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>");
MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>");