/*
 * Copyright (C) 2006-2009 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Shlomi Gridish <gridish@freescale.com>
 *	   Li Yang <leoli@freescale.com>
 *
 * Description:
 * QE UCC Gigabit Ethernet Driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/ucc.h>
#include <soc/fsl/qe/ucc_fast.h>
#include <asm/machdep.h>

#include "ucc_geth.h"

#undef DEBUG

#define ugeth_printk(level, format, arg...)  \
	printk(level format "\n", ## arg)

#define ugeth_dbg(format, arg...)            \
	ugeth_printk(KERN_DEBUG , format , ## arg)

#ifdef UGETH_VERBOSE_DEBUG
#define ugeth_vdbg ugeth_dbg
#else
#define ugeth_vdbg(fmt, args...) do { } while (0)
#endif				/* UGETH_VERBOSE_DEBUG */
#define UGETH_MSG_DEFAULT	(NETIF_MSG_IFUP << 1 ) - 1

static DEFINE_SPINLOCK(ugeth_lock);

static struct {
	u32 msg_enable;
} debug = { -1 };

module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");

static struct ucc_geth_info ugeth_primary_info = {
	.uf_info = {
		    .bd_mem_part = MEM_PART_SYSTEM,
		    .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
		    .max_rx_buf_length = 1536,
		    /* adjusted at startup if max-speed 1000 */
		    .urfs = UCC_GETH_URFS_INIT,
		    .urfet = UCC_GETH_URFET_INIT,
		    .urfset = UCC_GETH_URFSET_INIT,
		    .utfs = UCC_GETH_UTFS_INIT,
		    .utfet = UCC_GETH_UTFET_INIT,
		    .utftt = UCC_GETH_UTFTT_INIT,
		    .ufpt = 256,
		    .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
		    .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		    .tenc = UCC_FAST_TX_ENCODING_NRZ,
		    .renc = UCC_FAST_RX_ENCODING_NRZ,
		    .tcrc = UCC_FAST_16_BIT_CRC,
		    .synl = UCC_FAST_SYNC_LEN_NOT_USED,
		    },
	.numQueuesTx = 1,
	.numQueuesRx = 1,
	.extendedFilteringChainPointer = ((uint32_t) NULL),
	.typeorlen = 3072 /*1536 */ ,
	.nonBackToBackIfgPart1 = 0x40,
	.nonBackToBackIfgPart2 = 0x60,
	.miminumInterFrameGapEnforcement = 0x50,
	.backToBackInterFrameGap = 0x60,
	.mblinterval = 128,
	.nortsrbytetime = 5,
	.fracsiz = 1,
	.strictpriorityq = 0xff,
	.altBebTruncation = 0xa,
	.excessDefer = 1,
	.maxRetransmission = 0xf,
	.collisionWindow = 0x37,
	.receiveFlowControl = 1,
	.transmitFlowControl = 1,
	.maxGroupAddrInHash = 4,
	.maxIndAddrInHash = 4,
	.prel = 7,
	.maxFrameLength = 1518 + 16,	/* Add extra bytes for VLANs etc. */
	.minFrameLength = 64,
	.maxD1Length = 1520 + 16,	/* Add extra bytes for VLANs etc. */
	.maxD2Length = 1520 + 16,	/* Add extra bytes for VLANs etc. */
	.vlantype = 0x8100,
	.ecamptr = ((uint32_t) NULL),
	.eventRegMask = UCCE_OTHER,
	.pausePeriod = 0xf000,
	.interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
	.bdRingLenTx = {
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN,
			TX_BD_RING_LEN},

	.bdRingLenRx = {
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN,
			RX_BD_RING_LEN},

	.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
	.largestexternallookupkeysize =
	    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
	.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
	.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
	.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
	.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
	.aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
	.padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
	.numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
	.numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
	.riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};

static struct ucc_geth_info ugeth_info[8];

#ifdef DEBUG
static void mem_disp(u8 *addr, int size)
{
	u8 *i;
	int size16Aling = (size >> 4) << 4;
	int size4Aling = (size >> 2) << 2;
	int notAlign = 0;
	if (size % 16)
		notAlign = 1;

	for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
		printk("0x%08x: %08x %08x %08x %08x\r\n",
		       (u32) i,
		       *((u32 *) (i)),
		       *((u32 *) (i + 4)),
		       *((u32 *) (i + 8)), *((u32 *) (i + 12)));
	if (notAlign == 1)
		printk("0x%08x: ", (u32) i);
	for (; (u32) i < (u32) addr + size4Aling; i += 4)
		printk("%08x ", *((u32 *) (i)));
	for (; (u32) i < (u32) addr + size; i++)
		printk("%02x", *((i)));
	if (notAlign == 1)
		printk("\r\n");
}
#endif /* DEBUG */

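/* Remove and return the first node of a list, or NULL if the list is
 * empty.  The global ugeth_lock protects the list manipulation.
 */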
static struct list_head *dequeue(struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	if (!list_empty(lh)) {
		struct list_head *node = lh->next;
		list_del(node);
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return node;
	} else {
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return NULL;
	}
}

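/* Allocate a fresh Rx skb, align its data buffer to
 * UCC_GETH_RX_DATA_BUF_ALIGNMENT, DMA-map it and hand it to the hardware
 * by writing the buffer address and (R_E | R_I) into the buffer descriptor.
 */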
static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
				   u8 __iomem *bd)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(ugeth->ndev,
			       ugeth->ug_info->uf_info.max_rx_buf_length +
			       UCC_GETH_RX_DATA_BUF_ALIGNMENT);
	if (!skb)
		return NULL;

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb,
		    UCC_GETH_RX_DATA_BUF_ALIGNMENT -
		    (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
					      1)));

	out_be32(&((struct qe_bd __iomem *)bd)->buf,
		 dma_map_single(ugeth->dev,
				skb->data,
				ugeth->ug_info->uf_info.max_rx_buf_length +
				UCC_GETH_RX_DATA_BUF_ALIGNMENT,
				DMA_FROM_DEVICE));

	out_be32((u32 __iomem *)bd,
		 (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));

	return skb;
}

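/* Populate every buffer descriptor of Rx queue rxQ with a newly allocated
 * skb; the loop stops after the descriptor carrying the wrap bit (R_W).
 */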
static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
{
	u8 __iomem *bd;
	u32 bd_status;
	struct sk_buff *skb;
	int i;

	bd = ugeth->p_rx_bd_ring[rxQ];
	i = 0;

	do {
		bd_status = in_be32((u32 __iomem *)bd);
		skb = get_new_skb(ugeth, bd);

		if (!skb)	/* If can not allocate data buffer,
				   abort. Cleanup will be elsewhere */
			return -ENOMEM;

		ugeth->rx_skbuff[rxQ][i] = skb;

		/* advance the BD pointer */
		bd += sizeof(struct qe_bd);
		i++;
	} while (!(bd_status & R_W));

	return 0;
}

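/* Build the init-enet entries used by the QE INIT ENET command: for each
 * entry reserve a QE SNUM and, except for the first Rx entry when
 * skip_page_for_first_entry is set, a thread parameter page in MURAM.
 */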
static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
				  u32 *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  u32 thread_alignment,
				  unsigned int risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		if ((snum = qe_get_snum()) < 0) {
			if (netif_msg_ifup(ugeth))
				pr_err("Can not get SNUM\n");
			return snum;
		}
		if ((i == 0) && skip_page_for_first_entry)
			/* First entry of Rx does not have page */
			init_enet_offset = 0;
		else {
			init_enet_offset =
			    qe_muram_alloc(thread_size, thread_alignment);
			if (IS_ERR_VALUE(init_enet_offset)) {
				if (netif_msg_ifup(ugeth))
					pr_err("Can not allocate DPRAM memory\n");
				qe_put_snum((u8) snum);
				return -ENOMEM;
			}
		}
		*(p_start++) =
		    ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
		    | risc;
	}

	return 0;
}

static int return_init_enet_entries(struct ucc_geth_private *ugeth,
				    u32 *p_start,
				    u8 num_entries,
				    unsigned int risc,
				    int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		u32 val = *p_start;

		/* Check that this entry was actually valid --
		   needed in case failed in allocations */
		if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (val & ENET_INIT_PARAM_PTR_MASK);
				qe_muram_free(init_enet_offset);
			}
			*p_start++ = 0;
		}
	}

	return 0;
}

#ifdef DEBUG
static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
				  u32 __iomem *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  unsigned int risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		u32 val = in_be32(p_start);

		/* Check that this entry was actually valid --
		   needed in case failed in allocations */
		if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (in_be32(p_start) &
				     ENET_INIT_PARAM_PTR_MASK);
				pr_info("Init enet entry %d:\n", i);
				pr_info("Base address: 0x%08x\n",
					(u32)qe_muram_addr(init_enet_offset));
				mem_disp(qe_muram_addr(init_enet_offset),
					 thread_size);
			}
			p_start++;
		}
	}

	return 0;
}
#endif

static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
{
	kfree(enet_addr_cont);
}

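/* Write a MAC address into three 16-bit registers in the byte-reversed
 * order the address filter expects, e.g. for 00:04:9f:01:02:03 this
 * stores 0x0302, 0x019f and 0x0400 in reg[0..2].
 */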
static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
{
	out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
	out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
	out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
}

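/* Disable one of the NUM_OF_PADDRS exact-match address registers by
 * writing the "no match" pattern ff:ff:ff:ff:ff:ff into it.
 */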
static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;

	if (paddr_num >= NUM_OF_PADDRS) {
		pr_warn("%s: Invalid paddr_num: %u\n", __func__, paddr_num);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	/* Writing address ff.ff.ff.ff.ff.ff disables address
	   recognition for this register */
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);

	return 0;
}

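/* Add an address to the hardware group hash table by writing it into the
 * address-filtering parameter RAM and issuing the QE SET GROUP ADDRESS
 * command for this UCC.
 */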
static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
				u8 *p_enet_addr)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
	u32 cecr_subblock;

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);

	/* Ethernet frames are defined in Little Endian mode,
	   therefore to insert */
	/* the address to the hash (Big Endian mode), we reverse the bytes.*/

	set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);

	qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);
}

#ifdef DEBUG
static void get_statistics(struct ucc_geth_private *ugeth,
			   struct ucc_geth_tx_firmware_statistics *
			   tx_firmware_statistics,
			   struct ucc_geth_rx_firmware_statistics *
			   rx_firmware_statistics,
			   struct ucc_geth_hardware_statistics *hardware_statistics)
{
	struct ucc_fast __iomem *uf_regs;
	struct ucc_geth __iomem *ug_regs;
	struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
	struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;

	ug_regs = ugeth->ug_regs;
	uf_regs = (struct ucc_fast __iomem *) ug_regs;
	p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
	p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;

	/* Tx firmware only if user handed pointer and driver actually
	   gathers Tx firmware statistics */
	if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
		tx_firmware_statistics->sicoltx =
		    in_be32(&p_tx_fw_statistics_pram->sicoltx);
		tx_firmware_statistics->mulcoltx =
		    in_be32(&p_tx_fw_statistics_pram->mulcoltx);
		tx_firmware_statistics->latecoltxfr =
		    in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
		tx_firmware_statistics->frabortduecol =
		    in_be32(&p_tx_fw_statistics_pram->frabortduecol);
		tx_firmware_statistics->frlostinmactxer =
		    in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
		tx_firmware_statistics->carriersenseertx =
		    in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
		tx_firmware_statistics->frtxok =
		    in_be32(&p_tx_fw_statistics_pram->frtxok);
		tx_firmware_statistics->txfrexcessivedefer =
		    in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
		tx_firmware_statistics->txpkts256 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts256);
		tx_firmware_statistics->txpkts512 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts512);
		tx_firmware_statistics->txpkts1024 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts1024);
		tx_firmware_statistics->txpktsjumbo =
		    in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
	}

	/* Rx firmware only if user handed pointer and driver actually
	 * gathers Rx firmware statistics */
	if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
		int i;
		rx_firmware_statistics->frrxfcser =
		    in_be32(&p_rx_fw_statistics_pram->frrxfcser);
		rx_firmware_statistics->fraligner =
		    in_be32(&p_rx_fw_statistics_pram->fraligner);
		rx_firmware_statistics->inrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
		rx_firmware_statistics->outrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
		rx_firmware_statistics->frtoolong =
		    in_be32(&p_rx_fw_statistics_pram->frtoolong);
		rx_firmware_statistics->runt =
		    in_be32(&p_rx_fw_statistics_pram->runt);
		rx_firmware_statistics->verylongevent =
		    in_be32(&p_rx_fw_statistics_pram->verylongevent);
		rx_firmware_statistics->symbolerror =
		    in_be32(&p_rx_fw_statistics_pram->symbolerror);
		rx_firmware_statistics->dropbsy =
		    in_be32(&p_rx_fw_statistics_pram->dropbsy);
		for (i = 0; i < 0x8; i++)
			rx_firmware_statistics->res0[i] =
			    p_rx_fw_statistics_pram->res0[i];
		rx_firmware_statistics->mismatchdrop =
		    in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
		rx_firmware_statistics->underpkts =
		    in_be32(&p_rx_fw_statistics_pram->underpkts);
		rx_firmware_statistics->pkts256 =
		    in_be32(&p_rx_fw_statistics_pram->pkts256);
		rx_firmware_statistics->pkts512 =
		    in_be32(&p_rx_fw_statistics_pram->pkts512);
		rx_firmware_statistics->pkts1024 =
		    in_be32(&p_rx_fw_statistics_pram->pkts1024);
		rx_firmware_statistics->pktsjumbo =
		    in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
		rx_firmware_statistics->frlossinmacer =
		    in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
		rx_firmware_statistics->pausefr =
		    in_be32(&p_rx_fw_statistics_pram->pausefr);
		for (i = 0; i < 0x4; i++)
			rx_firmware_statistics->res1[i] =
			    p_rx_fw_statistics_pram->res1[i];
		rx_firmware_statistics->removevlan =
		    in_be32(&p_rx_fw_statistics_pram->removevlan);
		rx_firmware_statistics->replacevlan =
		    in_be32(&p_rx_fw_statistics_pram->replacevlan);
		rx_firmware_statistics->insertvlan =
		    in_be32(&p_rx_fw_statistics_pram->insertvlan);
	}

	/* Hardware only if user handed pointer and driver actually
	   gathers hardware statistics */
	if (hardware_statistics &&
	    (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) {
		hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
		hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
		hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
		hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
		hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
		hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
		hardware_statistics->txok = in_be32(&ug_regs->txok);
		hardware_statistics->txcf = in_be16(&ug_regs->txcf);
		hardware_statistics->tmca = in_be32(&ug_regs->tmca);
		hardware_statistics->tbca = in_be32(&ug_regs->tbca);
		hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
		hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
		hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
		hardware_statistics->rmca = in_be32(&ug_regs->rmca);
		hardware_statistics->rbca = in_be32(&ug_regs->rbca);
	}
}

static void dump_bds(struct ucc_geth_private *ugeth)
{
	int i;
	int length;

	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
		if (ugeth->p_tx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenTx[i] *
			     sizeof(struct qe_bd));
			pr_info("TX BDs[%d]\n", i);
			mem_disp(ugeth->p_tx_bd_ring[i], length);
		}
	}
	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
		if (ugeth->p_rx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenRx[i] *
			     sizeof(struct qe_bd));
			pr_info("RX BDs[%d]\n", i);
			mem_disp(ugeth->p_rx_bd_ring[i], length);
		}
	}
}

static void dump_regs(struct ucc_geth_private *ugeth)
{
	int i;

	pr_info("UCC%d Geth registers:\n", ugeth->ug_info->uf_info.ucc_num + 1);
	pr_info("Base address: 0x%08x\n", (u32)ugeth->ug_regs);

	pr_info("maccfg1    : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->maccfg1,
		in_be32(&ugeth->ug_regs->maccfg1));
	pr_info("maccfg2    : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->maccfg2,
		in_be32(&ugeth->ug_regs->maccfg2));
	pr_info("ipgifg     : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->ipgifg,
		in_be32(&ugeth->ug_regs->ipgifg));
	pr_info("hafdup     : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->hafdup,
		in_be32(&ugeth->ug_regs->hafdup));
	pr_info("ifctl      : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->ifctl,
		in_be32(&ugeth->ug_regs->ifctl));
	pr_info("ifstat     : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->ifstat,
		in_be32(&ugeth->ug_regs->ifstat));
	pr_info("macstnaddr1: addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->macstnaddr1,
		in_be32(&ugeth->ug_regs->macstnaddr1));
	pr_info("macstnaddr2: addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->macstnaddr2,
		in_be32(&ugeth->ug_regs->macstnaddr2));
	pr_info("uempr      : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->uempr,
		in_be32(&ugeth->ug_regs->uempr));
	pr_info("utbipar    : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->utbipar,
		in_be32(&ugeth->ug_regs->utbipar));
	pr_info("uescr      : addr - 0x%08x, val - 0x%04x\n",
		(u32)&ugeth->ug_regs->uescr,
		in_be16(&ugeth->ug_regs->uescr));
	pr_info("tx64       : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->tx64,
		in_be32(&ugeth->ug_regs->tx64));
	pr_info("tx127      : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->tx127,
		in_be32(&ugeth->ug_regs->tx127));
	pr_info("tx255      : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->tx255,
		in_be32(&ugeth->ug_regs->tx255));
	pr_info("rx64       : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->rx64,
		in_be32(&ugeth->ug_regs->rx64));
	pr_info("rx127      : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->rx127,
		in_be32(&ugeth->ug_regs->rx127));
	pr_info("rx255      : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->rx255,
		in_be32(&ugeth->ug_regs->rx255));
	pr_info("txok       : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->txok,
		in_be32(&ugeth->ug_regs->txok));
	pr_info("txcf       : addr - 0x%08x, val - 0x%04x\n",
		(u32)&ugeth->ug_regs->txcf,
		in_be16(&ugeth->ug_regs->txcf));
	pr_info("tmca       : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->tmca,
		in_be32(&ugeth->ug_regs->tmca));
	pr_info("tbca       : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->tbca,
		in_be32(&ugeth->ug_regs->tbca));
	pr_info("rxfok      : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->rxfok,
		in_be32(&ugeth->ug_regs->rxfok));
	pr_info("rxbok      : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->rxbok,
		in_be32(&ugeth->ug_regs->rxbok));
	pr_info("rbyt       : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->rbyt,
		in_be32(&ugeth->ug_regs->rbyt));
	pr_info("rmca       : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->rmca,
		in_be32(&ugeth->ug_regs->rmca));
	pr_info("rbca       : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->rbca,
		in_be32(&ugeth->ug_regs->rbca));
	pr_info("scar       : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->scar,
		in_be32(&ugeth->ug_regs->scar));
	pr_info("scam       : addr - 0x%08x, val - 0x%08x\n",
		(u32)&ugeth->ug_regs->scam,
		in_be32(&ugeth->ug_regs->scam));

	if (ugeth->p_thread_data_tx) {
		int numThreadsTxNumerical;
		switch (ugeth->ug_info->numThreadsTx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsTxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsTxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsTxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsTxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsTxNumerical = 8;
			break;
		default:
			numThreadsTxNumerical = 0;
			break;
		}

		pr_info("Thread data TXs:\n");
		pr_info("Base address: 0x%08x\n",
			(u32)ugeth->p_thread_data_tx);
		for (i = 0; i < numThreadsTxNumerical; i++) {
			pr_info("Thread data TX[%d]:\n", i);
			pr_info("Base address: 0x%08x\n",
				(u32)&ugeth->p_thread_data_tx[i]);
			mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
				 sizeof(struct ucc_geth_thread_data_tx));
		}
	}
	if (ugeth->p_thread_data_rx) {
		int numThreadsRxNumerical;
		switch (ugeth->ug_info->numThreadsRx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsRxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsRxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsRxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsRxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsRxNumerical = 8;
			break;
		default:
			numThreadsRxNumerical = 0;
			break;
		}

		pr_info("Thread data RX:\n");
		pr_info("Base address: 0x%08x\n",
			(u32)ugeth->p_thread_data_rx);
		for (i = 0; i < numThreadsRxNumerical; i++) {
			pr_info("Thread data RX[%d]:\n", i);
			pr_info("Base address: 0x%08x\n",
				(u32)&ugeth->p_thread_data_rx[i]);
			mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
				 sizeof(struct ucc_geth_thread_data_rx));
		}
	}
	if (ugeth->p_exf_glbl_param) {
		pr_info("EXF global param:\n");
		pr_info("Base address: 0x%08x\n",
			(u32)ugeth->p_exf_glbl_param);
		mem_disp((u8 *) ugeth->p_exf_glbl_param,
			 sizeof(*ugeth->p_exf_glbl_param));
	}
	if (ugeth->p_tx_glbl_pram) {
		pr_info("TX global param:\n");
		pr_info("Base address: 0x%08x\n", (u32)ugeth->p_tx_glbl_pram);
		pr_info("temoder      : addr - 0x%08x, val - 0x%04x\n",
			(u32)&ugeth->p_tx_glbl_pram->temoder,
			in_be16(&ugeth->p_tx_glbl_pram->temoder));
		pr_info("sqptr        : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_tx_glbl_pram->sqptr,
			in_be32(&ugeth->p_tx_glbl_pram->sqptr));
		pr_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_tx_glbl_pram->schedulerbasepointer,
			in_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer));
		pr_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_tx_glbl_pram->txrmonbaseptr,
			in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
		pr_info("tstate       : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_tx_glbl_pram->tstate,
			in_be32(&ugeth->p_tx_glbl_pram->tstate));
		pr_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x\n",
			(u32)&ugeth->p_tx_glbl_pram->iphoffset[0],
			ugeth->p_tx_glbl_pram->iphoffset[0]);
		pr_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x\n",
			(u32)&ugeth->p_tx_glbl_pram->iphoffset[1],
			ugeth->p_tx_glbl_pram->iphoffset[1]);
		pr_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x\n",
			(u32)&ugeth->p_tx_glbl_pram->iphoffset[2],
			ugeth->p_tx_glbl_pram->iphoffset[2]);
		pr_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x\n",
			(u32)&ugeth->p_tx_glbl_pram->iphoffset[3],
			ugeth->p_tx_glbl_pram->iphoffset[3]);
		pr_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x\n",
			(u32)&ugeth->p_tx_glbl_pram->iphoffset[4],
			ugeth->p_tx_glbl_pram->iphoffset[4]);
		pr_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x\n",
			(u32)&ugeth->p_tx_glbl_pram->iphoffset[5],
			ugeth->p_tx_glbl_pram->iphoffset[5]);
		pr_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x\n",
			(u32)&ugeth->p_tx_glbl_pram->iphoffset[6],
			ugeth->p_tx_glbl_pram->iphoffset[6]);
		pr_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x\n",
			(u32)&ugeth->p_tx_glbl_pram->iphoffset[7],
			ugeth->p_tx_glbl_pram->iphoffset[7]);
		pr_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_tx_glbl_pram->vtagtable[0],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
		pr_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_tx_glbl_pram->vtagtable[1],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
		pr_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_tx_glbl_pram->vtagtable[2],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
		pr_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_tx_glbl_pram->vtagtable[3],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
		pr_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_tx_glbl_pram->vtagtable[4],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
		pr_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_tx_glbl_pram->vtagtable[5],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
		pr_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_tx_glbl_pram->vtagtable[6],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
		pr_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_tx_glbl_pram->vtagtable[7],
			in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
		pr_info("tqptr        : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_tx_glbl_pram->tqptr,
			in_be32(&ugeth->p_tx_glbl_pram->tqptr));
	}
	if (ugeth->p_rx_glbl_pram) {
		pr_info("RX global param:\n");
		pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_glbl_pram);
		pr_info("remoder         : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->remoder,
			in_be32(&ugeth->p_rx_glbl_pram->remoder));
		pr_info("rqptr           : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->rqptr,
			in_be32(&ugeth->p_rx_glbl_pram->rqptr));
		pr_info("typeorlen       : addr - 0x%08x, val - 0x%04x\n",
			(u32)&ugeth->p_rx_glbl_pram->typeorlen,
			in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
		pr_info("rxgstpack       : addr - 0x%08x, val - 0x%02x\n",
			(u32)&ugeth->p_rx_glbl_pram->rxgstpack,
			ugeth->p_rx_glbl_pram->rxgstpack);
		pr_info("rxrmonbaseptr   : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
			in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
		pr_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->intcoalescingptr,
			in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
		pr_info("rstate          : addr - 0x%08x, val - 0x%02x\n",
			(u32)&ugeth->p_rx_glbl_pram->rstate,
			ugeth->p_rx_glbl_pram->rstate);
		pr_info("mrblr           : addr - 0x%08x, val - 0x%04x\n",
			(u32)&ugeth->p_rx_glbl_pram->mrblr,
			in_be16(&ugeth->p_rx_glbl_pram->mrblr));
		pr_info("rbdqptr         : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->rbdqptr,
			in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
		pr_info("mflr            : addr - 0x%08x, val - 0x%04x\n",
			(u32)&ugeth->p_rx_glbl_pram->mflr,
			in_be16(&ugeth->p_rx_glbl_pram->mflr));
		pr_info("minflr          : addr - 0x%08x, val - 0x%04x\n",
			(u32)&ugeth->p_rx_glbl_pram->minflr,
			in_be16(&ugeth->p_rx_glbl_pram->minflr));
		pr_info("maxd1           : addr - 0x%08x, val - 0x%04x\n",
			(u32)&ugeth->p_rx_glbl_pram->maxd1,
			in_be16(&ugeth->p_rx_glbl_pram->maxd1));
		pr_info("maxd2           : addr - 0x%08x, val - 0x%04x\n",
			(u32)&ugeth->p_rx_glbl_pram->maxd2,
			in_be16(&ugeth->p_rx_glbl_pram->maxd2));
		pr_info("ecamptr         : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->ecamptr,
			in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
		pr_info("l2qt            : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->l2qt,
			in_be32(&ugeth->p_rx_glbl_pram->l2qt));
		pr_info("l3qt[0]         : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->l3qt[0],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
		pr_info("l3qt[1]         : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->l3qt[1],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
		pr_info("l3qt[2]         : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->l3qt[2],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
		pr_info("l3qt[3]         : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->l3qt[3],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
		pr_info("l3qt[4]         : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->l3qt[4],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
		pr_info("l3qt[5]         : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->l3qt[5],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
		pr_info("l3qt[6]         : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->l3qt[6],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
		pr_info("l3qt[7]         : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->l3qt[7],
			in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
		pr_info("vlantype        : addr - 0x%08x, val - 0x%04x\n",
			(u32)&ugeth->p_rx_glbl_pram->vlantype,
			in_be16(&ugeth->p_rx_glbl_pram->vlantype));
		pr_info("vlantci         : addr - 0x%08x, val - 0x%04x\n",
			(u32)&ugeth->p_rx_glbl_pram->vlantci,
			in_be16(&ugeth->p_rx_glbl_pram->vlantci));
		for (i = 0; i < 64; i++)
			pr_info("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x\n",
				i,
				(u32)&ugeth->p_rx_glbl_pram->addressfiltering[i],
				ugeth->p_rx_glbl_pram->addressfiltering[i]);
		pr_info("exfGlobalParam  : addr - 0x%08x, val - 0x%08x\n",
			(u32)&ugeth->p_rx_glbl_pram->exfGlobalParam,
			in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
	}
	if (ugeth->p_send_q_mem_reg) {
		pr_info("Send Q memory registers:\n");
		pr_info("Base address: 0x%08x\n", (u32)ugeth->p_send_q_mem_reg);
		for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
			pr_info("SQQD[%d]:\n", i);
			pr_info("Base address: 0x%08x\n",
				(u32)&ugeth->p_send_q_mem_reg->sqqd[i]);
			mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
				 sizeof(struct ucc_geth_send_queue_qd));
		}
	}
	if (ugeth->p_scheduler) {
		pr_info("Scheduler:\n");
		pr_info("Base address: 0x%08x\n", (u32)ugeth->p_scheduler);
		mem_disp((u8 *) ugeth->p_scheduler,
			 sizeof(*ugeth->p_scheduler));
	}
	if (ugeth->p_tx_fw_statistics_pram) {
		pr_info("TX FW statistics pram:\n");
		pr_info("Base address: 0x%08x\n",
			(u32)ugeth->p_tx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
			 sizeof(*ugeth->p_tx_fw_statistics_pram));
	}
	if (ugeth->p_rx_fw_statistics_pram) {
		pr_info("RX FW statistics pram:\n");
		pr_info("Base address: 0x%08x\n",
			(u32)ugeth->p_rx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
			 sizeof(*ugeth->p_rx_fw_statistics_pram));
	}
	if (ugeth->p_rx_irq_coalescing_tbl) {
		pr_info("RX IRQ coalescing tables:\n");
		pr_info("Base address: 0x%08x\n",
			(u32)ugeth->p_rx_irq_coalescing_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			pr_info("RX IRQ coalescing table entry[%d]:\n", i);
			pr_info("Base address: 0x%08x\n",
				(u32)&ugeth->p_rx_irq_coalescing_tbl->
				coalescingentry[i]);
			pr_info("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x\n",
				(u32)&ugeth->p_rx_irq_coalescing_tbl->
				coalescingentry[i].interruptcoalescingmaxvalue,
				in_be32(&ugeth->p_rx_irq_coalescing_tbl->
					coalescingentry[i].
					interruptcoalescingmaxvalue));
			pr_info("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x\n",
				(u32)&ugeth->p_rx_irq_coalescing_tbl->
				coalescingentry[i].interruptcoalescingcounter,
				in_be32(&ugeth->p_rx_irq_coalescing_tbl->
					coalescingentry[i].
					interruptcoalescingcounter));
		}
	}
	if (ugeth->p_rx_bd_qs_tbl) {
		pr_info("RX BD QS tables:\n");
		pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_bd_qs_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			pr_info("RX BD QS table[%d]:\n", i);
			pr_info("Base address: 0x%08x\n",
				(u32)&ugeth->p_rx_bd_qs_tbl[i]);
			pr_info("bdbaseptr        : addr - 0x%08x, val - 0x%08x\n",
				(u32)&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
				in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
			pr_info("bdptr            : addr - 0x%08x, val - 0x%08x\n",
				(u32)&ugeth->p_rx_bd_qs_tbl[i].bdptr,
				in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
			pr_info("externalbdbaseptr: addr - 0x%08x, val - 0x%08x\n",
				(u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
				in_be32(&ugeth->p_rx_bd_qs_tbl[i].
					externalbdbaseptr));
			pr_info("externalbdptr    : addr - 0x%08x, val - 0x%08x\n",
				(u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
				in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
			pr_info("ucode RX Prefetched BDs:\n");
			pr_info("Base address: 0x%08x\n",
				(u32)qe_muram_addr(in_be32
						   (&ugeth->p_rx_bd_qs_tbl[i].
						    bdbaseptr)));
			mem_disp((u8 *)
				 qe_muram_addr(in_be32
					       (&ugeth->p_rx_bd_qs_tbl[i].
						bdbaseptr)),
				 sizeof(struct ucc_geth_rx_prefetched_bds));
		}
	}
	if (ugeth->p_init_enet_param_shadow) {
		int size;
		pr_info("Init enet param shadow:\n");
		pr_info("Base address: 0x%08x\n",
			(u32) ugeth->p_init_enet_param_shadow);
		mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
			 sizeof(*ugeth->p_init_enet_param_shadow));

		size = sizeof(struct ucc_geth_thread_rx_pram);
		if (ugeth->ug_info->rxExtendedFiltering) {
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
		}

		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 txthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_TX,
				       sizeof(struct ucc_geth_thread_tx_pram),
				       ugeth->ug_info->riscTx, 0);
		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 rxthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
				       ugeth->ug_info->riscRx, 1);
	}
}
#endif /* DEBUG */

static void init_default_reg_vals(u32 __iomem *upsmr_register,
				  u32 __iomem *maccfg1_register,
				  u32 __iomem *maccfg2_register)
{
	out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
	out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
	out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
}

static int init_half_duplex_params(int alt_beb,
				   int back_pressure_no_backoff,
				   int no_backoff,
				   int excess_defer,
				   u8 alt_beb_truncation,
				   u8 max_retransmissions,
				   u8 collision_window,
				   u32 __iomem *hafdup_register)
{
	u32 value = 0;

	if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
	    (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
	    (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
		return -EINVAL;

	value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);

	if (alt_beb)
		value |= HALFDUP_ALT_BEB;
	if (back_pressure_no_backoff)
		value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
	if (no_backoff)
		value |= HALFDUP_NO_BACKOFF;
	if (excess_defer)
		value |= HALFDUP_EXCESSIVE_DEFER;

	value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);

	value |= collision_window;

	out_be32(hafdup_register, value);
	return 0;
}

static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
				       u8 non_btb_ipg,
				       u8 min_ifg,
				       u8 btb_ipg,
				       u32 __iomem *ipgifg_register)
{
	u32 value = 0;

	/* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
	   IPG part 2 */
	if (non_btb_cs_ipg > non_btb_ipg)
		return -EINVAL;

	if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
	    (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
	    /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
	    (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
		return -EINVAL;

	value |=
	    ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
	     IPGIFG_NBTB_CS_IPG_MASK);
	value |=
	    ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
	     IPGIFG_NBTB_IPG_MASK);
	value |=
	    ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
	     IPGIFG_MIN_IFG_MASK);
	value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);

	out_be32(ipgifg_register, value);
	return 0;
}

int init_flow_control_params(u32 automatic_flow_control_mode,
			     int rx_flow_control_enable,
			     int tx_flow_control_enable,
			     u16 pause_period,
			     u16 extension_field,
			     u32 __iomem *upsmr_register,
			     u32 __iomem *uempr_register,
			     u32 __iomem *maccfg1_register)
{
	u32 value = 0;

	/* Set UEMPR register */
	value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
	value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
	out_be32(uempr_register, value);

	/* Set UPSMR register */
	setbits32(upsmr_register, automatic_flow_control_mode);

	value = in_be32(maccfg1_register);
	if (rx_flow_control_enable)
		value |= MACCFG1_FLOW_RX;
	if (tx_flow_control_enable)
		value |= MACCFG1_FLOW_TX;
	out_be32(maccfg1_register, value);

	return 0;
}

static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
					     int auto_zero_hardware_statistics,
					     u32 __iomem *upsmr_register,
					     u16 __iomem *uescr_register)
{
	u16 uescr_value = 0;

	/* Enable hardware statistics gathering if requested */
	if (enable_hardware_statistics)
		setbits32(upsmr_register, UCC_GETH_UPSMR_HSE);

	/* Clear hardware statistics counters */
	uescr_value = in_be16(uescr_register);
	uescr_value |= UESCR_CLRCNT;
	/* Automatically zero hardware statistics counters on read,
	   if requested */
	if (auto_zero_hardware_statistics)
		uescr_value |= UESCR_AUTOZ;
	out_be16(uescr_register, uescr_value);

	return 0;
}

static int init_firmware_statistics_gathering_mode(int
		enable_tx_firmware_statistics,
		int enable_rx_firmware_statistics,
		u32 __iomem *tx_rmon_base_ptr,
		u32 tx_firmware_statistics_structure_address,
		u32 __iomem *rx_rmon_base_ptr,
		u32 rx_firmware_statistics_structure_address,
		u16 __iomem *temoder_register,
		u32 __iomem *remoder_register)
{
	/* Note: this function does not check if */
	/* the parameters it receives are NULL */

	if (enable_tx_firmware_statistics) {
		out_be32(tx_rmon_base_ptr,
			 tx_firmware_statistics_structure_address);
		setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE);
	}

	if (enable_rx_firmware_statistics) {
		out_be32(rx_rmon_base_ptr,
			 rx_firmware_statistics_structure_address);
		setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE);
	}

	return 0;
}

static int init_mac_station_addr_regs(u8 address_byte_0,
				      u8 address_byte_1,
				      u8 address_byte_2,
				      u8 address_byte_3,
				      u8 address_byte_4,
				      u8 address_byte_5,
				      u32 __iomem *macstnaddr1_register,
				      u32 __iomem *macstnaddr2_register)
{
	u32 value = 0;

	/* Example: for a station address of 0x12345678ABCD, */
	/* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */

	/* MACSTNADDR1 Register: */

	/* 0                      7   8                      15  */
	/* station address byte 5     station address byte 4     */
	/* 16                     23  24                     31  */
	/* station address byte 3     station address byte 2     */
	value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
	value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
	value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_5 << 24) & 0xFF000000);

	out_be32(macstnaddr1_register, value);

	/* MACSTNADDR2 Register: */

	/* 0                      7   8                      15  */
	/* station address byte 1     station address byte 0     */
	/* 16                     23  24                     31  */
	/* reserved                   reserved                   */
	value = 0;
	value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_1 << 24) & 0xFF000000);

	out_be32(macstnaddr2_register, value);

	return 0;
}

static int init_check_frame_length_mode(int length_check,
					u32 __iomem *maccfg2_register)
{
	u32 value = 0;

	value = in_be32(maccfg2_register);

	if (length_check)
		value |= MACCFG2_LC;
	else
		value &= ~MACCFG2_LC;

	out_be32(maccfg2_register, value);
	return 0;
}

static int init_preamble_length(u8 preamble_length,
				u32 __iomem *maccfg2_register)
{
	if ((preamble_length < 3) || (preamble_length > 7))
		return -EINVAL;

	clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
			preamble_length << MACCFG2_PREL_SHIFT);

	return 0;
}

static int init_rx_parameters(int reject_broadcast,
			      int receive_short_frames,
			      int promiscuous, u32 __iomem *upsmr_register)
{
	u32 value = 0;

	value = in_be32(upsmr_register);

	if (reject_broadcast)
		value |= UCC_GETH_UPSMR_BRO;
	else
		value &= ~UCC_GETH_UPSMR_BRO;

	if (receive_short_frames)
		value |= UCC_GETH_UPSMR_RSH;
	else
		value &= ~UCC_GETH_UPSMR_RSH;

	if (promiscuous)
		value |= UCC_GETH_UPSMR_PRO;
	else
		value &= ~UCC_GETH_UPSMR_PRO;

	out_be32(upsmr_register, value);

	return 0;
}

static int init_max_rx_buff_len(u16 max_rx_buf_len,
				u16 __iomem *mrblr_register)
{
	/* max_rx_buf_len value must be a multiple of 128 */
	if ((max_rx_buf_len == 0) ||
	    (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
		return -EINVAL;

	out_be16(mrblr_register, max_rx_buf_len);
	return 0;
}

static int init_min_frame_len(u16 min_frame_length,
			      u16 __iomem *minflr_register,
			      u16 __iomem *mrblr_register)
{
	u16 mrblr_value = 0;

	mrblr_value = in_be16(mrblr_register);
	if (min_frame_length >= (mrblr_value - 4))
		return -EINVAL;

	out_be16(minflr_register, min_frame_length);
	return 0;
}

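/* Program MACCFG2 and UPSMR to match the PHY interface mode and maximum
 * speed (MII/RMII/RGMII/TBI/RTBI/SGMII), disable TBI autonegotiation where
 * needed, and configure frame-length checking and preamble length.
 */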
18a8e864 | 1313 | static int adjust_enet_interface(struct ucc_geth_private *ugeth) |
ce973b14 | 1314 | { |
18a8e864 | 1315 | struct ucc_geth_info *ug_info; |
6fee40e9 AF |
1316 | struct ucc_geth __iomem *ug_regs; |
1317 | struct ucc_fast __iomem *uf_regs; | |
728de4c9 | 1318 | int ret_val; |
81abb43a | 1319 | u32 upsmr, maccfg2; |
ce973b14 LY |
1320 | u16 value; |
1321 | ||
b39d66a8 | 1322 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 LY |
1323 | |
1324 | ug_info = ugeth->ug_info; | |
1325 | ug_regs = ugeth->ug_regs; | |
1326 | uf_regs = ugeth->uccf->uf_regs; | |
1327 | ||
ce973b14 LY |
1328 | /* Set MACCFG2 */ |
1329 | maccfg2 = in_be32(&ug_regs->maccfg2); | |
1330 | maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; | |
728de4c9 KP |
1331 | if ((ugeth->max_speed == SPEED_10) || |
1332 | (ugeth->max_speed == SPEED_100)) | |
ce973b14 | 1333 | maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; |
728de4c9 | 1334 | else if (ugeth->max_speed == SPEED_1000) |
ce973b14 LY |
1335 | maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; |
1336 | maccfg2 |= ug_info->padAndCrc; | |
1337 | out_be32(&ug_regs->maccfg2, maccfg2); | |
1338 | ||
1339 | /* Set UPSMR */ | |
1340 | upsmr = in_be32(&uf_regs->upsmr); | |
3bc53427 TT |
1341 | upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | |
1342 | UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); | |
728de4c9 KP |
1343 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || |
1344 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || | |
1345 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || | |
bd0ceaab KP |
1346 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || |
1347 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || | |
728de4c9 | 1348 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { |
cef309cf HS |
1349 | if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII) |
1350 | upsmr |= UCC_GETH_UPSMR_RPM; | |
728de4c9 KP |
1351 | switch (ugeth->max_speed) { |
1352 | case SPEED_10: | |
3bc53427 | 1353 | upsmr |= UCC_GETH_UPSMR_R10M; |
728de4c9 KP |
1354 | /* FALLTHROUGH */ |
1355 | case SPEED_100: | |
1356 | if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) | |
3bc53427 | 1357 | upsmr |= UCC_GETH_UPSMR_RMM; |
728de4c9 KP |
1358 | } |
1359 | } | |
1360 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || | |
1361 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { | |
3bc53427 | 1362 | upsmr |= UCC_GETH_UPSMR_TBIM; |
728de4c9 | 1363 | } |
047584ce HW |
1364 | if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII) |
1365 | upsmr |= UCC_GETH_UPSMR_SGMM; | |
1366 | ||
ce973b14 LY |
1367 | out_be32(&uf_regs->upsmr, upsmr); |
1368 | ||
ce973b14 LY |
1369 | /* Disable autonegotiation in tbi mode, because by default it |
1370 | comes up in autonegotiation mode. */ | |
1371 | /* Note that this depends on proper setting in utbipar register. */ | |
728de4c9 KP |
1372 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || |
1373 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { | |
81abb43a LYB |
1374 | struct ucc_geth_info *ug_info = ugeth->ug_info; |
1375 | struct phy_device *tbiphy; | |
1376 | ||
1377 | if (!ug_info->tbi_node) | |
c84d8055 | 1378 | pr_warn("TBI mode requires that the device tree specify a tbi-handle\n"); |
81abb43a LYB |
1379 | |
1380 | tbiphy = of_phy_find_device(ug_info->tbi_node); | |
1381 | if (!tbiphy) | |
c84d8055 | 1382 | pr_warn("Could not get TBI device\n"); |
81abb43a LYB |
1383 | |
1384 | value = phy_read(tbiphy, ENET_TBI_MII_CR); | |
ce973b14 | 1385 | value &= ~0x1000; /* Turn off autonegotiation */ |
81abb43a | 1386 | phy_write(tbiphy, ENET_TBI_MII_CR, value); |
04d53b20 | 1387 | |
e5a03bfd | 1388 | put_device(&tbiphy->mdio.dev); |
ce973b14 LY |
1389 | } |
1390 | ||
1391 | init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); | |
1392 | ||
1393 | ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); | |
1394 | if (ret_val != 0) { | |
890de95e | 1395 | if (netif_msg_probe(ugeth)) |
c84d8055 | 1396 | pr_err("Preamble length must be between 3 and 7 inclusive\n"); |
ce973b14 LY |
1397 | return ret_val; |
1398 | } | |
1399 | ||
1400 | return 0; | |
1401 | } | |
1402 | ||
7de8ee78 AV |
1403 | static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) |
1404 | { | |
1405 | struct ucc_fast_private *uccf; | |
1406 | u32 cecr_subblock; | |
1407 | u32 temp; | |
1408 | int i = 10; | |
1409 | ||
1410 | uccf = ugeth->uccf; | |
1411 | ||
1412 | /* Mask GRACEFUL STOP TX interrupt bit and clear it */ | |
1413 | clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA); | |
1414 | out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */ | |
1415 | ||
1416 | /* Issue host command */ | |
1417 | cecr_subblock = | |
1418 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | |
1419 | qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, | |
1420 | QE_CR_PROTOCOL_ETHERNET, 0); | |
1421 | ||
1422 | /* Wait for command to complete */ | |
1423 | do { | |
1424 | msleep(10); | |
1425 | temp = in_be32(uccf->p_ucce); | |
1426 | } while (!(temp & UCC_GETH_UCCE_GRA) && --i); | |
1427 | ||
1428 | uccf->stopped_tx = 1; | |
1429 | ||
1430 | return 0; | |
1431 | } | |
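/*
 * Tx stop handshake: the GRA event is masked and any stale event
 * cleared (UCCE bits are write-one-to-clear), the GRACEFUL STOP TX
 * host command is issued once, and UCCE is then polled for the GRA
 * acknowledgement for up to ten 10 ms intervals.  Note that 0 is
 * returned even if the acknowledgement never arrives.
 */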
1432 | ||
1433 | static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth) | |
1434 | { | |
1435 | struct ucc_fast_private *uccf; | |
1436 | u32 cecr_subblock; | |
1437 | u8 temp; | |
1438 | int i = 10; | |
1439 | ||
1440 | uccf = ugeth->uccf; | |
1441 | ||
1442 | /* Clear acknowledge bit */ | |
1443 | temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); | |
1444 | temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; | |
1445 | out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp); | |
1446 | ||
1447 | /* Keep issuing command and checking acknowledge bit until | |
1448 | it is asserted, according to spec */ | |
1449 | do { | |
1450 | /* Issue host command */ | |
1451 | cecr_subblock = | |
1452 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info. | |
1453 | ucc_num); | |
1454 | qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, | |
1455 | QE_CR_PROTOCOL_ETHERNET, 0); | |
1456 | msleep(10); | |
1457 | temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); | |
1458 | } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i); | |
1459 | ||
1460 | uccf->stopped_rx = 1; | |
1461 | ||
1462 | return 0; | |
1463 | } | |
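/*
 * Unlike the Tx case, the Rx stop re-issues the GRACEFUL STOP RX
 * command on every iteration and watches the acknowledge bit in the
 * rxgstpack field of the Rx global parameter RAM instead of a UCCE
 * event, as the spec-related comment above requires.
 */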
1464 | ||
1465 | static int ugeth_restart_tx(struct ucc_geth_private *ugeth) | |
1466 | { | |
1467 | struct ucc_fast_private *uccf; | |
1468 | u32 cecr_subblock; | |
1469 | ||
1470 | uccf = ugeth->uccf; | |
1471 | ||
1472 | cecr_subblock = | |
1473 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | |
1474 | qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0); | |
1475 | uccf->stopped_tx = 0; | |
1476 | ||
1477 | return 0; | |
1478 | } | |
1479 | ||
1480 | static int ugeth_restart_rx(struct ucc_geth_private *ugeth) | |
1481 | { | |
1482 | struct ucc_fast_private *uccf; | |
1483 | u32 cecr_subblock; | |
1484 | ||
1485 | uccf = ugeth->uccf; | |
1486 | ||
1487 | cecr_subblock = | |
1488 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | |
1489 | qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, | |
1490 | 0); | |
1491 | uccf->stopped_rx = 0; | |
1492 | ||
1493 | return 0; | |
1494 | } | |
1495 | ||
1496 | static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode) | |
1497 | { | |
1498 | struct ucc_fast_private *uccf; | |
1499 | int enabled_tx, enabled_rx; | |
1500 | ||
1501 | uccf = ugeth->uccf; | |
1502 | ||
1503 | /* check if the UCC number is in range. */ | |
1504 | if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { | |
1505 | if (netif_msg_probe(ugeth)) | |
c84d8055 | 1506 | pr_err("ucc_num out of range\n"); |
7de8ee78 AV |
1507 | return -EINVAL; |
1508 | } | |
1509 | ||
1510 | enabled_tx = uccf->enabled_tx; | |
1511 | enabled_rx = uccf->enabled_rx; | |
1512 | ||
1513 | /* Get Tx and Rx going again, in case this channel was actively | |
1514 | disabled. */ | |
1515 | if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx) | |
1516 | ugeth_restart_tx(ugeth); | |
1517 | if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx) | |
1518 | ugeth_restart_rx(ugeth); | |
1519 | ||
1520 | ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */ | |
1521 | ||
1522 | return 0; | |
1523 | ||
1524 | } | |
1525 | ||
1526 | static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode) | |
1527 | { | |
1528 | struct ucc_fast_private *uccf; | |
1529 | ||
1530 | uccf = ugeth->uccf; | |
1531 | ||
1532 | /* check if the UCC number is in range. */ | |
1533 | if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { | |
1534 | if (netif_msg_probe(ugeth)) | |
c84d8055 | 1535 | pr_err("ucc_num out of range\n"); |
7de8ee78 AV |
1536 | return -EINVAL; |
1537 | } | |
1538 | ||
1539 | /* Stop any transmissions */ | |
1540 | if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx) | |
1541 | ugeth_graceful_stop_tx(ugeth); | |
1542 | ||
1543 | /* Stop any receptions */ | |
1544 | if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx) | |
1545 | ugeth_graceful_stop_rx(ugeth); | |
1546 | ||
1547 | ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */ | |
1548 | ||
1549 | return 0; | |
1550 | } | |
1551 | ||
864fdf88 AV |
1552 | static void ugeth_quiesce(struct ucc_geth_private *ugeth) |
1553 | { | |
08b5e1c9 AV |
1554 | /* Prevent any further xmits, plus detach the device. */ |
1555 | netif_device_detach(ugeth->ndev); | |
1556 | ||
1557 | /* Wait for any current xmits to finish. */ | |
864fdf88 AV |
1558 | netif_tx_disable(ugeth->ndev); |
1559 | ||
1560 | /* Disable the interrupt to avoid NAPI rescheduling. */ | |
1561 | disable_irq(ugeth->ug_info->uf_info.irq); | |
1562 | ||
1563 | /* Stop NAPI, and possibly wait for its completion. */ | |
1564 | napi_disable(&ugeth->napi); | |
1565 | } | |
1566 | ||
1567 | static void ugeth_activate(struct ucc_geth_private *ugeth) | |
1568 | { | |
1569 | napi_enable(&ugeth->napi); | |
1570 | enable_irq(ugeth->ug_info->uf_info.irq); | |
08b5e1c9 | 1571 | netif_device_attach(ugeth->ndev); |
864fdf88 AV |
1572 | } |
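/*
 * ugeth_quiesce()/ugeth_activate() bracket any reconfiguration that
 * needs the controller idle.  A minimal sketch of the pattern, as
 * adjust_link() below uses it ('tempval' stands for whatever register
 * image is being rewritten):
 *
 *	ugeth_quiesce(ugeth);
 *	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
 *	out_be32(&ugeth->ug_regs->maccfg2, tempval);
 *	ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
 *	ugeth_activate(ugeth);
 */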
1573 | ||
ce973b14 LY |
1574 | /* Called every time the controller might need to be made |
1575 | * aware of new link state. The PHY code conveys this | |
1576 | * information through variables in the ugeth structure, and this | |
1577 | * function converts those variables into the appropriate | |
1578 | * register values, and can bring down the device if needed. | |
1579 | */ | |
728de4c9 | 1580 | |
ce973b14 LY |
1581 | static void adjust_link(struct net_device *dev) |
1582 | { | |
18a8e864 | 1583 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
6fee40e9 AF |
1584 | struct ucc_geth __iomem *ug_regs; |
1585 | struct ucc_fast __iomem *uf_regs; | |
728de4c9 | 1586 | struct phy_device *phydev = ugeth->phydev; |
728de4c9 | 1587 | int new_state = 0; |
ce973b14 LY |
1588 | |
1589 | ug_regs = ugeth->ug_regs; | |
728de4c9 | 1590 | uf_regs = ugeth->uccf->uf_regs; |
ce973b14 | 1591 | |
728de4c9 KP |
1592 | if (phydev->link) { |
1593 | u32 tempval = in_be32(&ug_regs->maccfg2); | |
1594 | u32 upsmr = in_be32(&uf_regs->upsmr); | |
ce973b14 LY |
1595 | /* Now we make sure that we can be in full duplex mode. |
1596 | * If not, we operate in half-duplex mode. */ | |
728de4c9 KP |
1597 | if (phydev->duplex != ugeth->oldduplex) { |
1598 | new_state = 1; | |
1599 | if (!(phydev->duplex)) | |
ce973b14 | 1600 | tempval &= ~(MACCFG2_FDX); |
728de4c9 | 1601 | else |
ce973b14 | 1602 | tempval |= MACCFG2_FDX; |
728de4c9 | 1603 | ugeth->oldduplex = phydev->duplex; |
ce973b14 LY |
1604 | } |
1605 | ||
728de4c9 KP |
1606 | if (phydev->speed != ugeth->oldspeed) { |
1607 | new_state = 1; | |
1608 | switch (phydev->speed) { | |
1609 | case SPEED_1000: | |
1610 | tempval = ((tempval & | |
1611 | ~(MACCFG2_INTERFACE_MODE_MASK)) | | |
1612 | MACCFG2_INTERFACE_MODE_BYTE); | |
a1862a53 | 1613 | break; |
728de4c9 KP |
1614 | case SPEED_100: |
1615 | case SPEED_10: | |
1616 | tempval = ((tempval & | |
1617 | ~(MACCFG2_INTERFACE_MODE_MASK)) | | |
1618 | MACCFG2_INTERFACE_MODE_NIBBLE); | |
1619 | /* if reduced mode, re-set UPSMR.R10M */ | |
1620 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || | |
1621 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || | |
1622 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || | |
bd0ceaab KP |
1623 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || |
1624 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || | |
728de4c9 KP |
1625 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { |
1626 | if (phydev->speed == SPEED_10) | |
3bc53427 | 1627 | upsmr |= UCC_GETH_UPSMR_R10M; |
728de4c9 | 1628 | else |
3bc53427 | 1629 | upsmr &= ~UCC_GETH_UPSMR_R10M; |
728de4c9 | 1630 | } |
ce973b14 LY |
1631 | break; |
1632 | default: | |
728de4c9 | 1633 | if (netif_msg_link(ugeth)) |
c84d8055 | 1634 | pr_warn( |
728de4c9 KP |
1635 | "%s: Ack! Speed (%d) is not 10/100/1000!", |
1636 | dev->name, phydev->speed); | |
ce973b14 LY |
1637 | break; |
1638 | } | |
728de4c9 | 1639 | ugeth->oldspeed = phydev->speed; |
ce973b14 LY |
1640 | } |
1641 | ||
1642 | if (!ugeth->oldlink) { | |
728de4c9 | 1643 | new_state = 1; |
ce973b14 | 1644 | ugeth->oldlink = 1; |
ce973b14 | 1645 | } |
08fafd84 AV |
1646 | |
1647 | if (new_state) { | |
1648 | /* | |
1649 | * To change the MAC configuration we need to disable | |
1650 | * the controller. To do so, we have to either grab | |
1651 | * ugeth->lock, which is a bad idea since 'graceful | |
1652 | * stop' commands might take quite a while, or we can | |
1653 | * quiesce driver's activity. | |
1654 | */ | |
1655 | ugeth_quiesce(ugeth); | |
1656 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); | |
1657 | ||
1658 | out_be32(&ug_regs->maccfg2, tempval); | |
1659 | out_be32(&uf_regs->upsmr, upsmr); | |
1660 | ||
1661 | ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); | |
1662 | ugeth_activate(ugeth); | |
1663 | } | |
728de4c9 KP |
1664 | } else if (ugeth->oldlink) { |
1665 | new_state = 1; | |
ce973b14 LY |
1666 | ugeth->oldlink = 0; |
1667 | ugeth->oldspeed = 0; | |
1668 | ugeth->oldduplex = -1; | |
ce973b14 | 1669 | } |
728de4c9 KP |
1670 | |
1671 | if (new_state && netif_msg_link(ugeth)) | |
1672 | phy_print_status(phydev); | |
ce973b14 LY |
1673 | } |
1674 | ||
fb1001f3 HW |
1675 | /* Initialize TBI PHY interface for communicating with the |
1676 | * SERDES lynx PHY on the chip. We communicate with this PHY | |
1677 | * through the MDIO bus on each controller, treating it as a | |
1678 | * "normal" PHY at the address found in the UTBIPA register. We assume | |
1679 | * that the UTBIPA register is valid. Either the MDIO bus code will set | |
1680 | * it to a value that doesn't conflict with other PHYs on the bus, or the | |
1681 | * value doesn't matter, as there are no other PHYs on the bus. | |
1682 | */ | |
1683 | static void uec_configure_serdes(struct net_device *dev) | |
1684 | { | |
1685 | struct ucc_geth_private *ugeth = netdev_priv(dev); | |
1686 | struct ucc_geth_info *ug_info = ugeth->ug_info; | |
1687 | struct phy_device *tbiphy; | |
1688 | ||
1689 | if (!ug_info->tbi_node) { | |
1690 | dev_warn(&dev->dev, "SGMII mode requires that the device " | |
1691 | "tree specify a tbi-handle\n"); | |
1692 | return; | |
1693 | } | |
1694 | ||
1695 | tbiphy = of_phy_find_device(ug_info->tbi_node); | |
1696 | if (!tbiphy) { | |
1697 | dev_err(&dev->dev, "error: Could not get TBI device\n"); | |
1698 | return; | |
1699 | } | |
1700 | ||
1701 | /* | |
1702 | * If the link is already up, we must already be ok, and don't need to | |
1703 | * configure and reset the TBI<->SerDes link. Maybe U-Boot configured | |
1704 | * everything for us? Resetting it takes the link down and requires | |
1705 | * several seconds for it to come back. | |
1706 | */ | |
04d53b20 | 1707 | if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) { |
e5a03bfd | 1708 | put_device(&tbiphy->mdio.dev); |
fb1001f3 | 1709 | return; |
04d53b20 | 1710 | } |
fb1001f3 HW |
1711 | |
1712 | /* Single clk mode, mii mode off (for serdes communication) */ |
1713 | phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); | |
1714 | ||
1715 | phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); | |
1716 | ||
1717 | phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); | |
04d53b20 | 1718 | |
5e431650 | 1719 | put_device(&tbiphy->mdio.dev); |
fb1001f3 HW |
1720 | } |
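/*
 * of_phy_find_device() takes a reference on the TBI PHY's device, so
 * each path above that obtained one drops it again with
 * put_device(&tbiphy->mdio.dev).  The early return when TBISR already
 * reports link up avoids a needless SerDes reset, which would take
 * the link down for several seconds (see the comment before the
 * status read).
 */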
1721 | ||
ce973b14 LY |
1722 | /* Configure the PHY for dev. |
1723 | * returns 0 if success. -1 if failure | |
1724 | */ | |
1725 | static int init_phy(struct net_device *dev) | |
1726 | { | |
728de4c9 | 1727 | struct ucc_geth_private *priv = netdev_priv(dev); |
61fa9dcf | 1728 | struct ucc_geth_info *ug_info = priv->ug_info; |
728de4c9 | 1729 | struct phy_device *phydev; |
ce973b14 | 1730 | |
728de4c9 KP |
1731 | priv->oldlink = 0; |
1732 | priv->oldspeed = 0; | |
1733 | priv->oldduplex = -1; | |
ce973b14 | 1734 | |
0b9da337 GL |
1735 | phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, |
1736 | priv->phy_interface); | |
1737 | if (!phydev) { | |
3104a6ff | 1738 | dev_err(&dev->dev, "Could not attach to PHY\n"); |
0b9da337 | 1739 | return -ENODEV; |
ce973b14 LY |
1740 | } |
1741 | ||
047584ce HW |
1742 | if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) |
1743 | uec_configure_serdes(dev); | |
1744 | ||
bb24fd6a JT |
1745 | phydev->supported &= (SUPPORTED_MII | |
1746 | SUPPORTED_Autoneg | | |
1747 | ADVERTISED_10baseT_Half | | |
1748 | ADVERTISED_10baseT_Full | | |
1749 | ADVERTISED_100baseT_Half | | |
1750 | ADVERTISED_100baseT_Full); | |
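/*
 * The ADVERTISED_* constants used in this mask have the same bit
 * values as their SUPPORTED_* counterparts, so the result is a
 * supported mask limited to MII, autoneg and 10/100; gigabit is added
 * just below only when priv->max_speed is SPEED_1000.
 */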
ce973b14 | 1751 | |
728de4c9 KP |
1752 | if (priv->max_speed == SPEED_1000) |
1753 | phydev->supported |= ADVERTISED_1000baseT_Full; | |
ce973b14 | 1754 | |
728de4c9 | 1755 | phydev->advertising = phydev->supported; |
68dc44af | 1756 | |
728de4c9 | 1757 | priv->phydev = phydev; |
ce973b14 LY |
1758 | |
1759 | return 0; | |
ce973b14 LY |
1760 | } |
1761 | ||
18a8e864 | 1762 | static void ugeth_dump_regs(struct ucc_geth_private *ugeth) |
ce973b14 LY |
1763 | { |
1764 | #ifdef DEBUG | |
1765 | ucc_fast_dump_regs(ugeth->uccf); | |
1766 | dump_regs(ugeth); | |
1767 | dump_bds(ugeth); | |
1768 | #endif | |
1769 | } | |
1770 | ||
18a8e864 | 1771 | static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * |
ce973b14 | 1772 | ugeth, |
18a8e864 | 1773 | enum enet_addr_type |
ce973b14 LY |
1774 | enet_addr_type) |
1775 | { | |
6fee40e9 | 1776 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; |
18a8e864 LY |
1777 | struct ucc_fast_private *uccf; |
1778 | enum comm_dir comm_dir; | |
ce973b14 LY |
1779 | struct list_head *p_lh; |
1780 | u16 i, num; | |
6fee40e9 AF |
1781 | u32 __iomem *addr_h; |
1782 | u32 __iomem *addr_l; | |
ce973b14 LY |
1783 | u8 *p_counter; |
1784 | ||
1785 | uccf = ugeth->uccf; | |
1786 | ||
1787 | p_82xx_addr_filt = | |
6fee40e9 AF |
1788 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) |
1789 | ugeth->p_rx_glbl_pram->addressfiltering; | |
ce973b14 LY |
1790 | |
1791 | if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { | |
1792 | addr_h = &(p_82xx_addr_filt->gaddr_h); | |
1793 | addr_l = &(p_82xx_addr_filt->gaddr_l); | |
1794 | p_lh = &ugeth->group_hash_q; | |
1795 | p_counter = &(ugeth->numGroupAddrInHash); | |
1796 | } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) { | |
1797 | addr_h = &(p_82xx_addr_filt->iaddr_h); | |
1798 | addr_l = &(p_82xx_addr_filt->iaddr_l); | |
1799 | p_lh = &ugeth->ind_hash_q; | |
1800 | p_counter = &(ugeth->numIndAddrInHash); | |
1801 | } else | |
1802 | return -EINVAL; | |
1803 | ||
1804 | comm_dir = 0; | |
1805 | if (uccf->enabled_tx) | |
1806 | comm_dir |= COMM_DIR_TX; | |
1807 | if (uccf->enabled_rx) | |
1808 | comm_dir |= COMM_DIR_RX; | |
1809 | if (comm_dir) | |
1810 | ugeth_disable(ugeth, comm_dir); | |
1811 | ||
1812 | /* Clear the hash table. */ | |
1813 | out_be32(addr_h, 0x00000000); | |
1814 | out_be32(addr_l, 0x00000000); | |
1815 | ||
1816 | if (!p_lh) | |
1817 | return 0; | |
1818 | ||
1819 | num = *p_counter; | |
1820 | ||
1821 | /* Delete all remaining CQ elements */ | |
1822 | for (i = 0; i < num; i++) | |
1823 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh))); | |
1824 | ||
1825 | *p_counter = 0; | |
1826 | ||
1827 | if (comm_dir) | |
1828 | ugeth_enable(ugeth, comm_dir); | |
1829 | ||
1830 | return 0; | |
1831 | } | |
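/*
 * Sequence used when wiping a hash filter: temporarily disable
 * whichever directions are live, zero the gaddr or iaddr pair in the
 * 82xx address-filtering PRAM, return every queued
 * enet_addr_container to its pool and reset the in-use counter, then
 * re-enable the directions that were active.
 */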
1832 | ||
18a8e864 | 1833 | static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth, |
ce973b14 LY |
1834 | u8 paddr_num) |
1835 | { | |
1836 | ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ | |
1837 | return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ | |
1838 | } | |
1839 | ||
e19a82c1 PG |
1840 | static void ucc_geth_free_rx(struct ucc_geth_private *ugeth) |
1841 | { | |
1842 | struct ucc_geth_info *ug_info; | |
1843 | struct ucc_fast_info *uf_info; | |
1844 | u16 i, j; | |
1845 | u8 __iomem *bd; | |
1846 | ||
1847 | ||
1848 | ug_info = ugeth->ug_info; | |
1849 | uf_info = &ug_info->uf_info; | |
1850 | ||
1851 | for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { | |
1852 | if (ugeth->p_rx_bd_ring[i]) { | |
1853 | /* Return existing data buffers in ring */ | |
1854 | bd = ugeth->p_rx_bd_ring[i]; | |
1855 | for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { | |
1856 | if (ugeth->rx_skbuff[i][j]) { | |
1857 | dma_unmap_single(ugeth->dev, | |
1858 | in_be32(&((struct qe_bd __iomem *)bd)->buf), | |
1859 | ugeth->ug_info-> | |
1860 | uf_info.max_rx_buf_length + | |
1861 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, | |
1862 | DMA_FROM_DEVICE); | |
1863 | dev_kfree_skb_any( | |
1864 | ugeth->rx_skbuff[i][j]); | |
1865 | ugeth->rx_skbuff[i][j] = NULL; | |
1866 | } | |
1867 | bd += sizeof(struct qe_bd); | |
1868 | } | |
1869 | ||
1870 | kfree(ugeth->rx_skbuff[i]); | |
1871 | ||
1872 | if (ugeth->ug_info->uf_info.bd_mem_part == | |
1873 | MEM_PART_SYSTEM) | |
1874 | kfree((void *)ugeth->rx_bd_ring_offset[i]); | |
1875 | else if (ugeth->ug_info->uf_info.bd_mem_part == | |
1876 | MEM_PART_MURAM) | |
1877 | qe_muram_free(ugeth->rx_bd_ring_offset[i]); | |
1878 | ugeth->p_rx_bd_ring[i] = NULL; | |
1879 | } | |
1880 | } | |
1881 | ||
1882 | } | |
1883 | ||
1884 | static void ucc_geth_free_tx(struct ucc_geth_private *ugeth) | |
ce973b14 | 1885 | { |
e19a82c1 PG |
1886 | struct ucc_geth_info *ug_info; |
1887 | struct ucc_fast_info *uf_info; | |
ce973b14 | 1888 | u16 i, j; |
6fee40e9 | 1889 | u8 __iomem *bd; |
ce973b14 | 1890 | |
e19a82c1 PG |
1891 | ug_info = ugeth->ug_info; |
1892 | uf_info = &ug_info->uf_info; | |
1893 | ||
1894 | for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { | |
1895 | bd = ugeth->p_tx_bd_ring[i]; | |
1896 | if (!bd) | |
1897 | continue; | |
1898 | for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { | |
1899 | if (ugeth->tx_skbuff[i][j]) { | |
1900 | dma_unmap_single(ugeth->dev, | |
1901 | in_be32(&((struct qe_bd __iomem *)bd)->buf), | |
1902 | (in_be32((u32 __iomem *)bd) & | |
1903 | BD_LENGTH_MASK), | |
1904 | DMA_TO_DEVICE); | |
1905 | dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); | |
1906 | ugeth->tx_skbuff[i][j] = NULL; | |
1907 | } | |
1908 | } | |
1909 | ||
1910 | kfree(ugeth->tx_skbuff[i]); | |
1911 | ||
1912 | if (ugeth->p_tx_bd_ring[i]) { | |
1913 | if (ugeth->ug_info->uf_info.bd_mem_part == | |
1914 | MEM_PART_SYSTEM) | |
1915 | kfree((void *)ugeth->tx_bd_ring_offset[i]); | |
1916 | else if (ugeth->ug_info->uf_info.bd_mem_part == | |
1917 | MEM_PART_MURAM) | |
1918 | qe_muram_free(ugeth->tx_bd_ring_offset[i]); | |
1919 | ugeth->p_tx_bd_ring[i] = NULL; | |
1920 | } | |
1921 | } | |
1922 | ||
1923 | } | |
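/*
 * ucc_geth_free_rx() and ucc_geth_free_tx() undo ucc_geth_alloc_rx()
 * and ucc_geth_alloc_tx() further down: sk_buffs still attached to a
 * BD are DMA-unmapped and freed, the skbuff pointer arrays are freed,
 * and the BD rings themselves are returned to either the system
 * allocator (kfree) or MURAM (qe_muram_free), depending on
 * bd_mem_part.
 */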
1924 | ||
1925 | static void ucc_geth_memclean(struct ucc_geth_private *ugeth) | |
1926 | { | |
ce973b14 LY |
1927 | if (!ugeth) |
1928 | return; | |
1929 | ||
80a9fad8 | 1930 | if (ugeth->uccf) { |
ce973b14 | 1931 | ucc_fast_free(ugeth->uccf); |
80a9fad8 AV |
1932 | ugeth->uccf = NULL; |
1933 | } | |
ce973b14 LY |
1934 | |
1935 | if (ugeth->p_thread_data_tx) { | |
1936 | qe_muram_free(ugeth->thread_dat_tx_offset); | |
1937 | ugeth->p_thread_data_tx = NULL; | |
1938 | } | |
1939 | if (ugeth->p_thread_data_rx) { | |
1940 | qe_muram_free(ugeth->thread_dat_rx_offset); | |
1941 | ugeth->p_thread_data_rx = NULL; | |
1942 | } | |
1943 | if (ugeth->p_exf_glbl_param) { | |
1944 | qe_muram_free(ugeth->exf_glbl_param_offset); | |
1945 | ugeth->p_exf_glbl_param = NULL; | |
1946 | } | |
1947 | if (ugeth->p_rx_glbl_pram) { | |
1948 | qe_muram_free(ugeth->rx_glbl_pram_offset); | |
1949 | ugeth->p_rx_glbl_pram = NULL; | |
1950 | } | |
1951 | if (ugeth->p_tx_glbl_pram) { | |
1952 | qe_muram_free(ugeth->tx_glbl_pram_offset); | |
1953 | ugeth->p_tx_glbl_pram = NULL; | |
1954 | } | |
1955 | if (ugeth->p_send_q_mem_reg) { | |
1956 | qe_muram_free(ugeth->send_q_mem_reg_offset); | |
1957 | ugeth->p_send_q_mem_reg = NULL; | |
1958 | } | |
1959 | if (ugeth->p_scheduler) { | |
1960 | qe_muram_free(ugeth->scheduler_offset); | |
1961 | ugeth->p_scheduler = NULL; | |
1962 | } | |
1963 | if (ugeth->p_tx_fw_statistics_pram) { | |
1964 | qe_muram_free(ugeth->tx_fw_statistics_pram_offset); | |
1965 | ugeth->p_tx_fw_statistics_pram = NULL; | |
1966 | } | |
1967 | if (ugeth->p_rx_fw_statistics_pram) { | |
1968 | qe_muram_free(ugeth->rx_fw_statistics_pram_offset); | |
1969 | ugeth->p_rx_fw_statistics_pram = NULL; | |
1970 | } | |
1971 | if (ugeth->p_rx_irq_coalescing_tbl) { | |
1972 | qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset); | |
1973 | ugeth->p_rx_irq_coalescing_tbl = NULL; | |
1974 | } | |
1975 | if (ugeth->p_rx_bd_qs_tbl) { | |
1976 | qe_muram_free(ugeth->rx_bd_qs_tbl_offset); | |
1977 | ugeth->p_rx_bd_qs_tbl = NULL; | |
1978 | } | |
1979 | if (ugeth->p_init_enet_param_shadow) { | |
1980 | return_init_enet_entries(ugeth, | |
1981 | &(ugeth->p_init_enet_param_shadow-> | |
1982 | rxthread[0]), | |
1983 | ENET_INIT_PARAM_MAX_ENTRIES_RX, | |
1984 | ugeth->ug_info->riscRx, 1); | |
1985 | return_init_enet_entries(ugeth, | |
1986 | &(ugeth->p_init_enet_param_shadow-> | |
1987 | txthread[0]), | |
1988 | ENET_INIT_PARAM_MAX_ENTRIES_TX, | |
1989 | ugeth->ug_info->riscTx, 0); | |
1990 | kfree(ugeth->p_init_enet_param_shadow); | |
1991 | ugeth->p_init_enet_param_shadow = NULL; | |
1992 | } | |
e19a82c1 PG |
1993 | ucc_geth_free_tx(ugeth); |
1994 | ucc_geth_free_rx(ugeth); | |
ce973b14 LY |
1995 | while (!list_empty(&ugeth->group_hash_q)) |
1996 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY | |
1997 | (dequeue(&ugeth->group_hash_q))); | |
1998 | while (!list_empty(&ugeth->ind_hash_q)) | |
1999 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY | |
2000 | (dequeue(&ugeth->ind_hash_q))); | |
3e73fc9a AV |
2001 | if (ugeth->ug_regs) { |
2002 | iounmap(ugeth->ug_regs); | |
2003 | ugeth->ug_regs = NULL; | |
2004 | } | |
ce973b14 LY |
2005 | } |
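/*
 * ucc_geth_memclean() is safe to call on a partially initialised
 * private structure: every pointer is tested before its MURAM region
 * is released, the Tx/Rx rings and queued hash entries are freed via
 * the helpers above, and the ioremap()ed register window is unmapped
 * last.
 */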
2006 | ||
2007 | static void ucc_geth_set_multi(struct net_device *dev) | |
2008 | { | |
18a8e864 | 2009 | struct ucc_geth_private *ugeth; |
22bedad3 | 2010 | struct netdev_hw_addr *ha; |
6fee40e9 AF |
2011 | struct ucc_fast __iomem *uf_regs; |
2012 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; | |
ce973b14 LY |
2013 | |
2014 | ugeth = netdev_priv(dev); | |
2015 | ||
2016 | uf_regs = ugeth->uccf->uf_regs; | |
2017 | ||
2018 | if (dev->flags & IFF_PROMISC) { | |
3bc53427 | 2019 | setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); |
ce973b14 | 2020 | } else { |
3bc53427 | 2021 | clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); |
ce973b14 LY |
2022 | |
2023 | p_82xx_addr_filt = | |
6fee40e9 | 2024 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> |
ce973b14 LY |
2025 | p_rx_glbl_pram->addressfiltering; |
2026 | ||
2027 | if (dev->flags & IFF_ALLMULTI) { | |
2028 | /* Catch all multicast addresses, so set the | |
2029 | * filter to all 1's. | |
2030 | */ | |
2031 | out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff); | |
2032 | out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff); | |
2033 | } else { | |
2034 | /* Clear filter and add the addresses in the list. | |
2035 | */ | |
2036 | out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); | |
2037 | out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); | |
2038 | ||
22bedad3 | 2039 | netdev_for_each_mc_addr(ha, dev) { |
ce973b14 LY |
2040 | /* Ask CPM to run CRC and set bit in |
2041 | * filter mask. | |
2042 | */ | |
22bedad3 | 2043 | hw_add_addr_in_hash(ugeth, ha->addr); |
ce973b14 LY |
2044 | } |
2045 | } | |
2046 | } | |
2047 | } | |
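/*
 * Receive filtering summary for the paths above: IFF_PROMISC simply
 * sets UPSMR.PRO; IFF_ALLMULTI writes 0xffffffff to both halves of
 * the group hash so every multicast hash bucket matches; otherwise
 * the hash is cleared and hw_add_addr_in_hash() asks the CPM to hash
 * each listed address into the filter.
 */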
2048 | ||
18a8e864 | 2049 | static void ucc_geth_stop(struct ucc_geth_private *ugeth) |
ce973b14 | 2050 | { |
6fee40e9 | 2051 | struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; |
728de4c9 | 2052 | struct phy_device *phydev = ugeth->phydev; |
ce973b14 | 2053 | |
b39d66a8 | 2054 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 | 2055 | |
75e60474 JT |
2056 | /* |
2057 | * Tell the kernel the link is down. | |
2058 | * Must be done before disabling the controller | |
2059 | * or deadlock may happen. | |
2060 | */ | |
2061 | phy_stop(phydev); | |
2062 | ||
ce973b14 LY |
2063 | /* Disable the controller */ |
2064 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); | |
2065 | ||
ce973b14 | 2066 | /* Mask all interrupts */ |
c6f5047b | 2067 | out_be32(ugeth->uccf->p_uccm, 0x00000000); |
ce973b14 LY |
2068 | |
2069 | /* Clear all interrupts */ | |
2070 | out_be32(ugeth->uccf->p_ucce, 0xffffffff); | |
2071 | ||
2072 | /* Disable Rx and Tx */ | |
3bc53427 | 2073 | clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); |
ce973b14 | 2074 | |
ce973b14 LY |
2075 | ucc_geth_memclean(ugeth); |
2076 | } | |
2077 | ||
728de4c9 | 2078 | static int ucc_struct_init(struct ucc_geth_private *ugeth) |
ce973b14 | 2079 | { |
18a8e864 LY |
2080 | struct ucc_geth_info *ug_info; |
2081 | struct ucc_fast_info *uf_info; | |
728de4c9 | 2082 | int i; |
ce973b14 LY |
2083 | |
2084 | ug_info = ugeth->ug_info; | |
2085 | uf_info = &ug_info->uf_info; | |
2086 | ||
2087 | if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || | |
2088 | (uf_info->bd_mem_part == MEM_PART_MURAM))) { | |
890de95e | 2089 | if (netif_msg_probe(ugeth)) |
c84d8055 | 2090 | pr_err("Bad memory partition value\n"); |
ce973b14 LY |
2091 | return -EINVAL; |
2092 | } | |
2093 | ||
2094 | /* Rx BD lengths */ | |
2095 | for (i = 0; i < ug_info->numQueuesRx; i++) { | |
2096 | if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || | |
2097 | (ug_info->bdRingLenRx[i] % | |
2098 | UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { | |
890de95e | 2099 | if (netif_msg_probe(ugeth)) |
c84d8055 | 2100 | pr_err("Rx BD ring length must be multiple of 4, no smaller than 8\n"); |
ce973b14 LY |
2101 | return -EINVAL; |
2102 | } | |
2103 | } | |
2104 | ||
2105 | /* Tx BD lengths */ | |
2106 | for (i = 0; i < ug_info->numQueuesTx; i++) { | |
2107 | if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { | |
890de95e | 2108 | if (netif_msg_probe(ugeth)) |
c84d8055 | 2109 | pr_err("Tx BD ring length must be no smaller than 2\n"); |
ce973b14 LY |
2110 | return -EINVAL; |
2111 | } | |
2112 | } | |
2113 | ||
2114 | /* mrblr */ | |
2115 | if ((uf_info->max_rx_buf_length == 0) || | |
2116 | (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { | |
890de95e | 2117 | if (netif_msg_probe(ugeth)) |
c84d8055 | 2118 | pr_err("max_rx_buf_length must be non-zero multiple of 128\n"); |
ce973b14 LY |
2119 | return -EINVAL; |
2120 | } | |
2121 | ||
2122 | /* num Tx queues */ | |
2123 | if (ug_info->numQueuesTx > NUM_TX_QUEUES) { | |
890de95e | 2124 | if (netif_msg_probe(ugeth)) |
c84d8055 | 2125 | pr_err("number of tx queues too large\n"); |
ce973b14 LY |
2126 | return -EINVAL; |
2127 | } | |
2128 | ||
2129 | /* num Rx queues */ | |
2130 | if (ug_info->numQueuesRx > NUM_RX_QUEUES) { | |
890de95e | 2131 | if (netif_msg_probe(ugeth)) |
c84d8055 | 2132 | pr_err("number of rx queues too large\n"); |
ce973b14 LY |
2133 | return -EINVAL; |
2134 | } | |
2135 | ||
2136 | /* l2qt */ | |
2137 | for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { | |
2138 | if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { | |
890de95e | 2139 | if (netif_msg_probe(ugeth)) |
c84d8055 | 2140 | pr_err("VLAN priority table entry must not be larger than number of Rx queues\n"); |
ce973b14 LY |
2141 | return -EINVAL; |
2142 | } | |
2143 | } | |
2144 | ||
2145 | /* l3qt */ | |
2146 | for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { | |
2147 | if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { | |
890de95e | 2148 | if (netif_msg_probe(ugeth)) |
c84d8055 | 2149 | pr_err("IP priority table entry must not be larger than number of Rx queues\n"); |
ce973b14 LY |
2150 | return -EINVAL; |
2151 | } | |
2152 | } | |
2153 | ||
2154 | if (ug_info->cam && !ug_info->ecamptr) { | |
890de95e | 2155 | if (netif_msg_probe(ugeth)) |
c84d8055 | 2156 | pr_err("If cam mode is chosen, must supply cam ptr\n"); |
ce973b14 LY |
2157 | return -EINVAL; |
2158 | } | |
2159 | ||
2160 | if ((ug_info->numStationAddresses != | |
8e95a202 JP |
2161 | UCC_GETH_NUM_OF_STATION_ADDRESSES_1) && |
2162 | ug_info->rxExtendedFiltering) { | |
890de95e | 2163 | if (netif_msg_probe(ugeth)) |
c84d8055 | 2164 | pr_err("Number of station addresses greater than 1 not allowed in extended parsing mode\n"); |
ce973b14 LY |
2165 | return -EINVAL; |
2166 | } | |
2167 | ||
2168 | /* Generate uccm_mask for receive */ | |
2169 | uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ | |
2170 | for (i = 0; i < ug_info->numQueuesRx; i++) | |
3bc53427 | 2171 | uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i); |
ce973b14 LY |
2172 | |
2173 | for (i = 0; i < ug_info->numQueuesTx; i++) | |
3bc53427 | 2174 | uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i); |
ce973b14 | 2175 | /* Initialize the general fast UCC block. */ |
728de4c9 | 2176 | if (ucc_fast_init(uf_info, &ugeth->uccf)) { |
890de95e | 2177 | if (netif_msg_probe(ugeth)) |
c84d8055 | 2178 | pr_err("Failed to init uccf\n"); |
ce973b14 LY |
2179 | return -ENOMEM; |
2180 | } | |
728de4c9 | 2181 | |
345f8422 HW |
2182 | /* Read the number of RISC engines and update riscTx and riscRx |
2183 | * if the QE has four RISCs. |
2184 | */ |
2185 | if (qe_get_num_of_risc() == 4) { | |
2186 | ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS; | |
2187 | ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS; | |
2188 | } | |
2189 | ||
3e73fc9a AV |
2190 | ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs)); |
2191 | if (!ugeth->ug_regs) { | |
2192 | if (netif_msg_probe(ugeth)) | |
c84d8055 | 2193 | pr_err("Failed to ioremap regs\n"); |
3e73fc9a AV |
2194 | return -ENOMEM; |
2195 | } | |
728de4c9 KP |
2196 | |
2197 | return 0; | |
2198 | } | |
2199 | ||
e19a82c1 PG |
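/*
 * ucc_geth_alloc_tx() and ucc_geth_alloc_rx() below build the BD
 * rings.  The Tx ring length in bytes is rounded up to
 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; system-memory rings are
 * over-allocated by 'align' bytes and the base pointer rounded up with
 * (offset + align) & ~(align - 1), while MURAM rings get their
 * alignment directly from qe_muram_alloc().  The last BD of every ring
 * has its Wrap bit (T_W or R_W) set so the controller loops back to
 * the start.
 */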
2200 | static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth) |
2201 | { | |
2202 | struct ucc_geth_info *ug_info; | |
2203 | struct ucc_fast_info *uf_info; | |
2204 | int length; | |
2205 | u16 i, j; | |
2206 | u8 __iomem *bd; | |
2207 | ||
2208 | ug_info = ugeth->ug_info; | |
2209 | uf_info = &ug_info->uf_info; | |
2210 | ||
2211 | /* Allocate Tx bds */ | |
2212 | for (j = 0; j < ug_info->numQueuesTx; j++) { | |
2213 | /* Allocate in multiples of |
2214 | UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, | |
2215 | according to spec */ | |
2216 | length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) | |
2217 | / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) | |
2218 | * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; | |
2219 | if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) % | |
2220 | UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) | |
2221 | length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; | |
2222 | if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { | |
2223 | u32 align = 4; | |
2224 | if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) | |
2225 | align = UCC_GETH_TX_BD_RING_ALIGNMENT; | |
2226 | ugeth->tx_bd_ring_offset[j] = | |
2227 | (u32) kmalloc((u32) (length + align), GFP_KERNEL); | |
2228 | ||
2229 | if (ugeth->tx_bd_ring_offset[j] != 0) | |
2230 | ugeth->p_tx_bd_ring[j] = | |
2231 | (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] + | |
2232 | align) & ~(align - 1)); | |
2233 | } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { | |
2234 | ugeth->tx_bd_ring_offset[j] = | |
2235 | qe_muram_alloc(length, | |
2236 | UCC_GETH_TX_BD_RING_ALIGNMENT); | |
2237 | if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) | |
2238 | ugeth->p_tx_bd_ring[j] = | |
2239 | (u8 __iomem *) qe_muram_addr(ugeth-> | |
2240 | tx_bd_ring_offset[j]); | |
2241 | } | |
2242 | if (!ugeth->p_tx_bd_ring[j]) { | |
2243 | if (netif_msg_ifup(ugeth)) | |
c84d8055 | 2244 | pr_err("Can not allocate memory for Tx bd rings\n"); |
e19a82c1 PG |
2245 | return -ENOMEM; |
2246 | } | |
2247 | /* Zero unused end of bd ring, according to spec */ | |
2248 | memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] + | |
2249 | ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0, | |
2250 | length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); | |
2251 | } | |
2252 | ||
2253 | /* Init Tx bds */ | |
2254 | for (j = 0; j < ug_info->numQueuesTx; j++) { | |
2255 | /* Setup the skbuff rings */ | |
2256 | ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * | |
2257 | ugeth->ug_info->bdRingLenTx[j], | |
2258 | GFP_KERNEL); | |
2259 | ||
2260 | if (ugeth->tx_skbuff[j] == NULL) { | |
2261 | if (netif_msg_ifup(ugeth)) | |
c84d8055 | 2262 | pr_err("Could not allocate tx_skbuff\n"); |
e19a82c1 PG |
2263 | return -ENOMEM; |
2264 | } | |
2265 | ||
2266 | for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) | |
2267 | ugeth->tx_skbuff[j][i] = NULL; | |
2268 | ||
2269 | ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; | |
2270 | bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; | |
2271 | for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { | |
2272 | /* clear bd buffer */ | |
2273 | out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); | |
2274 | /* set bd status and length */ | |
2275 | out_be32((u32 __iomem *)bd, 0); | |
2276 | bd += sizeof(struct qe_bd); | |
2277 | } | |
2278 | bd -= sizeof(struct qe_bd); | |
2279 | /* set bd status and length */ | |
2280 | out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ | |
2281 | } | |
2282 | ||
2283 | return 0; | |
2284 | } | |
2285 | ||
2286 | static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth) | |
2287 | { | |
2288 | struct ucc_geth_info *ug_info; | |
2289 | struct ucc_fast_info *uf_info; | |
2290 | int length; | |
2291 | u16 i, j; | |
2292 | u8 __iomem *bd; | |
2293 | ||
2294 | ug_info = ugeth->ug_info; | |
2295 | uf_info = &ug_info->uf_info; | |
2296 | ||
2297 | /* Allocate Rx bds */ | |
2298 | for (j = 0; j < ug_info->numQueuesRx; j++) { | |
2299 | length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); | |
2300 | if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { | |
2301 | u32 align = 4; | |
2302 | if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) | |
2303 | align = UCC_GETH_RX_BD_RING_ALIGNMENT; | |
2304 | ugeth->rx_bd_ring_offset[j] = | |
2305 | (u32) kmalloc((u32) (length + align), GFP_KERNEL); | |
2306 | if (ugeth->rx_bd_ring_offset[j] != 0) | |
2307 | ugeth->p_rx_bd_ring[j] = | |
2308 | (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] + | |
2309 | align) & ~(align - 1)); | |
2310 | } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { | |
2311 | ugeth->rx_bd_ring_offset[j] = | |
2312 | qe_muram_alloc(length, | |
2313 | UCC_GETH_RX_BD_RING_ALIGNMENT); | |
2314 | if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) | |
2315 | ugeth->p_rx_bd_ring[j] = | |
2316 | (u8 __iomem *) qe_muram_addr(ugeth-> | |
2317 | rx_bd_ring_offset[j]); | |
2318 | } | |
2319 | if (!ugeth->p_rx_bd_ring[j]) { | |
2320 | if (netif_msg_ifup(ugeth)) | |
c84d8055 | 2321 | pr_err("Can not allocate memory for Rx bd rings\n"); |
e19a82c1 PG |
2322 | return -ENOMEM; |
2323 | } | |
2324 | } | |
2325 | ||
2326 | /* Init Rx bds */ | |
2327 | for (j = 0; j < ug_info->numQueuesRx; j++) { | |
2328 | /* Setup the skbuff rings */ | |
2329 | ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * | |
2330 | ugeth->ug_info->bdRingLenRx[j], | |
2331 | GFP_KERNEL); | |
2332 | ||
2333 | if (ugeth->rx_skbuff[j] == NULL) { | |
2334 | if (netif_msg_ifup(ugeth)) | |
c84d8055 | 2335 | pr_err("Could not allocate rx_skbuff\n"); |
e19a82c1 PG |
2336 | return -ENOMEM; |
2337 | } | |
2338 | ||
2339 | for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) | |
2340 | ugeth->rx_skbuff[j][i] = NULL; | |
2341 | ||
2342 | ugeth->skb_currx[j] = 0; | |
2343 | bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; | |
2344 | for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { | |
2345 | /* set bd status and length */ | |
2346 | out_be32((u32 __iomem *)bd, R_I); | |
2347 | /* clear bd buffer */ | |
2348 | out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); | |
2349 | bd += sizeof(struct qe_bd); | |
2350 | } | |
2351 | bd -= sizeof(struct qe_bd); | |
2352 | /* set bd status and length */ | |
2353 | out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ | |
2354 | } | |
2355 | ||
2356 | return 0; | |
2357 | } | |
2358 | ||
728de4c9 KP |
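/*
 * ucc_geth_startup() turns the validated configuration into hardware
 * state: it converts the thread-count enums to plain numbers, programs
 * the MAC (UPSMR defaults, flow control and MACCFG1, IPGIFG, HAFDUP,
 * UESCR), allocates the Tx and Rx BD rings, and then carves the
 * per-UCC parameter RAM out of MURAM: global Tx/Rx PRAM pages, thread
 * data areas, the send queue memory region, a scheduler (only when
 * more than one Tx queue is used), optional firmware statistics
 * blocks, the interrupt coalescing table and the Rx BD queue table.
 */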
2359 | static int ucc_geth_startup(struct ucc_geth_private *ugeth) |
2360 | { | |
6fee40e9 AF |
2361 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; |
2362 | struct ucc_geth_init_pram __iomem *p_init_enet_pram; | |
728de4c9 KP |
2363 | struct ucc_fast_private *uccf; |
2364 | struct ucc_geth_info *ug_info; | |
2365 | struct ucc_fast_info *uf_info; | |
6fee40e9 AF |
2366 | struct ucc_fast __iomem *uf_regs; |
2367 | struct ucc_geth __iomem *ug_regs; | |
728de4c9 KP |
2368 | int ret_val = -EINVAL; |
2369 | u32 remoder = UCC_GETH_REMODER_INIT; | |
3bc53427 | 2370 | u32 init_enet_pram_offset, cecr_subblock, command; |
e19a82c1 | 2371 | u32 ifstat, i, j, size, l2qt, l3qt; |
728de4c9 KP |
2372 | u16 temoder = UCC_GETH_TEMODER_INIT; |
2373 | u16 test; | |
2374 | u8 function_code = 0; | |
6fee40e9 | 2375 | u8 __iomem *endOfRing; |
728de4c9 KP |
2376 | u8 numThreadsRxNumerical, numThreadsTxNumerical; |
2377 | ||
b39d66a8 | 2378 | ugeth_vdbg("%s: IN", __func__); |
728de4c9 KP |
2379 | uccf = ugeth->uccf; |
2380 | ug_info = ugeth->ug_info; | |
2381 | uf_info = &ug_info->uf_info; | |
2382 | uf_regs = uccf->uf_regs; | |
2383 | ug_regs = ugeth->ug_regs; | |
ce973b14 LY |
2384 | |
2385 | switch (ug_info->numThreadsRx) { | |
2386 | case UCC_GETH_NUM_OF_THREADS_1: | |
2387 | numThreadsRxNumerical = 1; | |
2388 | break; | |
2389 | case UCC_GETH_NUM_OF_THREADS_2: | |
2390 | numThreadsRxNumerical = 2; | |
2391 | break; | |
2392 | case UCC_GETH_NUM_OF_THREADS_4: | |
2393 | numThreadsRxNumerical = 4; | |
2394 | break; | |
2395 | case UCC_GETH_NUM_OF_THREADS_6: | |
2396 | numThreadsRxNumerical = 6; | |
2397 | break; | |
2398 | case UCC_GETH_NUM_OF_THREADS_8: | |
2399 | numThreadsRxNumerical = 8; | |
2400 | break; | |
2401 | default: | |
890de95e | 2402 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2403 | pr_err("Bad number of Rx threads value\n"); |
ce973b14 | 2404 | return -EINVAL; |
ce973b14 LY |
2405 | } |
2406 | ||
2407 | switch (ug_info->numThreadsTx) { | |
2408 | case UCC_GETH_NUM_OF_THREADS_1: | |
2409 | numThreadsTxNumerical = 1; | |
2410 | break; | |
2411 | case UCC_GETH_NUM_OF_THREADS_2: | |
2412 | numThreadsTxNumerical = 2; | |
2413 | break; | |
2414 | case UCC_GETH_NUM_OF_THREADS_4: | |
2415 | numThreadsTxNumerical = 4; | |
2416 | break; | |
2417 | case UCC_GETH_NUM_OF_THREADS_6: | |
2418 | numThreadsTxNumerical = 6; | |
2419 | break; | |
2420 | case UCC_GETH_NUM_OF_THREADS_8: | |
2421 | numThreadsTxNumerical = 8; | |
2422 | break; | |
2423 | default: | |
890de95e | 2424 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2425 | pr_err("Bad number of Tx threads value\n"); |
ce973b14 | 2426 | return -EINVAL; |
ce973b14 LY |
2427 | } |
2428 | ||
2429 | /* Calculate rx_extended_features */ | |
2430 | ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck || | |
2431 | ug_info->ipAddressAlignment || | |
2432 | (ug_info->numStationAddresses != | |
2433 | UCC_GETH_NUM_OF_STATION_ADDRESSES_1); | |
2434 | ||
2435 | ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features || | |
8e95a202 JP |
2436 | (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) || |
2437 | (ug_info->vlanOperationNonTagged != | |
2438 | UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); | |
ce973b14 | 2439 | |
ce973b14 LY |
2440 | init_default_reg_vals(&uf_regs->upsmr, |
2441 | &ug_regs->maccfg1, &ug_regs->maccfg2); | |
2442 | ||
2443 | /* Set UPSMR */ | |
2444 | /* For more details see the hardware spec. */ | |
2445 | init_rx_parameters(ug_info->bro, | |
2446 | ug_info->rsh, ug_info->pro, &uf_regs->upsmr); | |
2447 | ||
2448 | /* We're going to ignore other registers for now, */ | |
2449 | /* except as needed to get up and running */ | |
2450 | ||
2451 | /* Set MACCFG1 */ | |
2452 | /* For more details see the hardware spec. */ | |
2453 | init_flow_control_params(ug_info->aufc, | |
2454 | ug_info->receiveFlowControl, | |
ac421852 | 2455 | ug_info->transmitFlowControl, |
ce973b14 LY |
2456 | ug_info->pausePeriod, |
2457 | ug_info->extensionField, | |
2458 | &uf_regs->upsmr, | |
2459 | &ug_regs->uempr, &ug_regs->maccfg1); | |
2460 | ||
3bc53427 | 2461 | setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); |
ce973b14 LY |
2462 | |
2463 | /* Set IPGIFG */ | |
2464 | /* For more details see the hardware spec. */ | |
2465 | ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1, | |
2466 | ug_info->nonBackToBackIfgPart2, | |
2467 | ug_info-> | |
2468 | miminumInterFrameGapEnforcement, | |
2469 | ug_info->backToBackInterFrameGap, | |
2470 | &ug_regs->ipgifg); | |
2471 | if (ret_val != 0) { | |
890de95e | 2472 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2473 | pr_err("IPGIFG initialization parameter too large\n"); |
ce973b14 LY |
2474 | return ret_val; |
2475 | } | |
2476 | ||
2477 | /* Set HAFDUP */ | |
2478 | /* For more details see the hardware spec. */ | |
2479 | ret_val = init_half_duplex_params(ug_info->altBeb, | |
2480 | ug_info->backPressureNoBackoff, | |
2481 | ug_info->noBackoff, | |
2482 | ug_info->excessDefer, | |
2483 | ug_info->altBebTruncation, | |
2484 | ug_info->maxRetransmission, | |
2485 | ug_info->collisionWindow, | |
2486 | &ug_regs->hafdup); | |
2487 | if (ret_val != 0) { | |
890de95e | 2488 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2489 | pr_err("Half Duplex initialization parameter too large\n"); |
ce973b14 LY |
2490 | return ret_val; |
2491 | } | |
2492 | ||
2493 | /* Set IFSTAT */ | |
2494 | /* For more details see the hardware spec. */ | |
2495 | /* Read only - resets upon read */ | |
2496 | ifstat = in_be32(&ug_regs->ifstat); | |
2497 | ||
2498 | /* Clear UEMPR */ | |
2499 | /* For more details see the hardware spec. */ | |
2500 | out_be32(&ug_regs->uempr, 0); | |
2501 | ||
2502 | /* Set UESCR */ | |
2503 | /* For more details see the hardware spec. */ | |
2504 | init_hw_statistics_gathering_mode((ug_info->statisticsMode & | |
2505 | UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE), | |
2506 | 0, &uf_regs->upsmr, &ug_regs->uescr); | |
2507 | ||
e19a82c1 PG |
2508 | ret_val = ucc_geth_alloc_tx(ugeth); |
2509 | if (ret_val != 0) | |
2510 | return ret_val; | |
ce973b14 | 2511 | |
e19a82c1 PG |
2512 | ret_val = ucc_geth_alloc_rx(ugeth); |
2513 | if (ret_val != 0) | |
2514 | return ret_val; | |
ce973b14 LY |
2515 | |
2516 | /* | |
2517 | * Global PRAM | |
2518 | */ | |
2519 | /* Tx global PRAM */ | |
2520 | /* Allocate global tx parameter RAM page */ | |
2521 | ugeth->tx_glbl_pram_offset = | |
18a8e864 | 2522 | qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram), |
ce973b14 | 2523 | UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); |
4c35630c | 2524 | if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) { |
890de95e | 2525 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2526 | pr_err("Can not allocate DPRAM memory for p_tx_glbl_pram\n"); |
ce973b14 LY |
2527 | return -ENOMEM; |
2528 | } | |
2529 | ugeth->p_tx_glbl_pram = | |
6fee40e9 | 2530 | (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2531 | tx_glbl_pram_offset); |
2532 | /* Zero out p_tx_glbl_pram */ | |
6fee40e9 | 2533 | memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); |
ce973b14 LY |
2534 | |
2535 | /* Fill global PRAM */ | |
2536 | ||
2537 | /* TQPTR */ | |
2538 | /* Size varies with number of Tx threads */ | |
2539 | ugeth->thread_dat_tx_offset = | |
2540 | qe_muram_alloc(numThreadsTxNumerical * | |
18a8e864 | 2541 | sizeof(struct ucc_geth_thread_data_tx) + |
ce973b14 LY |
2542 | 32 * (numThreadsTxNumerical == 1), |
2543 | UCC_GETH_THREAD_DATA_ALIGNMENT); | |
4c35630c | 2544 | if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) { |
890de95e | 2545 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2546 | pr_err("Can not allocate DPRAM memory for p_thread_data_tx\n"); |
ce973b14 LY |
2547 | return -ENOMEM; |
2548 | } | |
2549 | ||
2550 | ugeth->p_thread_data_tx = | |
6fee40e9 | 2551 | (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2552 | thread_dat_tx_offset); |
2553 | out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); | |
2554 | ||
2555 | /* vtagtable */ | |
2556 | for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++) | |
2557 | out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i], | |
2558 | ug_info->vtagtable[i]); | |
2559 | ||
2560 | /* iphoffset */ | |
2561 | for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) | |
6fee40e9 AF |
2562 | out_8(&ugeth->p_tx_glbl_pram->iphoffset[i], |
2563 | ug_info->iphoffset[i]); | |
ce973b14 LY |
2564 | |
2565 | /* SQPTR */ | |
2566 | /* Size varies with number of Tx queues */ | |
2567 | ugeth->send_q_mem_reg_offset = | |
2568 | qe_muram_alloc(ug_info->numQueuesTx * | |
18a8e864 | 2569 | sizeof(struct ucc_geth_send_queue_qd), |
ce973b14 | 2570 | UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); |
4c35630c | 2571 | if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) { |
890de95e | 2572 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2573 | pr_err("Can not allocate DPRAM memory for p_send_q_mem_reg\n"); |
ce973b14 LY |
2574 | return -ENOMEM; |
2575 | } | |
2576 | ||
2577 | ugeth->p_send_q_mem_reg = | |
6fee40e9 | 2578 | (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2579 | send_q_mem_reg_offset); |
2580 | out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); | |
2581 | ||
2582 | /* Setup the table */ | |
2583 | /* Assume BD rings are already established */ | |
2584 | for (i = 0; i < ug_info->numQueuesTx; i++) { | |
2585 | endOfRing = | |
2586 | ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - | |
18a8e864 | 2587 | 1) * sizeof(struct qe_bd); |
ce973b14 LY |
2588 | if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { |
2589 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, | |
2590 | (u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); | |
2591 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. | |
2592 | last_bd_completed_address, | |
2593 | (u32) virt_to_phys(endOfRing)); | |
2594 | } else if (ugeth->ug_info->uf_info.bd_mem_part == | |
2595 | MEM_PART_MURAM) { | |
2596 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, | |
2597 | (u32) immrbar_virt_to_phys(ugeth-> | |
2598 | p_tx_bd_ring[i])); | |
2599 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. | |
2600 | last_bd_completed_address, | |
2601 | (u32) immrbar_virt_to_phys(endOfRing)); | |
2602 | } | |
2603 | } | |
2604 | ||
2605 | /* schedulerbasepointer */ | |
2606 | ||
2607 | if (ug_info->numQueuesTx > 1) { | |
2608 | /* scheduler exists only if more than 1 tx queue */ | |
2609 | ugeth->scheduler_offset = | |
18a8e864 | 2610 | qe_muram_alloc(sizeof(struct ucc_geth_scheduler), |
ce973b14 | 2611 | UCC_GETH_SCHEDULER_ALIGNMENT); |
4c35630c | 2612 | if (IS_ERR_VALUE(ugeth->scheduler_offset)) { |
890de95e | 2613 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2614 | pr_err("Can not allocate DPRAM memory for p_scheduler\n"); |
ce973b14 LY |
2615 | return -ENOMEM; |
2616 | } | |
2617 | ||
2618 | ugeth->p_scheduler = | |
6fee40e9 | 2619 | (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2620 | scheduler_offset); |
2621 | out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, | |
2622 | ugeth->scheduler_offset); | |
2623 | /* Zero out p_scheduler */ | |
6fee40e9 | 2624 | memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); |
ce973b14 LY |
2625 | |
2626 | /* Set values in scheduler */ | |
2627 | out_be32(&ugeth->p_scheduler->mblinterval, | |
2628 | ug_info->mblinterval); | |
2629 | out_be16(&ugeth->p_scheduler->nortsrbytetime, | |
2630 | ug_info->nortsrbytetime); | |
6fee40e9 AF |
2631 | out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz); |
2632 | out_8(&ugeth->p_scheduler->strictpriorityq, | |
2633 | ug_info->strictpriorityq); | |
2634 | out_8(&ugeth->p_scheduler->txasap, ug_info->txasap); | |
2635 | out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw); | |
ce973b14 | 2636 | for (i = 0; i < NUM_TX_QUEUES; i++) |
6fee40e9 AF |
2637 | out_8(&ugeth->p_scheduler->weightfactor[i], |
2638 | ug_info->weightfactor[i]); | |
ce973b14 LY |
2639 | |
2640 | /* Set pointers to cpucount registers in scheduler */ | |
2641 | ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); | |
2642 | ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1); | |
2643 | ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2); | |
2644 | ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3); | |
2645 | ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4); | |
2646 | ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5); | |
2647 | ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); | |
2648 | ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7); | |
2649 | } | |
2650 | ||
2651 | /* schedulerbasepointer */ | |
2652 | /* TxRMON_PTR (statistics) */ | |
2653 | if (ug_info-> | |
2654 | statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { | |
2655 | ugeth->tx_fw_statistics_pram_offset = | |
2656 | qe_muram_alloc(sizeof | |
18a8e864 | 2657 | (struct ucc_geth_tx_firmware_statistics_pram), |
ce973b14 | 2658 | UCC_GETH_TX_STATISTICS_ALIGNMENT); |
4c35630c | 2659 | if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) { |
890de95e | 2660 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2661 | pr_err("Can not allocate DPRAM memory for p_tx_fw_statistics_pram\n"); |
ce973b14 LY |
2662 | return -ENOMEM; |
2663 | } | |
2664 | ugeth->p_tx_fw_statistics_pram = | |
6fee40e9 | 2665 | (struct ucc_geth_tx_firmware_statistics_pram __iomem *) |
ce973b14 LY |
2666 | qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); |
2667 | /* Zero out p_tx_fw_statistics_pram */ | |
6fee40e9 | 2668 | memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram, |
18a8e864 | 2669 | 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); |
ce973b14 LY |
2670 | } |
2671 | ||
2672 | /* temoder */ | |
2673 | /* Already has speed set */ | |
2674 | ||
2675 | if (ug_info->numQueuesTx > 1) | |
2676 | temoder |= TEMODER_SCHEDULER_ENABLE; | |
2677 | if (ug_info->ipCheckSumGenerate) | |
2678 | temoder |= TEMODER_IP_CHECKSUM_GENERATE; | |
2679 | temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT); | |
2680 | out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder); | |
2681 | ||
2682 | test = in_be16(&ugeth->p_tx_glbl_pram->temoder); | |
2683 | ||
2684 | /* Function code register value to be used later */ | |
6b0b594b | 2685 | function_code = UCC_BMR_BO_BE | UCC_BMR_GBL; |
ce973b14 LY |
2686 | /* Required for QE */ |
2687 | ||
2688 | /* function code register */ | |
2689 | out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24); | |
2690 | ||
2691 | /* Rx global PRAM */ | |
2692 | /* Allocate global rx parameter RAM page */ | |
2693 | ugeth->rx_glbl_pram_offset = | |
18a8e864 | 2694 | qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram), |
ce973b14 | 2695 | UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); |
4c35630c | 2696 | if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) { |
890de95e | 2697 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2698 | pr_err("Can not allocate DPRAM memory for p_rx_glbl_pram\n"); |
ce973b14 LY |
2699 | return -ENOMEM; |
2700 | } | |
2701 | ugeth->p_rx_glbl_pram = | |
6fee40e9 | 2702 | (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2703 | rx_glbl_pram_offset); |
2704 | /* Zero out p_rx_glbl_pram */ | |
6fee40e9 | 2705 | memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); |
ce973b14 LY |
2706 | |
2707 | /* Fill global PRAM */ | |
2708 | ||
2709 | /* RQPTR */ | |
2710 | /* Size varies with number of Rx threads */ | |
2711 | ugeth->thread_dat_rx_offset = | |
2712 | qe_muram_alloc(numThreadsRxNumerical * | |
18a8e864 | 2713 | sizeof(struct ucc_geth_thread_data_rx), |
ce973b14 | 2714 | UCC_GETH_THREAD_DATA_ALIGNMENT); |
4c35630c | 2715 | if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) { |
890de95e | 2716 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2717 | pr_err("Can not allocate DPRAM memory for p_thread_data_rx\n"); |
ce973b14 LY |
2718 | return -ENOMEM; |
2719 | } | |
2720 | ||
2721 | ugeth->p_thread_data_rx = | |
6fee40e9 | 2722 | (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2723 | thread_dat_rx_offset); |
2724 | out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); | |
2725 | ||
2726 | /* typeorlen */ | |
2727 | out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen); | |
2728 | ||
2729 | /* rxrmonbaseptr (statistics) */ | |
2730 | if (ug_info-> | |
2731 | statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { | |
2732 | ugeth->rx_fw_statistics_pram_offset = | |
2733 | qe_muram_alloc(sizeof | |
18a8e864 | 2734 | (struct ucc_geth_rx_firmware_statistics_pram), |
ce973b14 | 2735 | UCC_GETH_RX_STATISTICS_ALIGNMENT); |
4c35630c | 2736 | if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { |
890de95e | 2737 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2738 | pr_err("Can not allocate DPRAM memory for p_rx_fw_statistics_pram\n"); |
ce973b14 LY |
2739 | return -ENOMEM; |
2740 | } | |
2741 | ugeth->p_rx_fw_statistics_pram = | |
6fee40e9 | 2742 | (struct ucc_geth_rx_firmware_statistics_pram __iomem *) |
ce973b14 LY |
2743 | qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); |
2744 | /* Zero out p_rx_fw_statistics_pram */ | |
6fee40e9 | 2745 | memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0, |
18a8e864 | 2746 | sizeof(struct ucc_geth_rx_firmware_statistics_pram)); |
ce973b14 LY |
2747 | } |
2748 | ||
2749 | /* intCoalescingPtr */ | |
2750 | ||
2751 | /* Size varies with number of Rx queues */ | |
2752 | ugeth->rx_irq_coalescing_tbl_offset = | |
2753 | qe_muram_alloc(ug_info->numQueuesRx * | |
7563907e MB |
2754 | sizeof(struct ucc_geth_rx_interrupt_coalescing_entry) |
2755 | + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); | |
4c35630c | 2756 | if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { |
890de95e | 2757 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2758 | pr_err("Can not allocate DPRAM memory for p_rx_irq_coalescing_tbl\n"); |
ce973b14 LY |
2759 | return -ENOMEM; |
2760 | } | |
2761 | ||
2762 | ugeth->p_rx_irq_coalescing_tbl = | |
6fee40e9 | 2763 | (struct ucc_geth_rx_interrupt_coalescing_table __iomem *) |
ce973b14 LY |
2764 | qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); |
2765 | out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, | |
2766 | ugeth->rx_irq_coalescing_tbl_offset); | |
2767 | ||
2768 | /* Fill interrupt coalescing table */ | |
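/*
 * Both the max value and the running counter are seeded with
 * interruptcoalescingmaxvalue: the QE presumably counts received
 * frames down from this value and raises the Rx interrupt when the
 * counter reaches zero, so the counter must start at the maximum.
 */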
2769 | for (i = 0; i < ug_info->numQueuesRx; i++) { | |
2770 | out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. | |
2771 | interruptcoalescingmaxvalue, | |
2772 | ug_info->interruptcoalescingmaxvalue[i]); | |
2773 | out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. | |
2774 | interruptcoalescingcounter, | |
2775 | ug_info->interruptcoalescingmaxvalue[i]); | |
2776 | } | |
2777 | ||
2778 | /* MRBLR */ | |
2779 | init_max_rx_buff_len(uf_info->max_rx_buf_length, | |
2780 | &ugeth->p_rx_glbl_pram->mrblr); | |
2781 | /* MFLR */ | |
2782 | out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength); | |
2783 | /* MINFLR */ | |
2784 | init_min_frame_len(ug_info->minFrameLength, | |
2785 | &ugeth->p_rx_glbl_pram->minflr, | |
2786 | &ugeth->p_rx_glbl_pram->mrblr); | |
2787 | /* MAXD1 */ | |
2788 | out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length); | |
2789 | /* MAXD2 */ | |
2790 | out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length); | |
2791 | ||
2792 | /* l2qt */ | |
2793 | l2qt = 0; | |
2794 | for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) | |
2795 | l2qt |= (ug_info->l2qt[i] << (28 - 4 * i)); | |
2796 | out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt); | |
2797 | ||
2798 | /* l3qt */ | |
2799 | for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) { | |
2800 | l3qt = 0; | |
2801 | for (i = 0; i < 8; i++) | |
2802 | l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); | |
18a8e864 | 2803 | out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt); |
ce973b14 LY |
2804 | } |
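/*
 * l2qt and l3qt pack one 4-bit Rx queue number per priority level,
 * eight entries per 32-bit word, highest priority in the top nibble:
 * priority 0 lands in bits 31-28, priority 1 in bits 27-24, and so on,
 * which is what the (28 - 4 * i) shift above computes.
 */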
2805 | ||
2806 | /* vlantype */ | |
2807 | out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype); | |
2808 | ||
2809 | /* vlantci */ | |
2810 | out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci); | |
2811 | ||
2812 | /* ecamptr */ | |
2813 | out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr); | |
2814 | ||
2815 | /* RBDQPTR */ | |
2816 | /* Size varies with number of Rx queues */ | |
2817 | ugeth->rx_bd_qs_tbl_offset = | |
2818 | qe_muram_alloc(ug_info->numQueuesRx * | |
18a8e864 LY |
2819 | (sizeof(struct ucc_geth_rx_bd_queues_entry) + |
2820 | sizeof(struct ucc_geth_rx_prefetched_bds)), | |
ce973b14 | 2821 | UCC_GETH_RX_BD_QUEUES_ALIGNMENT); |
4c35630c | 2822 | if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { |
890de95e | 2823 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2824 | pr_err("Can not allocate DPRAM memory for p_rx_bd_qs_tbl\n"); |
ce973b14 LY |
2825 | return -ENOMEM; |
2826 | } | |
2827 | ||
2828 | ugeth->p_rx_bd_qs_tbl = | |
6fee40e9 | 2829 | (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2830 | rx_bd_qs_tbl_offset); |
2831 | out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); | |
2832 | /* Zero out p_rx_bd_qs_tbl */ | |
6fee40e9 | 2833 | memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl, |
ce973b14 | 2834 | 0, |
18a8e864 LY |
2835 | ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + |
2836 | sizeof(struct ucc_geth_rx_prefetched_bds))); | |
ce973b14 LY |
2837 | |
2838 | /* Setup the table */ | |
2839 | /* Assume BD rings are already established */ | |
2840 | for (i = 0; i < ug_info->numQueuesRx; i++) { | |
2841 | if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { | |
2842 | out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, | |
2843 | (u32) virt_to_phys(ugeth->p_rx_bd_ring[i])); | |
2844 | } else if (ugeth->ug_info->uf_info.bd_mem_part == | |
2845 | MEM_PART_MURAM) { | |
2846 | out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, | |
2847 | (u32) immrbar_virt_to_phys(ugeth-> | |
2848 | p_rx_bd_ring[i])); | |
2849 | } | |
2850 | /* rest of fields handled by QE */ | |
2851 | } | |
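/*
 * externalbdbaseptr must hold an address the QE can master directly:
 * a physical address when the BD rings live in system memory, or an
 * IMMR-relative address when they were placed in MURAM.
 */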
2852 | ||
2853 | /* remoder */ | |
2854 | /* Already has speed set */ | |
2855 | ||
2856 | if (ugeth->rx_extended_features) | |
2857 | remoder |= REMODER_RX_EXTENDED_FEATURES; | |
2858 | if (ug_info->rxExtendedFiltering) | |
2859 | remoder |= REMODER_RX_EXTENDED_FILTERING; | |
2860 | if (ug_info->dynamicMaxFrameLength) | |
2861 | remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH; | |
2862 | if (ug_info->dynamicMinFrameLength) | |
2863 | remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH; | |
2864 | remoder |= | |
2865 | ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT; | |
2866 | remoder |= | |
2867 | ug_info-> | |
2868 | vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT; | |
2869 | remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT; | |
2870 | remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT); | |
2871 | if (ug_info->ipCheckSumCheck) | |
2872 | remoder |= REMODER_IP_CHECKSUM_CHECK; | |
2873 | if (ug_info->ipAddressAlignment) | |
2874 | remoder |= REMODER_IP_ADDRESS_ALIGNMENT; | |
2875 | out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder); | |
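/*
 * remoder is written once here and touched again by
 * init_firmware_statistics_gathering_mode() below, which is why that
 * helper takes pointers to temoder and remoder rather than values: it
 * updates the firmware-statistics enable bits in place.
 */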
2876 | ||
2877 | /* Note that this function must be called */ | |
2878 | /* ONLY AFTER p_tx_fw_statistics_pram */ | |
2879 | /* and p_UccGethRxFirmwareStatisticsPram are allocated! */ |
2880 | init_firmware_statistics_gathering_mode((ug_info-> | |
2881 | statisticsMode & | |
2882 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX), | |
2883 | (ug_info->statisticsMode & | |
2884 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX), | |
2885 | &ugeth->p_tx_glbl_pram->txrmonbaseptr, | |
2886 | ugeth->tx_fw_statistics_pram_offset, | |
2887 | &ugeth->p_rx_glbl_pram->rxrmonbaseptr, | |
2888 | ugeth->rx_fw_statistics_pram_offset, | |
2889 | &ugeth->p_tx_glbl_pram->temoder, | |
2890 | &ugeth->p_rx_glbl_pram->remoder); | |
2891 | ||
2892 | /* function code register */ | |
6fee40e9 | 2893 | out_8(&ugeth->p_rx_glbl_pram->rstate, function_code); |
ce973b14 LY |
2894 | |
2895 | /* initialize extended filtering */ | |
2896 | if (ug_info->rxExtendedFiltering) { | |
2897 | if (!ug_info->extendedFilteringChainPointer) { | |
890de95e | 2898 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2899 | pr_err("Null Extended Filtering Chain Pointer\n"); |
ce973b14 LY |
2900 | return -EINVAL; |
2901 | } | |
2902 | ||
2903 | /* Allocate memory for extended filtering Mode Global | |
2904 | Parameters */ | |
2905 | ugeth->exf_glbl_param_offset = | |
18a8e864 | 2906 | qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram), |
ce973b14 | 2907 | UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); |
4c35630c | 2908 | if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { |
890de95e | 2909 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2910 | pr_err("Can not allocate DPRAM memory for p_exf_glbl_param\n"); |
ce973b14 LY |
2911 | return -ENOMEM; |
2912 | } | |
2913 | ||
2914 | ugeth->p_exf_glbl_param = | |
6fee40e9 | 2915 | (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2916 | exf_glbl_param_offset); |
2917 | out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, | |
2918 | ugeth->exf_glbl_param_offset); | |
2919 | out_be32(&ugeth->p_exf_glbl_param->l2pcdptr, | |
2920 | (u32) ug_info->extendedFilteringChainPointer); | |
2921 | ||
2922 | } else { /* initialize 82xx style address filtering */ | |
2923 | ||
2924 | /* Init individual address recognition registers to disabled */ | |
2925 | ||
2926 | for (j = 0; j < NUM_OF_PADDRS; j++) | |
2927 | ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); | |
2928 | ||
ce973b14 | 2929 | p_82xx_addr_filt = |
6fee40e9 | 2930 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> |
ce973b14 LY |
2931 | p_rx_glbl_pram->addressfiltering; |
2932 | ||
2933 | ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, | |
2934 | ENET_ADDR_TYPE_GROUP); | |
2935 | ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, | |
2936 | ENET_ADDR_TYPE_INDIVIDUAL); | |
2937 | } | |
2938 | ||
2939 | /* | |
2940 | * Initialize UCC at QE level | |
2941 | */ | |
2942 | ||
2943 | command = QE_INIT_TX_RX; | |
2944 | ||
2945 | /* Allocate shadow InitEnet command parameter structure. | |
2946 | * This is needed because after the InitEnet command is executed, | |
2947 | * the structure in DPRAM is released, since DPRAM is a premium |
2948 | * resource. | |
2949 | * This shadow structure keeps a copy of what was done so that the | |
2950 | * allocated resources can be released when the channel is freed. | |
2951 | */ | |
2952 | if (!(ugeth->p_init_enet_param_shadow = | |
04b588d7 | 2953 | kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) { |
890de95e | 2954 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2955 | pr_err("Can not allocate memory for p_UccInitEnetParamShadows\n"); |
ce973b14 LY |
2956 | return -ENOMEM; |
2957 | } | |
2958 | /* Zero out *p_init_enet_param_shadow */ | |
2959 | memset((char *)ugeth->p_init_enet_param_shadow, | |
18a8e864 | 2960 | 0, sizeof(struct ucc_geth_init_pram)); |
ce973b14 LY |
2961 | |
2962 | /* Fill shadow InitEnet command parameter structure */ | |
2963 | ||
2964 | ugeth->p_init_enet_param_shadow->resinit1 = | |
2965 | ENET_INIT_PARAM_MAGIC_RES_INIT1; | |
2966 | ugeth->p_init_enet_param_shadow->resinit2 = | |
2967 | ENET_INIT_PARAM_MAGIC_RES_INIT2; | |
2968 | ugeth->p_init_enet_param_shadow->resinit3 = | |
2969 | ENET_INIT_PARAM_MAGIC_RES_INIT3; | |
2970 | ugeth->p_init_enet_param_shadow->resinit4 = | |
2971 | ENET_INIT_PARAM_MAGIC_RES_INIT4; | |
2972 | ugeth->p_init_enet_param_shadow->resinit5 = | |
2973 | ENET_INIT_PARAM_MAGIC_RES_INIT5; | |
2974 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= | |
2975 | ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT; | |
2976 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= | |
2977 | ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT; | |
2978 | ||
2979 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= | |
2980 | ugeth->rx_glbl_pram_offset | ug_info->riscRx; | |
2981 | if ((ug_info->largestexternallookupkeysize != | |
8e95a202 JP |
2982 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) && |
2983 | (ug_info->largestexternallookupkeysize != | |
2984 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) && | |
2985 | (ug_info->largestexternallookupkeysize != | |
2986 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { | |
890de95e | 2987 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 2988 | pr_err("Invalid largest External Lookup Key Size\n"); |
ce973b14 LY |
2989 | return -EINVAL; |
2990 | } | |
2991 | ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = | |
2992 | ug_info->largestexternallookupkeysize; | |
18a8e864 | 2993 | size = sizeof(struct ucc_geth_thread_rx_pram); |
ce973b14 LY |
2994 | if (ug_info->rxExtendedFiltering) { |
2995 | size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; | |
2996 | if (ug_info->largestexternallookupkeysize == | |
8844a006 | 2997 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) |
ce973b14 LY |
2998 | size += |
2999 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; | |
3000 | if (ug_info->largestexternallookupkeysize == | |
8844a006 | 3001 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES) |
ce973b14 LY |
3002 | size += |
3003 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; | |
3004 | } | |
3005 | ||
3006 | if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth-> | |
3007 | p_init_enet_param_shadow->rxthread[0]), | |
3008 | (u8) (numThreadsRxNumerical + 1) | |
3009 | /* Rx needs one extra for terminator */ | |
3010 | , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, | |
3011 | ug_info->riscRx, 1)) != 0) { | |
890de95e | 3012 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 3013 | pr_err("Can not fill p_init_enet_param_shadow\n"); |
ce973b14 LY |
3014 | return ret_val; |
3015 | } | |
3016 | ||
3017 | ugeth->p_init_enet_param_shadow->txglobal = | |
3018 | ugeth->tx_glbl_pram_offset | ug_info->riscTx; | |
3019 | if ((ret_val = | |
3020 | fill_init_enet_entries(ugeth, | |
3021 | &(ugeth->p_init_enet_param_shadow-> | |
3022 | txthread[0]), numThreadsTxNumerical, | |
18a8e864 | 3023 | sizeof(struct ucc_geth_thread_tx_pram), |
ce973b14 LY |
3024 | UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, |
3025 | ug_info->riscTx, 0)) != 0) { | |
890de95e | 3026 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 3027 | pr_err("Can not fill p_init_enet_param_shadow\n"); |
ce973b14 LY |
3028 | return ret_val; |
3029 | } | |
3030 | ||
3031 | /* Load Rx bds with buffers */ | |
3032 | for (i = 0; i < ug_info->numQueuesRx; i++) { | |
3033 | if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { | |
890de95e | 3034 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 3035 | pr_err("Can not fill Rx bds with buffers\n"); |
ce973b14 LY |
3036 | return ret_val; |
3037 | } | |
3038 | } | |
3039 | ||
3040 | /* Allocate InitEnet command parameter structure */ | |
18a8e864 | 3041 | init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4); |
4c35630c | 3042 | if (IS_ERR_VALUE(init_enet_pram_offset)) { |
890de95e | 3043 | if (netif_msg_ifup(ugeth)) |
c84d8055 | 3044 | pr_err("Can not allocate DPRAM memory for p_init_enet_pram\n"); |
ce973b14 LY |
3045 | return -ENOMEM; |
3046 | } | |
3047 | p_init_enet_pram = | |
6fee40e9 | 3048 | (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset); |
ce973b14 LY |
3049 | |
3050 | /* Copy shadow InitEnet command parameter structure into PRAM */ | |
6fee40e9 AF |
3051 | out_8(&p_init_enet_pram->resinit1, |
3052 | ugeth->p_init_enet_param_shadow->resinit1); | |
3053 | out_8(&p_init_enet_pram->resinit2, | |
3054 | ugeth->p_init_enet_param_shadow->resinit2); | |
3055 | out_8(&p_init_enet_pram->resinit3, | |
3056 | ugeth->p_init_enet_param_shadow->resinit3); | |
3057 | out_8(&p_init_enet_pram->resinit4, | |
3058 | ugeth->p_init_enet_param_shadow->resinit4); | |
ce973b14 LY |
3059 | out_be16(&p_init_enet_pram->resinit5, |
3060 | ugeth->p_init_enet_param_shadow->resinit5); | |
6fee40e9 AF |
3061 | out_8(&p_init_enet_pram->largestexternallookupkeysize, |
3062 | ugeth->p_init_enet_param_shadow->largestexternallookupkeysize); | |
ce973b14 LY |
3063 | out_be32(&p_init_enet_pram->rgftgfrxglobal, |
3064 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal); | |
3065 | for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) | |
3066 | out_be32(&p_init_enet_pram->rxthread[i], | |
3067 | ugeth->p_init_enet_param_shadow->rxthread[i]); | |
3068 | out_be32(&p_init_enet_pram->txglobal, | |
3069 | ugeth->p_init_enet_param_shadow->txglobal); | |
3070 | for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++) | |
3071 | out_be32(&p_init_enet_pram->txthread[i], | |
3072 | ugeth->p_init_enet_param_shadow->txthread[i]); | |
3073 | ||
3074 | /* Issue QE command */ | |
3075 | cecr_subblock = | |
3076 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | |
18a8e864 | 3077 | qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, |
ce973b14 LY |
3078 | init_enet_pram_offset); |
3079 | ||
3080 | /* Free InitEnet command parameter */ | |
3081 | qe_muram_free(init_enet_pram_offset); | |
3082 | ||
3083 | return 0; | |
3084 | } | |
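/*
 * The tail of the function above stages the InitEnet parameter page in
 * a kernel-memory shadow, copies it into a freshly allocated MURAM
 * page, hands it to the QE with QE_INIT_TX_RX, and frees the MURAM
 * copy straight away; only the shadow is kept, so the resources it
 * records can be released when the channel is freed.
 */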
3085 | ||
ce973b14 LY |
3086 | /* This is called by the kernel when a frame is ready for transmission. */ |
3087 | /* It is reached via the ucc_geth_netdev_ops.ndo_start_xmit hook */ |
3088 | static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
3089 | { | |
18a8e864 | 3090 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
d5b9049d MR |
3091 | #ifdef CONFIG_UGETH_TX_ON_DEMAND |
3092 | struct ucc_fast_private *uccf; | |
3093 | #endif | |
6fee40e9 | 3094 | u8 __iomem *bd; /* BD pointer */ |
ce973b14 LY |
3095 | u32 bd_status; |
3096 | u8 txQ = 0; | |
22580f89 | 3097 | unsigned long flags; |
ce973b14 | 3098 | |
b39d66a8 | 3099 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 | 3100 | |
22580f89 | 3101 | spin_lock_irqsave(&ugeth->lock, flags); |
ce973b14 | 3102 | |
09f75cd7 | 3103 | dev->stats.tx_bytes += skb->len; |
ce973b14 LY |
3104 | |
3105 | /* Start from the next BD that should be filled */ | |
3106 | bd = ugeth->txBd[txQ]; | |
6fee40e9 | 3107 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 LY |
3108 | /* Save the skb pointer so we can free it later */ |
3109 | ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; | |
3110 | ||
3111 | /* Update the current skb pointer (wrapping if this was the last) */ | |
3112 | ugeth->skb_curtx[txQ] = | |
3113 | (ugeth->skb_curtx[txQ] + | |
3114 | 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); | |
3115 | ||
3116 | /* set up the buffer descriptor */ | |
6fee40e9 | 3117 | out_be32(&((struct qe_bd __iomem *)bd)->buf, |
da1aa63e | 3118 | dma_map_single(ugeth->dev, skb->data, |
7f80202b | 3119 | skb->len, DMA_TO_DEVICE)); |
ce973b14 | 3120 | |
18a8e864 | 3121 | /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ |
ce973b14 LY |
3122 | |
3123 | bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; | |
3124 | ||
18a8e864 | 3125 | /* set bd status and length */ |
6fee40e9 | 3126 | out_be32((u32 __iomem *)bd, bd_status); |
ce973b14 | 3127 | |
ce973b14 LY |
3128 | /* Move to next BD in the ring */ |
3129 | if (!(bd_status & T_W)) | |
a394f013 | 3130 | bd += sizeof(struct qe_bd); |
ce973b14 | 3131 | else |
a394f013 | 3132 | bd = ugeth->p_tx_bd_ring[txQ]; |
ce973b14 LY |
3133 | |
3134 | /* If the next BD still needs to be cleaned up, then the bds | |
3135 | are full. We need to tell the kernel to stop sending us stuff. */ | |
3136 | if (bd == ugeth->confBd[txQ]) { | |
3137 | if (!netif_queue_stopped(dev)) | |
3138 | netif_stop_queue(dev); | |
3139 | } | |
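/*
 * The ring is treated as full when the producer catches up with
 * confBd, the next descriptor still awaiting Tx-confirmation
 * processing; ucc_geth_tx() wakes the queue again once it has
 * reclaimed at least one descriptor.
 */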
3140 | ||
a394f013 LY |
3141 | ugeth->txBd[txQ] = bd; |
3142 | ||
d13d6bff RC |
3143 | skb_tx_timestamp(skb); |
3144 | ||
ce973b14 LY |
3145 | if (ugeth->p_scheduler) { |
3146 | ugeth->cpucount[txQ]++; | |
3147 | /* Indicate to QE that there are more Tx bds ready for | |
3148 | transmission */ | |
3149 | /* This is done by writing a running counter of the bd | |
3150 | count to the scheduler PRAM. */ | |
3151 | out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]); | |
3152 | } | |
3153 | ||
d5b9049d MR |
3154 | #ifdef CONFIG_UGETH_TX_ON_DEMAND |
3155 | uccf = ugeth->uccf; | |
3156 | out_be16(uccf->p_utodr, UCC_FAST_TOD); | |
3157 | #endif | |
22580f89 | 3158 | spin_unlock_irqrestore(&ugeth->lock, flags); |
ce973b14 | 3159 | |
6ed10654 | 3160 | return NETDEV_TX_OK; |
ce973b14 LY |
3161 | } |
3162 | ||
18a8e864 | 3163 | static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) |
ce973b14 LY |
3164 | { |
3165 | struct sk_buff *skb; | |
6fee40e9 | 3166 | u8 __iomem *bd; |
ce973b14 LY |
3167 | u16 length, howmany = 0; |
3168 | u32 bd_status; | |
3169 | u8 *bdBuffer; | |
4b8fdefa | 3170 | struct net_device *dev; |
ce973b14 | 3171 | |
b39d66a8 | 3172 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 | 3173 | |
da1aa63e | 3174 | dev = ugeth->ndev; |
88a15f2e | 3175 | |
ce973b14 LY |
3176 | /* collect received buffers */ |
3177 | bd = ugeth->rxBd[rxQ]; | |
3178 | ||
6fee40e9 | 3179 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 LY |
3180 | |
3181 | /* while there are received buffers and BD is full (~R_E) */ | |
3182 | while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { | |
6fee40e9 | 3183 | bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf); |
ce973b14 LY |
3184 | length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); |
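/*
 * The 4 bytes subtracted above account for the Ethernet FCS, which the
 * controller stores after the payload and includes in the BD length
 * field.
 */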
3185 | skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; | |
3186 | ||
3187 | /* determine whether buffer is first, last, first and last | |
3188 | (single buffer frame) or middle (not first and not last) */ | |
3189 | if (!skb || | |
3190 | (!(bd_status & (R_F | R_L))) || | |
3191 | (bd_status & R_ERRORS_FATAL)) { | |
890de95e | 3192 | if (netif_msg_rx_err(ugeth)) |
c84d8055 JP |
3193 | pr_err("%d: ERROR!!! skb - 0x%08x\n", |
3194 | __LINE__, (u32)skb); | |
66eef59f | 3195 | dev_kfree_skb(skb); |
ce973b14 LY |
3196 | |
3197 | ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; | |
09f75cd7 | 3198 | dev->stats.rx_dropped++; |
ce973b14 | 3199 | } else { |
09f75cd7 | 3200 | dev->stats.rx_packets++; |
ce973b14 LY |
3201 | howmany++; |
3202 | ||
3203 | /* Prep the skb for the packet */ | |
3204 | skb_put(skb, length); | |
3205 | ||
3206 | /* Tell the skb what kind of packet this is */ | |
da1aa63e | 3207 | skb->protocol = eth_type_trans(skb, ugeth->ndev); |
ce973b14 | 3208 | |
09f75cd7 | 3209 | dev->stats.rx_bytes += length; |
ce973b14 | 3210 | /* Send the packet up the stack */ |
ce973b14 | 3211 | netif_receive_skb(skb); |
ce973b14 LY |
3212 | } |
3213 | ||
ce973b14 LY |
3214 | skb = get_new_skb(ugeth, bd); |
3215 | if (!skb) { | |
890de95e | 3216 | if (netif_msg_rx_err(ugeth)) |
c84d8055 | 3217 | pr_warn("No Rx Data Buffer\n"); |
09f75cd7 | 3218 | dev->stats.rx_dropped++; |
ce973b14 LY |
3219 | break; |
3220 | } | |
3221 | ||
3222 | ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb; | |
3223 | ||
3224 | /* update to point at the next skb */ | |
3225 | ugeth->skb_currx[rxQ] = | |
3226 | (ugeth->skb_currx[rxQ] + | |
3227 | 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]); | |
3228 | ||
3229 | if (bd_status & R_W) | |
3230 | bd = ugeth->p_rx_bd_ring[rxQ]; | |
3231 | else | |
18a8e864 | 3232 | bd += sizeof(struct qe_bd); |
ce973b14 | 3233 | |
6fee40e9 | 3234 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 LY |
3235 | } |
3236 | ||
3237 | ugeth->rxBd[rxQ] = bd; | |
ce973b14 LY |
3238 | return howmany; |
3239 | } | |
3240 | ||
3241 | static int ucc_geth_tx(struct net_device *dev, u8 txQ) | |
3242 | { | |
3243 | /* Start from the next BD that should be filled */ | |
18a8e864 | 3244 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
6fee40e9 | 3245 | u8 __iomem *bd; /* BD pointer */ |
ce973b14 LY |
3246 | u32 bd_status; |
3247 | ||
3248 | bd = ugeth->confBd[txQ]; | |
6fee40e9 | 3249 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 LY |
3250 | |
3251 | /* Normal processing. */ | |
3252 | while ((bd_status & T_R) == 0) { | |
50f238fd AV |
3253 | struct sk_buff *skb; |
3254 | ||
ce973b14 LY |
3255 | /* BD contains already transmitted buffer. */ |
3256 | /* Handle the transmitted buffer and release */ | |
3257 | /* the BD to be used with the current frame */ | |
3258 | ||
34692421 JW |
3259 | skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; |
3260 | if (!skb) | |
ce973b14 LY |
3261 | break; |
3262 | ||
09f75cd7 | 3263 | dev->stats.tx_packets++; |
ce973b14 | 3264 | |
36145741 | 3265 | dev_consume_skb_any(skb); |
50f238fd | 3266 | |
ce973b14 LY |
3267 | ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; |
3268 | ugeth->skb_dirtytx[txQ] = | |
3269 | (ugeth->skb_dirtytx[txQ] + | |
3270 | 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); | |
3271 | ||
3272 | /* We freed a buffer, so now we can restart transmission */ | |
3273 | if (netif_queue_stopped(dev)) | |
3274 | netif_wake_queue(dev); | |
3275 | ||
3276 | /* Advance the confirmation BD pointer */ | |
3277 | if (!(bd_status & T_W)) | |
a394f013 | 3278 | bd += sizeof(struct qe_bd); |
ce973b14 | 3279 | else |
a394f013 | 3280 | bd = ugeth->p_tx_bd_ring[txQ]; |
6fee40e9 | 3281 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 | 3282 | } |
a394f013 | 3283 | ugeth->confBd[txQ] = bd; |
ce973b14 LY |
3284 | return 0; |
3285 | } | |
3286 | ||
bea3348e | 3287 | static int ucc_geth_poll(struct napi_struct *napi, int budget) |
ce973b14 | 3288 | { |
bea3348e | 3289 | struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); |
702ff12c | 3290 | struct ucc_geth_info *ug_info; |
bea3348e | 3291 | int howmany, i; |
ce973b14 | 3292 | |
702ff12c MR |
3293 | ug_info = ugeth->ug_info; |
3294 | ||
0cededf3 JT |
3295 | /* Tx event processing */ |
3296 | spin_lock(&ugeth->lock); | |
3297 | for (i = 0; i < ug_info->numQueuesTx; i++) | |
3298 | ucc_geth_tx(ugeth->ndev, i); | |
3299 | spin_unlock(&ugeth->lock); | |
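/*
 * Tx completion above is not charged against the NAPI budget; only the
 * Rx loop below is, so howmany < budget reliably means Rx work is
 * exhausted before interrupts are re-enabled.
 */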
3300 | ||
50f238fd AV |
3301 | howmany = 0; |
3302 | for (i = 0; i < ug_info->numQueuesRx; i++) | |
3303 | howmany += ucc_geth_rx(ugeth, i, budget - howmany); | |
3304 | ||
bea3348e | 3305 | if (howmany < budget) { |
288379f0 | 3306 | napi_complete(napi); |
0cededf3 | 3307 | setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS); |
702ff12c | 3308 | } |
ce973b14 | 3309 | |
bea3348e | 3310 | return howmany; |
ce973b14 | 3311 | } |
ce973b14 | 3312 | |
7d12e780 | 3313 | static irqreturn_t ucc_geth_irq_handler(int irq, void *info) |
ce973b14 | 3314 | { |
06efcad0 | 3315 | struct net_device *dev = info; |
18a8e864 LY |
3316 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
3317 | struct ucc_fast_private *uccf; | |
3318 | struct ucc_geth_info *ug_info; | |
702ff12c MR |
3319 | register u32 ucce; |
3320 | register u32 uccm; | |
ce973b14 | 3321 | |
b39d66a8 | 3322 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 | 3323 | |
ce973b14 LY |
3324 | uccf = ugeth->uccf; |
3325 | ug_info = ugeth->ug_info; | |
3326 | ||
702ff12c MR |
3327 | /* read and clear events */ |
3328 | ucce = (u32) in_be32(uccf->p_ucce); | |
3329 | uccm = (u32) in_be32(uccf->p_uccm); | |
3330 | ucce &= uccm; | |
3331 | out_be32(uccf->p_ucce, ucce); | |
ce973b14 | 3332 | |
702ff12c | 3333 | /* check for receive events that require processing */ |
0cededf3 | 3334 | if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) { |
288379f0 | 3335 | if (napi_schedule_prep(&ugeth->napi)) { |
0cededf3 | 3336 | uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS); |
702ff12c | 3337 | out_be32(uccf->p_uccm, uccm); |
288379f0 | 3338 | __napi_schedule(&ugeth->napi); |
702ff12c | 3339 | } |
702ff12c | 3340 | } |
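/*
 * The Rx/Tx event bits stay masked from this point on; ucc_geth_poll()
 * sets them again in p_uccm once the budgeted work is done, the usual
 * interrupts-off NAPI handshake.
 */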
ce973b14 | 3341 | |
702ff12c MR |
3342 | /* Errors and other events */ |
3343 | if (ucce & UCCE_OTHER) { | |
3bc53427 | 3344 | if (ucce & UCC_GETH_UCCE_BSY) |
09f75cd7 | 3345 | dev->stats.rx_errors++; |
3bc53427 | 3346 | if (ucce & UCC_GETH_UCCE_TXE) |
09f75cd7 | 3347 | dev->stats.tx_errors++; |
ce973b14 | 3348 | } |
ce973b14 LY |
3349 | |
3350 | return IRQ_HANDLED; | |
3351 | } | |
3352 | ||
26d29ea7 AV |
3353 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3354 | /* | |
3355 | * Polling 'interrupt' - used by things like netconsole to send skbs | |
3356 | * without having to re-enable interrupts. It's not called while | |
3357 | * the interrupt routine is executing. | |
3358 | */ | |
3359 | static void ucc_netpoll(struct net_device *dev) | |
3360 | { | |
3361 | struct ucc_geth_private *ugeth = netdev_priv(dev); | |
3362 | int irq = ugeth->ug_info->uf_info.irq; | |
3363 | ||
3364 | disable_irq(irq); | |
3365 | ucc_geth_irq_handler(irq, dev); | |
3366 | enable_irq(irq); | |
3367 | } | |
3368 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | |
3369 | ||
3d6593e9 KH |
3370 | static int ucc_geth_set_mac_addr(struct net_device *dev, void *p) |
3371 | { | |
3372 | struct ucc_geth_private *ugeth = netdev_priv(dev); | |
3373 | struct sockaddr *addr = p; | |
3374 | ||
3375 | if (!is_valid_ether_addr(addr->sa_data)) | |
3376 | return -EADDRNOTAVAIL; | |
3377 | ||
3378 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | |
3379 | ||
3380 | /* | |
3381 | * If device is not running, we will set mac addr register | |
3382 | * when opening the device. | |
3383 | */ | |
3384 | if (!netif_running(dev)) | |
3385 | return 0; | |
3386 | ||
3387 | spin_lock_irq(&ugeth->lock); | |
3388 | init_mac_station_addr_regs(dev->dev_addr[0], | |
3389 | dev->dev_addr[1], | |
3390 | dev->dev_addr[2], | |
3391 | dev->dev_addr[3], | |
3392 | dev->dev_addr[4], | |
3393 | dev->dev_addr[5], | |
3394 | &ugeth->ug_regs->macstnaddr1, | |
3395 | &ugeth->ug_regs->macstnaddr2); | |
3396 | spin_unlock_irq(&ugeth->lock); | |
3397 | ||
3398 | return 0; | |
3399 | } | |
3400 | ||
54b15983 | 3401 | static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) |
ce973b14 | 3402 | { |
54b15983 | 3403 | struct net_device *dev = ugeth->ndev; |
ce973b14 LY |
3404 | int err; |
3405 | ||
728de4c9 KP |
3406 | err = ucc_struct_init(ugeth); |
3407 | if (err) { | |
c84d8055 | 3408 | netif_err(ugeth, ifup, dev, "Cannot configure internal struct, aborting\n"); |
54b15983 | 3409 | goto err; |
728de4c9 KP |
3410 | } |
3411 | ||
ce973b14 LY |
3412 | err = ucc_geth_startup(ugeth); |
3413 | if (err) { | |
c84d8055 | 3414 | netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n"); |
54b15983 | 3415 | goto err; |
ce973b14 LY |
3416 | } |
3417 | ||
3418 | err = adjust_enet_interface(ugeth); | |
3419 | if (err) { | |
c84d8055 | 3420 | netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n"); |
54b15983 | 3421 | goto err; |
ce973b14 LY |
3422 | } |
3423 | ||
3424 | /* Set MACSTNADDR1, MACSTNADDR2 */ | |
3425 | /* For more details see the hardware spec. */ | |
3426 | init_mac_station_addr_regs(dev->dev_addr[0], | |
3427 | dev->dev_addr[1], | |
3428 | dev->dev_addr[2], | |
3429 | dev->dev_addr[3], | |
3430 | dev->dev_addr[4], | |
3431 | dev->dev_addr[5], | |
3432 | &ugeth->ug_regs->macstnaddr1, | |
3433 | &ugeth->ug_regs->macstnaddr2); | |
3434 | ||
67c2fb8f | 3435 | err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); |
ce973b14 | 3436 | if (err) { |
c84d8055 | 3437 | netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n"); |
54b15983 AV |
3438 | goto err; |
3439 | } | |
3440 | ||
3441 | return 0; | |
3442 | err: | |
3443 | ucc_geth_stop(ugeth); | |
3444 | return err; | |
3445 | } | |
3446 | ||
3447 | /* Called when something needs to use the ethernet device */ | |
3448 | /* Returns 0 for success. */ | |
3449 | static int ucc_geth_open(struct net_device *dev) | |
3450 | { | |
3451 | struct ucc_geth_private *ugeth = netdev_priv(dev); | |
3452 | int err; | |
3453 | ||
3454 | ugeth_vdbg("%s: IN", __func__); | |
3455 | ||
3456 | /* Test station address */ | |
3457 | if (dev->dev_addr[0] & ENET_GROUP_ADDR) { | |
c84d8055 JP |
3458 | netif_err(ugeth, ifup, dev, |
3459 | "Multicast address used for station address - is this what you wanted?\n"); | |
54b15983 AV |
3460 | return -EINVAL; |
3461 | } | |
3462 | ||
3463 | err = init_phy(dev); | |
3464 | if (err) { | |
c84d8055 | 3465 | netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n"); |
54b15983 AV |
3466 | return err; |
3467 | } | |
3468 | ||
3469 | err = ucc_geth_init_mac(ugeth); | |
3470 | if (err) { | |
c84d8055 | 3471 | netif_err(ugeth, ifup, dev, "Cannot initialize MAC, aborting\n"); |
54b15983 | 3472 | goto err; |
ce973b14 | 3473 | } |
ce973b14 | 3474 | |
67c2fb8f AV |
3475 | err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, |
3476 | 0, "UCC Geth", dev); | |
ce973b14 | 3477 | if (err) { |
c84d8055 | 3478 | netif_err(ugeth, ifup, dev, "Cannot get IRQ for net device, aborting\n"); |
54b15983 | 3479 | goto err; |
ce973b14 LY |
3480 | } |
3481 | ||
54b15983 AV |
3482 | phy_start(ugeth->phydev); |
3483 | napi_enable(&ugeth->napi); | |
ce973b14 LY |
3484 | netif_start_queue(dev); |
3485 | ||
2394905f AV |
3486 | device_set_wakeup_capable(&dev->dev, |
3487 | qe_alive_during_sleep() || ugeth->phydev->irq); | |
3488 | device_set_wakeup_enable(&dev->dev, ugeth->wol_en); | |
3489 | ||
ce973b14 | 3490 | return err; |
bea3348e | 3491 | |
54b15983 | 3492 | err: |
ba574696 | 3493 | ucc_geth_stop(ugeth); |
bea3348e | 3494 | return err; |
ce973b14 LY |
3495 | } |
3496 | ||
3497 | /* Stops the kernel queue, and halts the controller */ | |
3498 | static int ucc_geth_close(struct net_device *dev) | |
3499 | { | |
18a8e864 | 3500 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
ce973b14 | 3501 | |
b39d66a8 | 3502 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 | 3503 | |
bea3348e | 3504 | napi_disable(&ugeth->napi); |
bea3348e | 3505 | |
2040bd57 | 3506 | cancel_work_sync(&ugeth->timeout_work); |
ce973b14 | 3507 | ucc_geth_stop(ugeth); |
2040bd57 JT |
3508 | phy_disconnect(ugeth->phydev); |
3509 | ugeth->phydev = NULL; | |
ce973b14 | 3510 | |
da1aa63e | 3511 | free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); |
67c2fb8f | 3512 | |
ce973b14 LY |
3513 | netif_stop_queue(dev); |
3514 | ||
3515 | return 0; | |
3516 | } | |
3517 | ||
fdb614c2 AV |
3518 | /* Reopen device. This will reset the MAC and PHY. */ |
3519 | static void ucc_geth_timeout_work(struct work_struct *work) | |
3520 | { | |
3521 | struct ucc_geth_private *ugeth; | |
3522 | struct net_device *dev; | |
3523 | ||
3524 | ugeth = container_of(work, struct ucc_geth_private, timeout_work); | |
da1aa63e | 3525 | dev = ugeth->ndev; |
fdb614c2 AV |
3526 | |
3527 | ugeth_vdbg("%s: IN", __func__); | |
3528 | ||
3529 | dev->stats.tx_errors++; | |
3530 | ||
3531 | ugeth_dump_regs(ugeth); | |
3532 | ||
3533 | if (dev->flags & IFF_UP) { | |
3534 | /* | |
3535 | * Must reset MAC *and* PHY. This is done by reopening | |
3536 | * the device. | |
3537 | */ | |
2040bd57 JT |
3538 | netif_tx_stop_all_queues(dev); |
3539 | ucc_geth_stop(ugeth); | |
3540 | ucc_geth_init_mac(ugeth); | |
3541 | /* Must start PHY here */ | |
3542 | phy_start(ugeth->phydev); | |
3543 | netif_tx_start_all_queues(dev); | |
fdb614c2 AV |
3544 | } |
3545 | ||
3546 | netif_tx_schedule_all(dev); | |
3547 | } | |
3548 | ||
3549 | /* | |
3550 | * ucc_geth_timeout gets called when a packet has not been | |
3551 | * transmitted after a set amount of time. | |
3552 | */ | |
3553 | static void ucc_geth_timeout(struct net_device *dev) | |
3554 | { | |
3555 | struct ucc_geth_private *ugeth = netdev_priv(dev); | |
3556 | ||
fdb614c2 AV |
3557 | schedule_work(&ugeth->timeout_work); |
3558 | } | |
3559 | ||
2394905f AV |
3560 | |
3561 | #ifdef CONFIG_PM | |
3562 | ||
2dc11581 | 3563 | static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) |
2394905f | 3564 | { |
8513fbd8 | 3565 | struct net_device *ndev = platform_get_drvdata(ofdev); |
2394905f AV |
3566 | struct ucc_geth_private *ugeth = netdev_priv(ndev); |
3567 | ||
3568 | if (!netif_running(ndev)) | |
3569 | return 0; | |
3570 | ||
29fb00e0 | 3571 | netif_device_detach(ndev); |
2394905f AV |
3572 | napi_disable(&ugeth->napi); |
3573 | ||
3574 | /* | |
3575 | * Disable the controller, otherwise we'll wakeup on any network | |
3576 | * activity. | |
3577 | */ | |
3578 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); | |
3579 | ||
3580 | if (ugeth->wol_en & WAKE_MAGIC) { | |
3581 | setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); | |
3582 | setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); | |
3583 | ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX); | |
3584 | } else if (!(ugeth->wol_en & WAKE_PHY)) { | |
3585 | phy_stop(ugeth->phydev); | |
3586 | } | |
3587 | ||
3588 | return 0; | |
3589 | } | |
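/*
 * With WAKE_MAGIC the MAC is kept running with magic-packet detection
 * (MACCFG2_MPE) enabled so it can wake the system; otherwise the PHY
 * is stopped as well, unless WAKE_PHY is set, in which case the PHY is
 * presumably left running to raise its own wakeup interrupt.
 */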
3590 | ||
2dc11581 | 3591 | static int ucc_geth_resume(struct platform_device *ofdev) |
2394905f | 3592 | { |
8513fbd8 | 3593 | struct net_device *ndev = platform_get_drvdata(ofdev); |
2394905f AV |
3594 | struct ucc_geth_private *ugeth = netdev_priv(ndev); |
3595 | int err; | |
3596 | ||
3597 | if (!netif_running(ndev)) | |
3598 | return 0; | |
3599 | ||
3600 | if (qe_alive_during_sleep()) { | |
3601 | if (ugeth->wol_en & WAKE_MAGIC) { | |
3602 | ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX); | |
3603 | clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); | |
3604 | clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); | |
3605 | } | |
3606 | ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); | |
3607 | } else { | |
3608 | /* | |
3609 | * Full reinitialization is required if QE shuts down | |
3610 | * during sleep. | |
3611 | */ | |
3612 | ucc_geth_memclean(ugeth); | |
3613 | ||
3614 | err = ucc_geth_init_mac(ugeth); | |
3615 | if (err) { | |
c84d8055 | 3616 | netdev_err(ndev, "Cannot initialize MAC, aborting\n"); |
2394905f AV |
3617 | return err; |
3618 | } | |
3619 | } | |
3620 | ||
3621 | ugeth->oldlink = 0; | |
3622 | ugeth->oldspeed = 0; | |
3623 | ugeth->oldduplex = -1; | |
3624 | ||
3625 | phy_stop(ugeth->phydev); | |
3626 | phy_start(ugeth->phydev); | |
3627 | ||
3628 | napi_enable(&ugeth->napi); | |
29fb00e0 | 3629 | netif_device_attach(ndev); |
2394905f AV |
3630 | |
3631 | return 0; | |
3632 | } | |
3633 | ||
3634 | #else | |
3635 | #define ucc_geth_suspend NULL | |
3636 | #define ucc_geth_resume NULL | |
3637 | #endif | |
3638 | ||
4e19b5c1 | 3639 | static phy_interface_t to_phy_interface(const char *phy_connection_type) |
728de4c9 | 3640 | { |
4e19b5c1 | 3641 | if (strcasecmp(phy_connection_type, "mii") == 0) |
728de4c9 | 3642 | return PHY_INTERFACE_MODE_MII; |
4e19b5c1 | 3643 | if (strcasecmp(phy_connection_type, "gmii") == 0) |
728de4c9 | 3644 | return PHY_INTERFACE_MODE_GMII; |
4e19b5c1 | 3645 | if (strcasecmp(phy_connection_type, "tbi") == 0) |
728de4c9 | 3646 | return PHY_INTERFACE_MODE_TBI; |
4e19b5c1 | 3647 | if (strcasecmp(phy_connection_type, "rmii") == 0) |
728de4c9 | 3648 | return PHY_INTERFACE_MODE_RMII; |
4e19b5c1 | 3649 | if (strcasecmp(phy_connection_type, "rgmii") == 0) |
728de4c9 | 3650 | return PHY_INTERFACE_MODE_RGMII; |
4e19b5c1 | 3651 | if (strcasecmp(phy_connection_type, "rgmii-id") == 0) |
728de4c9 | 3652 | return PHY_INTERFACE_MODE_RGMII_ID; |
bd0ceaab KP |
3653 | if (strcasecmp(phy_connection_type, "rgmii-txid") == 0) |
3654 | return PHY_INTERFACE_MODE_RGMII_TXID; | |
3655 | if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0) | |
3656 | return PHY_INTERFACE_MODE_RGMII_RXID; | |
4e19b5c1 | 3657 | if (strcasecmp(phy_connection_type, "rtbi") == 0) |
728de4c9 | 3658 | return PHY_INTERFACE_MODE_RTBI; |
047584ce HW |
3659 | if (strcasecmp(phy_connection_type, "sgmii") == 0) |
3660 | return PHY_INTERFACE_MODE_SGMII; | |
728de4c9 KP |
3661 | |
3662 | return PHY_INTERFACE_MODE_MII; | |
3663 | } | |
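/*
 * to_phy_interface() maps the "phy-connection-type" string from the
 * device tree onto a phy_interface_t, falling back to MII for unknown
 * strings. An illustrative node (not taken from any real board file)
 * of the shape the probe code below parses:
 *
 *	ethernet@2000 {
 *		device_type = "network";
 *		compatible = "ucc_geth";
 *		phy-connection-type = "rgmii-id";
 *		phy-handle = <&phy0>;
 *	};
 */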
3664 | ||
d19b5149 SM |
3665 | static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
3666 | { | |
3667 | struct ucc_geth_private *ugeth = netdev_priv(dev); | |
3668 | ||
3669 | if (!netif_running(dev)) | |
3670 | return -EINVAL; | |
3671 | ||
3672 | if (!ugeth->phydev) | |
3673 | return -ENODEV; | |
3674 | ||
28b04113 | 3675 | return phy_mii_ioctl(ugeth->phydev, rq, cmd); |
d19b5149 SM |
3676 | } |
3677 | ||
a9dbae78 JT |
3678 | static const struct net_device_ops ucc_geth_netdev_ops = { |
3679 | .ndo_open = ucc_geth_open, | |
3680 | .ndo_stop = ucc_geth_close, | |
3681 | .ndo_start_xmit = ucc_geth_start_xmit, | |
3682 | .ndo_validate_addr = eth_validate_addr, | |
3d6593e9 | 3683 | .ndo_set_mac_address = ucc_geth_set_mac_addr, |
a9dbae78 | 3684 | .ndo_change_mtu = eth_change_mtu, |
afc4b13d | 3685 | .ndo_set_rx_mode = ucc_geth_set_multi, |
a9dbae78 | 3686 | .ndo_tx_timeout = ucc_geth_timeout, |
d19b5149 | 3687 | .ndo_do_ioctl = ucc_geth_ioctl, |
a9dbae78 JT |
3688 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3689 | .ndo_poll_controller = ucc_netpoll, | |
3690 | #endif | |
3691 | }; | |
3692 | ||
74888760 | 3693 | static int ucc_geth_probe(struct platform_device* ofdev) |
ce973b14 | 3694 | { |
18a8e864 | 3695 | struct device *device = &ofdev->dev; |
61c7a080 | 3696 | struct device_node *np = ofdev->dev.of_node; |
ce973b14 LY |
3697 | struct net_device *dev = NULL; |
3698 | struct ucc_geth_private *ugeth = NULL; | |
3699 | struct ucc_geth_info *ug_info; | |
18a8e864 | 3700 | struct resource res; |
728de4c9 | 3701 | int err, ucc_num, max_speed = 0; |
18a8e864 | 3702 | const unsigned int *prop; |
9fb1e350 | 3703 | const char *sprop; |
9b4c7a4e | 3704 | const void *mac_addr; |
728de4c9 KP |
3705 | phy_interface_t phy_interface; |
3706 | static const int enet_to_speed[] = { | |
3707 | SPEED_10, SPEED_10, SPEED_10, | |
3708 | SPEED_100, SPEED_100, SPEED_100, | |
3709 | SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000, | |
3710 | }; | |
3711 | static const phy_interface_t enet_to_phy_interface[] = { | |
3712 | PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII, | |
3713 | PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII, | |
3714 | PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII, | |
3715 | PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII, | |
3716 | PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI, | |
047584ce | 3717 | PHY_INTERFACE_MODE_SGMII, |
728de4c9 | 3718 | }; |
ce973b14 | 3719 | |
b39d66a8 | 3720 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 | 3721 | |
56626f33 AV |
3722 | prop = of_get_property(np, "cell-index", NULL); |
3723 | if (!prop) { | |
3724 | prop = of_get_property(np, "device-id", NULL); | |
3725 | if (!prop) | |
3726 | return -ENODEV; | |
3727 | } | |
3728 | ||
18a8e864 LY |
3729 | ucc_num = *prop - 1; |
3730 | if ((ucc_num < 0) || (ucc_num > 7)) | |
3731 | return -ENODEV; | |
3732 | ||
3733 | ug_info = &ugeth_info[ucc_num]; | |
890de95e LY |
3734 | if (ug_info == NULL) { |
3735 | if (netif_msg_probe(&debug)) | |
c84d8055 | 3736 | pr_err("[%d] Missing additional data!\n", ucc_num); |
890de95e LY |
3737 | return -ENODEV; |
3738 | } | |
3739 | ||
18a8e864 | 3740 | ug_info->uf_info.ucc_num = ucc_num; |
728de4c9 | 3741 | |
9fb1e350 TT |
3742 | sprop = of_get_property(np, "rx-clock-name", NULL); |
3743 | if (sprop) { | |
3744 | ug_info->uf_info.rx_clock = qe_clock_source(sprop); | |
3745 | if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) || | |
3746 | (ug_info->uf_info.rx_clock > QE_CLK24)) { | |
c84d8055 | 3747 | pr_err("invalid rx-clock-name property\n"); |
9fb1e350 TT |
3748 | return -EINVAL; |
3749 | } | |
3750 | } else { | |
3751 | prop = of_get_property(np, "rx-clock", NULL); | |
3752 | if (!prop) { | |
3753 | /* If both rx-clock-name and rx-clock are missing, | |
3754 | we want to tell people to use rx-clock-name. */ | |
c84d8055 | 3755 | pr_err("missing rx-clock-name property\n"); |
9fb1e350 TT |
3756 | return -EINVAL; |
3757 | } | |
3758 | if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { | |
b9780a81 | 3759 | pr_err("invalid rx-clock property\n"); |
9fb1e350 TT |
3760 | return -EINVAL; |
3761 | } | |
3762 | ug_info->uf_info.rx_clock = *prop; | |
3763 | } | |
3764 | ||
3765 | sprop = of_get_property(np, "tx-clock-name", NULL); | |
3766 | if (sprop) { | |
3767 | ug_info->uf_info.tx_clock = qe_clock_source(sprop); | |
3768 | if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) || | |
3769 | (ug_info->uf_info.tx_clock > QE_CLK24)) { | |
c84d8055 | 3770 | pr_err("invalid tx-clock-name property\n"); |
9fb1e350 TT |
3771 | return -EINVAL; |
3772 | } | |
3773 | } else { | |
e410553f | 3774 | prop = of_get_property(np, "tx-clock", NULL); |
9fb1e350 | 3775 | if (!prop) { |
c84d8055 | 3776 | pr_err("missing tx-clock-name property\n"); |
9fb1e350 TT |
3777 | return -EINVAL; |
3778 | } | |
3779 | if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { | |
c84d8055 | 3780 | pr_err("invalid tx-clock property\n"); |
9fb1e350 TT |
3781 | return -EINVAL; |
3782 | } | |
3783 | ug_info->uf_info.tx_clock = *prop; | |
3784 | } | |
3785 | ||
18a8e864 LY |
3786 | err = of_address_to_resource(np, 0, &res); |
3787 | if (err) | |
3788 | return -EINVAL; | |
3789 | ||
3790 | ug_info->uf_info.regs = res.start; | |
3791 | ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); | |
3104a6ff AV |
3792 | |
3793 | ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); | |
a1f7d81b UKK |
3794 | if (!ug_info->phy_node && of_phy_is_fixed_link(np)) { |
3795 | /* | |
3796 | * In the case of a fixed PHY, the DT node associated | |
87009814 FF |
3797 | * to the PHY is the Ethernet MAC DT node. |
3798 | */ | |
a1f7d81b UKK |
3799 | err = of_phy_register_fixed_link(np); |
3800 | if (err) | |
3801 | return err; | |
f1f02fa4 | 3802 | ug_info->phy_node = of_node_get(np); |
87009814 | 3803 | } |
728de4c9 | 3804 | |
fb1001f3 HW |
3805 | /* Find the TBI PHY node. If it's not there, we don't support SGMII */ |
3806 | ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); | |
3807 | ||
728de4c9 | 3808 | /* get the phy interface type, or default to MII */ |
4e19b5c1 | 3809 | prop = of_get_property(np, "phy-connection-type", NULL); |
728de4c9 KP |
3810 | if (!prop) { |
3811 | /* handle interface property present in old trees */ | |
3104a6ff | 3812 | prop = of_get_property(ug_info->phy_node, "interface", NULL); |
4e19b5c1 | 3813 | if (prop != NULL) { |
728de4c9 | 3814 | phy_interface = enet_to_phy_interface[*prop]; |
4e19b5c1 KP |
3815 | max_speed = enet_to_speed[*prop]; |
3816 | } else | |
728de4c9 KP |
3817 | phy_interface = PHY_INTERFACE_MODE_MII; |
3818 | } else { | |
3819 | phy_interface = to_phy_interface((const char *)prop); | |
3820 | } | |
3821 | ||
4e19b5c1 KP |
3822 | /* get speed, or derive from PHY interface */ |
3823 | if (max_speed == 0) | |
728de4c9 KP |
3824 | switch (phy_interface) { |
3825 | case PHY_INTERFACE_MODE_GMII: | |
3826 | case PHY_INTERFACE_MODE_RGMII: | |
3827 | case PHY_INTERFACE_MODE_RGMII_ID: | |
bd0ceaab KP |
3828 | case PHY_INTERFACE_MODE_RGMII_RXID: |
3829 | case PHY_INTERFACE_MODE_RGMII_TXID: | |
728de4c9 KP |
3830 | case PHY_INTERFACE_MODE_TBI: |
3831 | case PHY_INTERFACE_MODE_RTBI: | |
047584ce | 3832 | case PHY_INTERFACE_MODE_SGMII: |
728de4c9 KP |
3833 | max_speed = SPEED_1000; |
3834 | break; | |
3835 | default: | |
3836 | max_speed = SPEED_100; | |
3837 | break; | |
3838 | } | |
728de4c9 KP |
3839 | |
3840 | if (max_speed == SPEED_1000) { | |
fa1b42b4 DL |
3841 | unsigned int snums = qe_get_num_of_snums(); |
3842 | ||
4e19b5c1 | 3843 | /* configure muram FIFOs for gigabit operation */ |
728de4c9 KP |
3844 | ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT; |
3845 | ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT; | |
3846 | ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT; | |
3847 | ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT; | |
3848 | ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT; | |
3849 | ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT; | |
ffea31ed | 3850 | ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4; |
674e4f93 | 3851 | |
fa1b42b4 | 3852 | /* If the QE has 46 or 76 snums, it means we need to support
674e4f93 HW |
3853 | * 4 UECs at 1000Base-T simultaneously, so we allocate
3854 | * more threads to Rx.
3855 | */ | |
fa1b42b4 | 3856 | if ((snums == 76) || (snums == 46)) |
674e4f93 HW |
3857 | ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6; |
3858 | else | |
3859 | ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4; | |
728de4c9 KP |
3860 | } |
3861 | ||
890de95e | 3862 | if (netif_msg_probe(&debug)) |
c84d8055 | 3863 | pr_info("UCC%1d at 0x%8x (irq = %d)\n", |
890de95e LY |
3864 | ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, |
3865 | ug_info->uf_info.irq); | |
ce973b14 | 3866 | |
ce973b14 LY |
3867 | /* Create an ethernet device instance */ |
3868 | dev = alloc_etherdev(sizeof(*ugeth)); | |
3869 | ||
fa310789 | 3870 | if (dev == NULL) { |
0807c4ce JH |
3871 | err = -ENOMEM; |
3872 | goto err_deregister_fixed_link; | |
fa310789 | 3873 | } |
ce973b14 LY |
3874 | |
3875 | ugeth = netdev_priv(dev); | |
3876 | spin_lock_init(&ugeth->lock); | |
3877 | ||
80a9fad8 AV |
3878 | /* Create CQs for hash tables */ |
3879 | INIT_LIST_HEAD(&ugeth->group_hash_q); | |
3880 | INIT_LIST_HEAD(&ugeth->ind_hash_q); | |
3881 | ||
ce973b14 LY |
3882 | dev_set_drvdata(device, dev); |
3883 | ||
3884 | /* Set the dev->base_addr to the UCC register region */ |
3885 | dev->base_addr = (unsigned long)(ug_info->uf_info.regs); | |
3886 | ||
ce973b14 LY |
3887 | SET_NETDEV_DEV(dev, device); |
3888 | ||
3889 | /* Fill in the dev structure */ | |
ac421852 | 3890 | uec_set_ethtool_ops(dev); |
a9dbae78 | 3891 | dev->netdev_ops = &ucc_geth_netdev_ops; |
ce973b14 | 3892 | dev->watchdog_timeo = TX_TIMEOUT; |
1762a29a | 3893 | INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work); |
0cededf3 | 3894 | netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64); |
ce973b14 | 3895 | dev->mtu = 1500; |
ce973b14 | 3896 | |
890de95e | 3897 | ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT); |
728de4c9 KP |
3898 | ugeth->phy_interface = phy_interface; |
3899 | ugeth->max_speed = max_speed; | |
3900 | ||
1452db76 CC |
3901 | /* Carrier starts down, phylib will bring it up */ |
3902 | netif_carrier_off(dev); | |
3903 | ||
ce973b14 LY |
3904 | err = register_netdev(dev); |
3905 | if (err) { | |
890de95e | 3906 | if (netif_msg_probe(ugeth)) |
c84d8055 JP |
3907 | pr_err("%s: Cannot register net device, aborting\n", |
3908 | dev->name); | |
0807c4ce | 3909 | goto err_free_netdev; |
ce973b14 LY |
3910 | } |
3911 | ||
e9eb70c9 | 3912 | mac_addr = of_get_mac_address(np); |
9b4c7a4e | 3913 | if (mac_addr) |
d458cdf7 | 3914 | memcpy(dev->dev_addr, mac_addr, ETH_ALEN); |
ce973b14 | 3915 | |
728de4c9 | 3916 | ugeth->ug_info = ug_info; |
da1aa63e AV |
3917 | ugeth->dev = device; |
3918 | ugeth->ndev = dev; | |
b1c4a9dd | 3919 | ugeth->node = np; |
728de4c9 | 3920 | |
ce973b14 | 3921 | return 0; |
0807c4ce JH |
3922 | |
3923 | err_free_netdev: | |
3924 | free_netdev(dev); | |
3925 | err_deregister_fixed_link: | |
3926 | if (of_phy_is_fixed_link(np)) | |
3927 | of_phy_deregister_fixed_link(np); | |
3928 | of_node_put(ug_info->tbi_node); | |
3929 | of_node_put(ug_info->phy_node); | |
3930 | ||
3931 | return err; | |
ce973b14 LY |
3932 | } |
3933 | ||
2dc11581 | 3934 | static int ucc_geth_remove(struct platform_device* ofdev) |
ce973b14 | 3935 | { |
65d7e7ad | 3936 | struct net_device *dev = platform_get_drvdata(ofdev); |
ce973b14 | 3937 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
0807c4ce | 3938 | struct device_node *np = ofdev->dev.of_node; |
ce973b14 | 3939 | |
80a9fad8 | 3940 | unregister_netdev(dev); |
ce973b14 | 3941 | free_netdev(dev); |
80a9fad8 | 3942 | ucc_geth_memclean(ugeth); |
0807c4ce JH |
3943 | if (of_phy_is_fixed_link(np)) |
3944 | of_phy_deregister_fixed_link(np); | |
4da5e6a0 UKK |
3945 | of_node_put(ugeth->ug_info->tbi_node); |
3946 | of_node_put(ugeth->ug_info->phy_node); | |
ce973b14 LY |
3947 | |
3948 | return 0; | |
3949 | } | |
3950 | ||
94e5a2a8 | 3951 | static const struct of_device_id ucc_geth_match[] = { |
18a8e864 LY |
3952 | { |
3953 | .type = "network", | |
3954 | .compatible = "ucc_geth", | |
3955 | }, | |
3956 | {}, | |
3957 | }; | |
3958 | ||
3959 | MODULE_DEVICE_TABLE(of, ucc_geth_match); | |
3960 | ||
74888760 | 3961 | static struct platform_driver ucc_geth_driver = { |
4018294b GL |
3962 | .driver = { |
3963 | .name = DRV_NAME, | |
4018294b GL |
3964 | .of_match_table = ucc_geth_match, |
3965 | }, | |
18a8e864 LY |
3966 | .probe = ucc_geth_probe, |
3967 | .remove = ucc_geth_remove, | |
2394905f AV |
3968 | .suspend = ucc_geth_suspend, |
3969 | .resume = ucc_geth_resume, | |
ce973b14 LY |
3970 | }; |
3971 | ||
3972 | static int __init ucc_geth_init(void) | |
3973 | { | |
728de4c9 KP |
3974 | int i, ret; |
3975 | ||
890de95e | 3976 | if (netif_msg_drv(&debug)) |
c84d8055 | 3977 | pr_info(DRV_DESC "\n"); |
ce973b14 LY |
3978 | for (i = 0; i < 8; i++) |
3979 | memcpy(&(ugeth_info[i]), &ugeth_primary_info, | |
3980 | sizeof(ugeth_primary_info)); | |
3981 | ||
74888760 | 3982 | ret = platform_driver_register(&ucc_geth_driver); |
728de4c9 | 3983 | |
728de4c9 | 3984 | return ret; |
ce973b14 LY |
3985 | } |
3986 | ||
3987 | static void __exit ucc_geth_exit(void) | |
3988 | { | |
74888760 | 3989 | platform_driver_unregister(&ucc_geth_driver); |
ce973b14 LY |
3990 | } |
3991 | ||
3992 | module_init(ucc_geth_init); | |
3993 | module_exit(ucc_geth_exit); | |
3994 | ||
3995 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); | |
3996 | MODULE_DESCRIPTION(DRV_DESC); | |
c2bcf00b | 3997 | MODULE_VERSION(DRV_VERSION); |
ce973b14 | 3998 | MODULE_LICENSE("GPL"); |