/**************************************************************************
 *
 * Copyright (C) 2000-2008 Alacritech, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation
 * are those of the authors and should not be interpreted as representing
 * official policies, either expressed or implied, of Alacritech, Inc.
 *
 **************************************************************************/

/*
 * FILENAME: sxg.c
 *
 * The SXG driver for Alacritech's 10Gbe products.
 *
 * NOTE: This is the standard, non-accelerated version of Alacritech's
 *       IS-NIC driver.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>

#define SLIC_GET_STATS_ENABLED 0
#define LINUX_FREES_ADAPTER_RESOURCES 1
#define SXG_OFFLOAD_IP_CHECKSUM 0
#define SXG_POWER_MANAGEMENT_ENABLED 0
#define VPCI 0
#define ATK_DEBUG 1

#include "sxg_os.h"
#include "sxghw.h"
#include "sxghif.h"
#include "sxg.h"
#include "sxgdbg.h"

#include "sxgphycode.h"
#include "saharadbgdownload.h"
static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size,
				      enum sxg_buffer_type BufferType);
static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
					   void *RcvBlock,
					   dma_addr_t PhysicalAddress,
					   u32 Length);
static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
					     struct sxg_scatter_gather *SxgSgl,
					     dma_addr_t PhysicalAddress,
					     u32 Length);

static void sxg_mcast_init_crc32(void);
static int sxg_entry_open(struct net_device *dev);
static int sxg_entry_halt(struct net_device *dev);
static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb);
static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
			struct sxg_scatter_gather *SxgSgl);

static void sxg_handle_interrupt(struct adapter_t *adapter);
static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId);
static void sxg_complete_slow_send(struct adapter_t *adapter, int irq_context);
static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
					struct sxg_event *Event);
static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
/* See if we need sxg_mac_filter() in future. If not, remove it.
static bool sxg_mac_filter(struct adapter_t *adapter,
			   struct ether_header *EtherHdr, ushort length);
*/
static struct net_device_stats *sxg_get_stats(struct net_device *dev);
void sxg_free_resources(struct adapter_t *adapter);
void sxg_free_rcvblocks(struct adapter_t *adapter);
void sxg_free_sgl_buffers(struct adapter_t *adapter);
void sxg_unmap_resources(struct adapter_t *adapter);
void sxg_free_mcast_addrs(struct adapter_t *adapter);
void sxg_collect_statistics(struct adapter_t *adapter);

#define XXXTODO 0

static int sxg_mac_set_address(struct net_device *dev, void *ptr);
static void sxg_mcast_set_list(struct net_device *dev);

static int sxg_adapter_set_hwaddr(struct adapter_t *adapter);

static void sxg_unmap_mmio_space(struct adapter_t *adapter);

static int sxg_initialize_adapter(struct adapter_t *adapter);
static void sxg_stock_rcv_buffers(struct adapter_t *adapter);
static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
					   unsigned char Index);
static int sxg_initialize_link(struct adapter_t *adapter);
static int sxg_phy_init(struct adapter_t *adapter);
static void sxg_link_event(struct adapter_t *adapter);
static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter);
static void sxg_link_state(struct adapter_t *adapter,
			   enum SXG_LINK_STATE LinkState);
static int sxg_write_mdio_reg(struct adapter_t *adapter,
			      u32 DevAddr, u32 RegAddr, u32 Value);
static int sxg_read_mdio_reg(struct adapter_t *adapter,
			     u32 DevAddr, u32 RegAddr, u32 *pValue);

static unsigned int sxg_first_init = 1;
static char *sxg_banner =
	"Alacritech SLIC Technology(tm) Server and Storage "
	"10Gbe Accelerator (Non-Accelerated)\n";

static int sxg_debug = 1;
static int debug = -1;
static struct net_device *head_netdevice = NULL;

static struct sxgbase_driver sxg_global = {
	.dynamic_intagg = 1,
};
static int intagg_delay = 100;
static int dynamic_intagg = 0;

char sxg_driver_name[] = "sxg_nic";
#define DRV_AUTHOR "Alacritech, Inc. Engineering"
#define DRV_DESCRIPTION \
	"Alacritech SLIC Technology(tm) Non-Accelerated 10Gbe Driver"
#define DRV_COPYRIGHT \
	"Copyright 2000-2008 Alacritech, Inc. All rights reserved."

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");

module_param(dynamic_intagg, int, 0);
MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
module_param(intagg_delay, int, 0);
MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");

static struct pci_device_id sxg_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(SXG_VENDOR_ID, SXG_DEVICE_ID)},
	{0,}
};

MODULE_DEVICE_TABLE(pci, sxg_pci_tbl);

static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush)
{
	writel(value, reg);
	if (flush)
		mb();
}

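/*
 * 64-bit ucode register write.  A note on the hardware contract this
 * code implies (inferred from the sequence below, not from a datasheet):
 * the card appears to latch the value written to UcodeRegs[cpu].Upper
 * and commit the full 64 bits when the low half hits the target
 * register, so the two writes must not interleave with another 64-bit
 * write.  Bit64RegLock serializes that two-write sequence.
 */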
static inline void sxg_reg64_write(struct adapter_t *adapter, void __iomem *reg,
				   u64 value, u32 cpu)
{
	u32 value_high = (u32) (value >> 32);
	u32 value_low = (u32) (value & 0x00000000FFFFFFFF);
	unsigned long flags;

	spin_lock_irqsave(&adapter->Bit64RegLock, flags);
	writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper));
	writel(value_low, reg);
	spin_unlock_irqrestore(&adapter->Bit64RegLock, flags);
}

static void sxg_init_driver(void)
{
	if (sxg_first_init) {
		DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
			  __func__, jiffies);
		sxg_first_init = 0;
		spin_lock_init(&sxg_global.driver_lock);
	}
}

static void sxg_dbg_macaddrs(struct adapter_t *adapter)
{
	DBG_ERROR(" (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  adapter->netdev->name, adapter->currmacaddr[0],
		  adapter->currmacaddr[1], adapter->currmacaddr[2],
		  adapter->currmacaddr[3], adapter->currmacaddr[4],
		  adapter->currmacaddr[5]);
	DBG_ERROR(" (%s) mac %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
		  adapter->netdev->name, adapter->macaddr[0],
		  adapter->macaddr[1], adapter->macaddr[2],
		  adapter->macaddr[3], adapter->macaddr[4],
		  adapter->macaddr[5]);
	return;
}

/* SXG Globals */
static struct sxg_driver SxgDriver;

#ifdef ATKDBG
static struct sxg_trace_buffer LSxgTraceBuffer;
#endif /* ATKDBG */
static struct sxg_trace_buffer *SxgTraceBuffer = NULL;

/*
 * sxg_download_microcode
 *
 * Download Microcode to Sahara adapter
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	UcodeSel	- microcode file selection
 *
 * Return
 *	bool - TRUE on success, FALSE on failure
 */
static bool sxg_download_microcode(struct adapter_t *adapter,
				   enum SXG_UCODE_SEL UcodeSel)
{
	struct sxg_hw_regs *HwRegs = adapter->HwRegs;
	u32 Section;
	u32 ThisSectionSize;
	u32 *Instruction = NULL;
	u32 BaseAddress, AddressOffset, Address;
	/* u32 Failure; */
	u32 ValueRead;
	u32 i;
	u32 numSections = 0;
	u32 sectionSize[16];
	u32 sectionStart[16];

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
		  adapter, 0, 0, 0);
	DBG_ERROR("sxg: %s ENTER\n", __func__);

	switch (UcodeSel) {
	case SXG_UCODE_SAHARA:	/* Sahara operational ucode */
		numSections = SNumSections;
		for (i = 0; i < numSections; i++) {
			sectionSize[i] = SSectionSize[i];
			sectionStart[i] = SSectionStart[i];
		}
		break;
	default:
		printk(KERN_ERR KBUILD_MODNAME
		       ": Woah, big error with the microcode!\n");
		break;
	}

	DBG_ERROR("sxg: RESET THE CARD\n");
	/* First, reset the card */
	WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);

	/*
	 * Download each section of the microcode as specified in
	 * its download file. The *download.c file is generated using
	 * the saharaobjtoc facility which converts the metastep .obj
	 * file to a .c file which contains a two-dimensional array.
	 */
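	/*
	 * Each microcode instruction is 96 bits (12 bytes) wide, which is
	 * why sectionSize is divided by 12 below and why every instruction
	 * is pushed to the card as three 32-bit register writes (DataLow,
	 * DataMiddle, DataHigh).
	 */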
	for (Section = 0; Section < numSections; Section++) {
		DBG_ERROR("sxg: SECTION # %d\n", Section);
		switch (UcodeSel) {
		case SXG_UCODE_SAHARA:
			Instruction = (u32 *)&SaharaUCode[Section][0];
			break;
		default:
			ASSERT(0);
			break;
		}
		BaseAddress = sectionStart[Section];
		/* Size in instructions */
		ThisSectionSize = sectionSize[Section] / 12;
		for (AddressOffset = 0; AddressOffset < ThisSectionSize;
		     AddressOffset++) {
			Address = BaseAddress + AddressOffset;
			ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
			/* Write instruction bits 31 - 0 */
			WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH);
			/* Write instruction bits 63-32 */
			WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1),
				  FLUSH);
			/* Write instruction bits 95-64 */
			WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2),
				  FLUSH);
			/* Write instruction address with the WRITE bit set */
			WRITE_REG(HwRegs->UcodeAddr,
				  (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
			/*
			 * Sahara bug in the ucode download logic - the write to DataLow
			 * for the next instruction could get corrupted. To avoid this,
			 * write to DataLow again for this instruction (which may get
			 * corrupted, but it doesn't matter), then increment the address
			 * and write the data for the next instruction to DataLow. That
			 * write should succeed.
			 */
			WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
			/* Advance 3 u32s to start of next instruction */
			Instruction += 3;
		}
	}
	/*
	 * Now repeat the entire operation reading the instruction back and
	 * checking for parity errors
	 */
	for (Section = 0; Section < numSections; Section++) {
		DBG_ERROR("sxg: check SECTION # %d\n", Section);
		switch (UcodeSel) {
		case SXG_UCODE_SAHARA:
			Instruction = (u32 *)&SaharaUCode[Section][0];
			break;
		default:
			ASSERT(0);
			break;
		}
		BaseAddress = sectionStart[Section];
		/* Size in instructions */
		ThisSectionSize = sectionSize[Section] / 12;
		for (AddressOffset = 0; AddressOffset < ThisSectionSize;
		     AddressOffset++) {
			Address = BaseAddress + AddressOffset;
			/* Write the address with the READ bit set */
			WRITE_REG(HwRegs->UcodeAddr,
				  (Address | MICROCODE_ADDRESS_READ), FLUSH);
			/* Read it back and check parity bit. */
			READ_REG(HwRegs->UcodeAddr, ValueRead);
			if (ValueRead & MICROCODE_ADDRESS_PARITY) {
				DBG_ERROR("sxg: %s PARITY ERROR\n",
					  __func__);

				return FALSE;	/* Parity error */
			}
			ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
			/* Read the instruction back and compare */
			READ_REG(HwRegs->UcodeDataLow, ValueRead);
			if (ValueRead != *Instruction) {
				DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
					  __func__);
				return FALSE;	/* Miscompare */
			}
			READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
			if (ValueRead != *(Instruction + 1)) {
				DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
					  __func__);
				return FALSE;	/* Miscompare */
			}
			READ_REG(HwRegs->UcodeDataHigh, ValueRead);
			if (ValueRead != *(Instruction + 2)) {
				DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
					  __func__);
				return FALSE;	/* Miscompare */
			}
			/* Advance 3 u32s to start of next instruction */
			Instruction += 3;
		}
	}

	/* Everything OK, Go. */
	WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);

	/*
	 * Poll the CardUp register to wait for microcode to initialize.
	 * Give up after 10,000 attempts (500ms).
	 */
	for (i = 0; i < 10000; i++) {
		udelay(50);
		READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
		if (ValueRead == 0xCAFE) {
			DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __func__);
			break;
		}
	}
	if (i == 10000) {
		DBG_ERROR("sxg: %s TIMEOUT\n", __func__);

		return FALSE;	/* Timeout */
	}
	/*
	 * Now write the LoadSync register. This is used to
	 * synchronize with the card so it can scribble on the memory
	 * that contained 0xCAFE from the "CardUp" step above
	 */
	if (UcodeSel == SXG_UCODE_SAHARA) {
		WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
	}

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
		  adapter, 0, 0, 0);
	DBG_ERROR("sxg: %s EXIT\n", __func__);

	return (TRUE);
}

/*
 * sxg_allocate_resources - Allocate memory and locks
 *
 * Arguments -
 *	adapter	- A pointer to our adapter structure
 *
 * Return - int
 */
static int sxg_allocate_resources(struct adapter_t *adapter)
{
	int status;
	u32 i;
	u32 RssIds, IsrCount;
	/* struct sxg_xmt_ring *XmtRing; */
	/* struct sxg_rcv_ring *RcvRing; */

	DBG_ERROR("%s ENTER\n", __func__);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
		  adapter, 0, 0, 0);

	/* Windows tells us how many CPUs it plans to use for RSS */
	RssIds = SXG_RSS_CPU_COUNT(adapter);
	IsrCount = adapter->MsiEnabled ? RssIds : 1;
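	/*
	 * One ISR slot per RSS CPU when MSI is enabled; otherwise a single
	 * slot is shared by the line-based interrupt.
	 */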

	DBG_ERROR("%s Setup the spinlocks\n", __func__);

	/* Allocate spinlocks and initialize listheads first. */
	spin_lock_init(&adapter->RcvQLock);
	spin_lock_init(&adapter->SglQLock);
	spin_lock_init(&adapter->XmtZeroLock);
	spin_lock_init(&adapter->Bit64RegLock);
	spin_lock_init(&adapter->AdapterLock);
	atomic_set(&adapter->pending_allocations, 0);

	DBG_ERROR("%s Setup the lists\n", __func__);

	InitializeListHead(&adapter->FreeRcvBuffers);
	InitializeListHead(&adapter->FreeRcvBlocks);
	InitializeListHead(&adapter->AllRcvBlocks);
	InitializeListHead(&adapter->FreeSglBuffers);
	InitializeListHead(&adapter->AllSglBuffers);

	/*
	 * Mark these basic allocations done. This flag essentially
	 * tells the SxgFreeResources routine that it can grab spinlocks
	 * and reference listheads.
	 */
	adapter->BasicAllocations = TRUE;
	/*
	 * Main allocation loop. Start with the maximum supported by
	 * the microcode and back off if memory allocation
	 * fails. If we hit a minimum, fail.
	 */

	for (;;) {
		DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
			  (unsigned int)(sizeof(struct sxg_xmt_ring) * 1));

		/*
		 * Start with big items first - receive and transmit rings.
		 * At the moment I'm going to keep the ring size fixed and
		 * adjust the TCBs if we fail. Later we might
		 * consider reducing the ring size as well.
		 */
		adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
					sizeof(struct sxg_xmt_ring) * 1,
					&adapter->PXmtRings);
		DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);

		if (!adapter->XmtRings) {
			goto per_tcb_allocation_failed;
		}
		memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);

		DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
			  (unsigned int)(sizeof(struct sxg_rcv_ring) * 1));
		adapter->RcvRings =
		    pci_alloc_consistent(adapter->pcidev,
					 sizeof(struct sxg_rcv_ring) * 1,
					 &adapter->PRcvRings);
		DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
		if (!adapter->RcvRings) {
			goto per_tcb_allocation_failed;
		}
		memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
		adapter->ucode_stats = kzalloc(sizeof(struct sxg_ucode_stats),
					       GFP_ATOMIC);
		if (!adapter->ucode_stats)
			goto per_tcb_allocation_failed;
		adapter->pucode_stats = pci_map_single(adapter->pcidev,
					adapter->ucode_stats,
					sizeof(struct sxg_ucode_stats),
					PCI_DMA_FROMDEVICE);
		break;

	      per_tcb_allocation_failed:
		/* an allocation failed. Free any successful allocations. */
		if (adapter->XmtRings) {
			pci_free_consistent(adapter->pcidev,
					    sizeof(struct sxg_xmt_ring) * 1,
					    adapter->XmtRings,
					    adapter->PXmtRings);
			adapter->XmtRings = NULL;
		}
		if (adapter->RcvRings) {
			pci_free_consistent(adapter->pcidev,
					    sizeof(struct sxg_rcv_ring) * 1,
					    adapter->RcvRings,
					    adapter->PRcvRings);
			adapter->RcvRings = NULL;
		}
		if (adapter->ucode_stats) {
			pci_unmap_single(adapter->pcidev,
					 adapter->pucode_stats,
					 sizeof(struct sxg_ucode_stats),
					 PCI_DMA_FROMDEVICE);
			kfree(adapter->ucode_stats);
			adapter->ucode_stats = NULL;
		}
		/* Loop around and try again.... */

	}

	DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
	/* Initialize rcv zero and xmt zero rings */
	SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
	SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);

	/* Sanity check receive data structure format */
	/* ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
	   (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); */
	ASSERT(sizeof(struct sxg_rcv_descriptor_block) ==
	       SXG_RCV_DESCRIPTOR_BLOCK_SIZE);

	/*
	 * Allocate receive data buffers. We allocate a block of buffers and
	 * a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
	 */
	for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
	     i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
		sxg_allocate_buffer_memory(adapter,
				SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
				SXG_BUFFER_TYPE_RCV);
	}
	/*
	 * NBL resource allocation can fail in the 'AllocateComplete' routine,
	 * which doesn't return status. Make sure we got the number of buffers
	 * we requested
	 */
	if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
			  adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
			  0);
		return (STATUS_RESOURCES);
	}

	DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
		  (unsigned int)(sizeof(struct sxg_event_ring) * RssIds));

	/* Allocate event queues. */
	adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
					sizeof(struct sxg_event_ring) *
					RssIds,
					&adapter->PEventRings);

	if (!adapter->EventRings) {
		/* Caller will call SxgFreeAdapter to clean up above
		 * allocations */
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
			  adapter, SXG_MAX_ENTRIES, 0, 0);
		status = STATUS_RESOURCES;
		goto per_tcb_allocation_failed;
	}
	memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);

597 DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
598 /* Allocate ISR */
599 adapter->Isr = pci_alloc_consistent(adapter->pcidev,
600 IsrCount, &adapter->PIsr);
601 if (!adapter->Isr) {
602 /* Caller will call SxgFreeAdapter to clean up above
603 * allocations */
604 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
605 adapter, SXG_MAX_ENTRIES, 0, 0);
606 status = STATUS_RESOURCES;
607 goto per_tcb_allocation_failed;
608 }
609 memset(adapter->Isr, 0, sizeof(u32) * IsrCount);

	DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
		  __func__, (unsigned int)sizeof(u32));

	/* Allocate shared XMT ring zero index location */
	adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
						sizeof(u32),
						&adapter->PXmtRingZeroIndex);
	if (!adapter->XmtRingZeroIndex) {
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF10",
			  adapter, SXG_MAX_ENTRIES, 0, 0);
		status = STATUS_RESOURCES;
		goto per_tcb_allocation_failed;
	}
	memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
		  adapter, SXG_MAX_ENTRIES, 0, 0);

	DBG_ERROR("%s EXIT\n", __func__);
	return (STATUS_SUCCESS);
}

/*
 * sxg_config_pci -
 *
 * Set up PCI Configuration space
 *
 * Arguments -
 *	pcidev	- A pointer to our PCI device structure
 */
static void sxg_config_pci(struct pci_dev *pcidev)
{
	u16 pci_command;
	u16 new_command;

	pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
	DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command);
	/* Set the command register */
	new_command = pci_command | (
				     /* Memory Space Enable */
				     PCI_COMMAND_MEMORY |
				     /* Bus master enable */
				     PCI_COMMAND_MASTER |
				     /* Memory write and invalidate */
				     PCI_COMMAND_INVALIDATE |
				     /* Parity error response */
				     PCI_COMMAND_PARITY |
				     /* System ERR */
				     PCI_COMMAND_SERR |
				     /* Fast back-to-back */
				     PCI_COMMAND_FAST_BACK);
	if (pci_command != new_command) {
		DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
			  __func__, pci_command, new_command);
		pci_write_config_word(pcidev, PCI_COMMAND, new_command);
	}
}

/*
 * sxg_read_config
 * @adapter : Pointer to the adapter structure for the card
 * This function will read the configuration data from EEPROM/FLASH
 */
static inline int sxg_read_config(struct adapter_t *adapter)
{
	/* struct sxg_config data; */
	struct sw_cfg_data *data;
	dma_addr_t p_addr;
	unsigned long status;
	unsigned long i;

	data = pci_alloc_consistent(adapter->pcidev,
				    sizeof(struct sw_cfg_data), &p_addr);
	if (!data) {
		/*
		 * We can't get even this much memory. Raise hell and
		 * get out of here.
		 */
		printk(KERN_ERR "%s: Could not allocate memory for reading EEPROM\n",
		       __func__);
		return -ENOMEM;
	}

	WRITE_REG(adapter->UcodeRegs[0].ConfigStat, SXG_CFG_TIMEOUT, TRUE);

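	/*
	 * Hand the card the DMA address of our sw_cfg_data buffer.  The
	 * microcode is expected to DMA the configuration into it and then
	 * replace the SXG_CFG_TIMEOUT sentinel we seeded into ConfigStat
	 * above; we poll for that change (up to ~1 second) below.
	 */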
	WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0);
	for (i = 0; i < 1000; i++) {
		READ_REG(adapter->UcodeRegs[0].ConfigStat, status);
		if (status != SXG_CFG_TIMEOUT)
			break;
		mdelay(1);	/* Do we really need this */
	}

	switch (status) {
	/* Config read from EEPROM succeeded */
	case SXG_CFG_LOAD_EEPROM:
	/* Config read from Flash succeeded */
	case SXG_CFG_LOAD_FLASH:
		/*
		 * Copy the MAC address to the adapter structure.
		 * TODO: We are not doing the remaining part: FRU, etc.
		 */
		memcpy(adapter->macaddr, data->MacAddr[0].MacAddr,
		       sizeof(struct sxg_config_mac));
		break;
	case SXG_CFG_TIMEOUT:
	case SXG_CFG_LOAD_INVALID:
	case SXG_CFG_LOAD_ERROR:
	default:		/* Fix default handler later */
		printk(KERN_WARNING "%s: We could not read the config word. Status = %ld\n",
		       __func__, status);
		break;
	}
	pci_free_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), data,
			    p_addr);
	if (adapter->netdev) {
		memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
		memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
	}
	printk(KERN_INFO "LINSYS: These are the new MAC addresses\n");
	sxg_dbg_macaddrs(adapter);

	return status;
}

static int sxg_entry_probe(struct pci_dev *pcidev,
			   const struct pci_device_id *pci_tbl_entry)
{
	static int did_version = 0;
	int err;
	struct net_device *netdev;
	struct adapter_t *adapter;
	void __iomem *memmapped_ioaddr;
	u32 status = 0;
	ulong mmio_start = 0;
	ulong mmio_len = 0;

	DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
		  __func__, jiffies, smp_processor_id());

	/* Initialize trace buffer */
#ifdef ATKDBG
	SxgTraceBuffer = &LSxgTraceBuffer;
	SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
#endif

	sxg_global.dynamic_intagg = dynamic_intagg;

	err = pci_enable_device(pcidev);

	DBG_ERROR("Call pci_enable_device(%p) status[%x]\n", pcidev, err);
	if (err) {
		return err;
	}

	if (sxg_debug > 0 && did_version++ == 0) {
		printk(KERN_INFO "%s\n", sxg_banner);
		printk(KERN_INFO "%s\n", SXG_DRV_VERSION);
	}

	if (!(err = pci_set_dma_mask(pcidev, DMA_64BIT_MASK))) {
		DBG_ERROR("pci_set_dma_mask(DMA_64BIT_MASK) successful\n");
	} else {
		if ((err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK))) {
			DBG_ERROR
			    ("No usable DMA configuration, aborting err[%x]\n",
			     err);
			return err;
		}
		DBG_ERROR("pci_set_dma_mask(DMA_32BIT_MASK) successful\n");
	}

	DBG_ERROR("Call pci_request_regions\n");

	err = pci_request_regions(pcidev, sxg_driver_name);
	if (err) {
		DBG_ERROR("pci_request_regions FAILED err[%x]\n", err);
		return err;
	}

	DBG_ERROR("call pci_set_master\n");
	pci_set_master(pcidev);

	DBG_ERROR("call alloc_etherdev\n");
	netdev = alloc_etherdev(sizeof(struct adapter_t));
	if (!netdev) {
		err = -ENOMEM;
		goto err_out_exit_sxg_probe;
	}
	DBG_ERROR("alloc_etherdev for slic netdev[%p]\n", netdev);

	SET_NETDEV_DEV(netdev, &pcidev->dev);

	pci_set_drvdata(pcidev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pcidev = pcidev;

	mmio_start = pci_resource_start(pcidev, 0);
	mmio_len = pci_resource_len(pcidev, 0);

	DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
		  mmio_start, mmio_len);

	memmapped_ioaddr = ioremap(mmio_start, mmio_len);
	DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
		  memmapped_ioaddr);
	if (!memmapped_ioaddr) {
		DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
			  __func__, mmio_len, mmio_start);
		goto err_out_free_mmio_region;
	}

825 DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] \
826 len[%lx], IRQ %d.\n", __func__, memmapped_ioaddr, mmio_start,
827 mmio_len, pcidev->irq);
828
	adapter->HwRegs = (void *)memmapped_ioaddr;
	adapter->base_addr = memmapped_ioaddr;

	mmio_start = pci_resource_start(pcidev, 2);
	mmio_len = pci_resource_len(pcidev, 2);

	DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
		  mmio_start, mmio_len);

	memmapped_ioaddr = ioremap(mmio_start, mmio_len);
	DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
		  memmapped_ioaddr);
	if (!memmapped_ioaddr) {
		DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
			  __func__, mmio_len, mmio_start);
		goto err_out_free_mmio_region;
	}

	DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
		  "start[%lx] len[%lx], IRQ %d.\n", __func__,
		  memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);

	adapter->UcodeRegs = (void *)memmapped_ioaddr;

	adapter->State = SXG_STATE_INITIALIZING;
	/*
	 * Maintain a list of all adapters anchored by
	 * the global SxgDriver structure.
	 */
	adapter->Next = SxgDriver.Adapters;
	SxgDriver.Adapters = adapter;
	adapter->AdapterID = ++SxgDriver.AdapterID;

	/* Initialize CRC table used to determine multicast hash */
	sxg_mcast_init_crc32();

	adapter->JumboEnabled = FALSE;
	adapter->RssEnabled = FALSE;
	if (adapter->JumboEnabled) {
		adapter->FrameSize = JUMBOMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
	} else {
		adapter->FrameSize = ETHERMAXFRAME;
		adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
	}

	/*
	 * status = SXG_READ_EEPROM(adapter);
	 * if (!status) {
	 *	goto sxg_init_bad;
	 * }
	 */

	DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
	sxg_config_pci(pcidev);
	DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);

	DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
	sxg_init_driver();
	DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);

	adapter->vendid = pci_tbl_entry->vendor;
	adapter->devid = pci_tbl_entry->device;
	adapter->subsysid = pci_tbl_entry->subdevice;
	adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
	adapter->functionnumber = (pcidev->devfn & 0x7);
	adapter->memorylength = pci_resource_len(pcidev, 0);
	adapter->irq = pcidev->irq;
	adapter->next_netdevice = head_netdevice;
	head_netdevice = netdev;
	adapter->port = 0;	/*adapter->functionnumber; */

	/* Allocate memory and other resources */
	DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
	status = sxg_allocate_resources(adapter);
	DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
		  __func__, status);
	if (status != STATUS_SUCCESS) {
		goto err_out_unmap;
	}

	DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
	if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
		DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
			  __func__);
		sxg_read_config(adapter);
		status = sxg_adapter_set_hwaddr(adapter);
	} else {
		adapter->state = ADAPT_FAIL;
		adapter->linkstate = LINK_DOWN;
		DBG_ERROR("sxg_download_microcode FAILED status[%x]\n", status);
	}

	netdev->base_addr = (unsigned long)adapter->base_addr;
	netdev->irq = adapter->irq;
	netdev->open = sxg_entry_open;
	netdev->stop = sxg_entry_halt;
	netdev->hard_start_xmit = sxg_send_packets;
	netdev->do_ioctl = sxg_ioctl;
#if XXXTODO
	netdev->set_mac_address = sxg_mac_set_address;
#endif
	netdev->get_stats = sxg_get_stats;
	netdev->set_multicast_list = sxg_mcast_set_list;
	SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops);

	strcpy(netdev->name, "eth%d");
	/* strcpy(netdev->name, pci_name(pcidev)); */
	if ((err = register_netdev(netdev))) {
		DBG_ERROR("Cannot register net device, aborting. %s\n",
			  netdev->name);
		goto err_out_unmap;
	}

	DBG_ERROR("sxg: %s addr 0x%lx, irq %d, "
		  "MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
		  netdev->name, netdev->base_addr, pcidev->irq, netdev->dev_addr[0],
		  netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
		  netdev->dev_addr[4], netdev->dev_addr[5]);

	/* sxg_init_bad: */
	ASSERT(status == FALSE);
	/* sxg_free_adapter(adapter); */

	DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
		  status, jiffies, smp_processor_id());
	return status;

 err_out_unmap:
	iounmap((void *)memmapped_ioaddr);

 err_out_free_mmio_region:
	release_mem_region(mmio_start, mmio_len);

 err_out_exit_sxg_probe:

	DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
		  smp_processor_id());

	return -ENODEV;
}

/*
 * LINE BASE Interrupt routines..
 *
 * sxg_disable_interrupt
 *
 * DisableInterrupt Handler
 *
 * Arguments:
 *
 *	adapter:	Our adapter structure
 *
 * Return Value:
 *	None.
 */
static void sxg_disable_interrupt(struct adapter_t *adapter)
{
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);
	ASSERT(adapter->MsiEnabled == FALSE);
	/* Turn off interrupts by writing to the icr register. */
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);

	adapter->InterruptsEnabled = 0;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDisIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
}

/*
 * sxg_enable_interrupt
 *
 * EnableInterrupt Handler
 *
 * Arguments:
 *
 *	adapter:	Our adapter structure
 *
 * Return Value:
 *	None.
 */
static void sxg_enable_interrupt(struct adapter_t *adapter)
{
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
		  adapter, adapter->InterruptsEnabled, 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);
	ASSERT(adapter->MsiEnabled == FALSE);
	/* Turn on interrupts by writing to the icr register. */
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);

	adapter->InterruptsEnabled = 1;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XEnIntr",
		  adapter, 0, 0, 0);
}

/*
 * sxg_isr - Process a line-based interrupt
 *
 * Arguments:
 *	irq	- Interrupt number
 *	dev_id	- Our net_device
 *	(The QueueDefault/TargetCpus DPC outputs belong to the RSS path,
 *	 which is stubbed out under XXXTODO on Linux)
 *
 * Return Value: IRQ_HANDLED if it was our interrupt, else IRQ_NONE
 */
static irqreturn_t sxg_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);

	if (adapter->state != ADAPT_UP)
		return IRQ_NONE;
	adapter->Stats.NumInts++;
	if (adapter->Isr[0] == 0) {
		/*
		 * The SLIC driver used to experience a number of spurious
		 * interrupts due to the delay associated with the masking of
		 * the interrupt (we'd bounce back in here). If we see that
		 * again with Sahara, add a READ_REG of the Icr register after
		 * the WRITE_REG below.
		 */
		adapter->Stats.FalseInts++;
		return IRQ_NONE;
	}
	/*
	 * Move the Isr contents and clear the value in
	 * shared memory, and mask interrupts
	 */
	adapter->IsrCopy[0] = adapter->Isr[0];
	adapter->Isr[0] = 0;
	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
	/* ASSERT(adapter->IsrDpcsPending == 0); */
#if XXXTODO			/* RSS Stuff */
	/*
	 * If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then
	 * schedule DPC's based on event queues.
	 */
	if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
		for (i = 0;
		     i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
		     i++) {
			struct sxg_event_ring *EventRing =
			    &adapter->EventRings[i];
			struct sxg_event *Event =
			    &EventRing->Ring[adapter->NextEvent[i]];
			unsigned char Cpu =
			    adapter->RssSystemInfo->RssIdToCpu[i];
			if (Event->Status & EVENT_STATUS_VALID) {
				adapter->IsrDpcsPending++;
				CpuMask |= (1 << Cpu);
			}
		}
	}
	/*
	 * Now, either schedule the CPUs specified by the CpuMask,
	 * or queue default
	 */
	if (CpuMask) {
		*QueueDefault = FALSE;
	} else {
		adapter->IsrDpcsPending = 1;
		*QueueDefault = TRUE;
	}
	*TargetCpus = CpuMask;
#endif
	/* There are no DPCs in Linux, so call the handler now */
	sxg_handle_interrupt(adapter);

	return IRQ_HANDLED;
}

static void sxg_handle_interrupt(struct adapter_t *adapter)
{
	/* unsigned char RssId = 0; */
	u32 NewIsr;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
		  adapter, adapter->IsrCopy[0], 0, 0);
	/* For now, RSS is disabled with line based interrupts */
	ASSERT(adapter->RssEnabled == FALSE);
	ASSERT(adapter->MsiEnabled == FALSE);
	ASSERT(adapter->IsrCopy[0]);

	/* Always process the event queue. */
	sxg_process_event_queue(adapter,
				(adapter->RssEnabled ? /*RssId */ 0 : 0));

#if XXXTODO			/* RSS stuff */
	if (--adapter->IsrDpcsPending) {
		/* We're done. */
		ASSERT(adapter->RssEnabled);
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
			  adapter, 0, 0, 0);
		return;
	}
#endif
	/* Last (or only) DPC processes the ISR and clears the interrupt. */
	NewIsr = sxg_process_isr(adapter, 0);
	/* Reenable interrupts */
	adapter->IsrCopy[0] = 0;
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
		  adapter, NewIsr, 0, 0);

	WRITE_REG(adapter->UcodeRegs[0].Isr, NewIsr, TRUE);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XHndlInt",
		  adapter, 0, 0, 0);
}

/*
 * sxg_process_isr - Process an interrupt. Called from the line-based and
 *			message based interrupt DPC routines
 *
 * Arguments:
 *	adapter		- Our adapter structure
 *	MessageId	- The ISR slot that needs processing
 *
 * Return Value:
 *	The new ISR value to be written back to the card
 */
static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
{
	u32 Isr = adapter->IsrCopy[MessageId];
	u32 NewIsr = 0;

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
		  adapter, Isr, 0, 0);

	/* Error */
	if (Isr & SXG_ISR_ERR) {
		if (Isr & SXG_ISR_PDQF) {
			adapter->Stats.PdqFull++;
			DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __func__);
		}
		/* No host buffer */
		if (Isr & SXG_ISR_RMISS) {
			/*
			 * There is a bunch of code in the SLIC driver which
			 * attempts to process more receive events per DPC
			 * if we start to fall behind. We'll probably
			 * need to do something similar here, but hold
			 * off for now. I don't want to make the code more
			 * complicated than strictly needed.
			 */
			adapter->stats.rx_missed_errors++;
			if (adapter->stats.rx_missed_errors < 5) {
				DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
					  __func__);
			}
		}
		/* Card crash */
		if (Isr & SXG_ISR_DEAD) {
			/*
			 * Set aside the crash info and set the adapter state
			 * to RESET
			 */
			adapter->CrashCpu = (unsigned char)
			    ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT);
			adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
			adapter->Dead = TRUE;
			DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
				  adapter->CrashLocation, adapter->CrashCpu);
		}
		/* Event ring full */
		if (Isr & SXG_ISR_ERFULL) {
			/*
			 * Same issue as RMISS, really. This means the
			 * host is falling behind the card. Need to increase
			 * event ring size, process more events per interrupt,
			 * and/or reduce/remove interrupt aggregation.
			 */
			adapter->Stats.EventRingFull++;
			DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
				  __func__);
		}
		/* Transmit drop - no DRAM buffers or XMT error */
		if (Isr & SXG_ISR_XDROP) {
			DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __func__);
		}
	}
	/* Slowpath send completions */
	if (Isr & SXG_ISR_SPSEND) {
		sxg_complete_slow_send(adapter, 1);
	}
	/* Dump */
	if (Isr & SXG_ISR_UPC) {
		/* Maybe change when debug is added.. */
		/* ASSERT(adapter->DumpCmdRunning); */
		adapter->DumpCmdRunning = FALSE;
	}
	/* Link event */
	if (Isr & SXG_ISR_LINK) {
		sxg_link_event(adapter);
	}
	/* Debug - breakpoint hit */
	if (Isr & SXG_ISR_BREAK) {
		/*
		 * At the moment AGDB isn't written to support interactive
		 * debug sessions. When it is, this interrupt will be used to
		 * signal AGDB that it has hit a breakpoint. For now, ASSERT.
		 */
		ASSERT(0);
	}
	/* Heartbeat response */
	if (Isr & SXG_ISR_PING) {
		adapter->PingOutstanding = FALSE;
	}
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XProcIsr",
		  adapter, Isr, NewIsr, 0);

	return (NewIsr);
}

/*
 * sxg_process_event_queue - Process our event queue
 *
 * Arguments:
 *	- adapter	- Adapter structure
 *	- RssId		- The event queue requiring processing
 *
 * Return Value:
 *	SXG_ISR_EVENT if the batch limit was hit (more work pending), else 0
 */
static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId)
{
	struct sxg_event_ring *EventRing = &adapter->EventRings[RssId];
	struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[RssId]];
	u32 EventsProcessed = 0, Batches = 0;
	u32 num_skbs = 0;
	struct sk_buff *skb;
#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
	struct sk_buff *prev_skb = NULL;
	struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE];
	u32 Index;
	struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
#endif
	u32 ReturnStatus = 0;

	ASSERT((adapter->State == SXG_STATE_RUNNING) ||
	       (adapter->State == SXG_STATE_PAUSING) ||
	       (adapter->State == SXG_STATE_PAUSED) ||
	       (adapter->State == SXG_STATE_HALTING));
	/*
	 * We may still have unprocessed events on the queue if
	 * the card crashed. Don't process them.
	 */
	if (adapter->Dead) {
		return (0);
	}
	/*
	 * In theory there should only be a single processor that
	 * accesses this queue, and only at interrupt-DPC time. So
	 * we shouldn't need a lock for any of this.
	 */
	while (Event->Status & EVENT_STATUS_VALID) {
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
			  Event, Event->Code, Event->Status,
			  adapter->NextEvent);
		switch (Event->Code) {
		case EVENT_CODE_BUFFERS:
			/* struct sxg_ring_info Head & Tail == unsigned char */
			ASSERT(!(Event->CommandIndex & 0xFF00));
			sxg_complete_descriptor_blocks(adapter,
						       Event->CommandIndex);
			break;
		case EVENT_CODE_SLOWRCV:
			--adapter->RcvBuffersOnCard;
			if ((skb = sxg_slow_receive(adapter, Event))) {
				u32 rx_bytes;
#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
				/* Add it to our indication list */
				SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
						   IndicationList, num_skbs);
				/*
				 * Linux, we just pass up each skb to the
				 * protocol above at this point, there is no
				 * capability of an indication list.
				 */
#else
				/* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
				/* (rcvbuf->length & IRHDDR_FLEN_MSK); */
				rx_bytes = Event->Length;
				adapter->stats.rx_packets++;
				adapter->stats.rx_bytes += rx_bytes;
#if SXG_OFFLOAD_IP_CHECKSUM
				skb->ip_summed = CHECKSUM_UNNECESSARY;
#endif
				skb->dev = adapter->netdev;
				netif_rx(skb);
#endif
			}
			break;
		default:
			DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
				  __func__, Event->Code);
			/* ASSERT(0); */
		}
		/*
		 * See if we need to restock card receive buffers.
		 * There are two things to note here:
		 * First - This test is not SMP safe. The
		 * adapter->BuffersOnCard field is protected via atomic
		 * interlocked calls, but we do not protect it with respect
		 * to these tests. The only way to do that is with a lock,
		 * and I don't want to grab a lock every time we adjust the
		 * BuffersOnCard count. Instead, we allow the buffer
		 * replenishment to be off once in a while. The worst that
		 * can happen is the card is given one more-or-less descriptor
		 * block than the arbitrary value we've chosen. No big deal.
		 * In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard
		 * is adjusted.
		 * Second - We expect this test to rarely
		 * evaluate to true. We attempt to refill descriptor blocks
		 * as they are returned to us (sxg_complete_descriptor_blocks),
		 * so the only time this should evaluate to true is when
		 * sxg_complete_descriptor_blocks failed to allocate
		 * receive buffers.
		 */
		if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
			sxg_stock_rcv_buffers(adapter);
		}
		/*
		 * It's more efficient to just set this to zero.
		 * But clearing the top bit saves potential debug info...
		 */
		Event->Status &= ~EVENT_STATUS_VALID;
		/* Advance to the next event */
		SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
		Event = &EventRing->Ring[adapter->NextEvent[RssId]];
		EventsProcessed++;
		if (EventsProcessed == EVENT_RING_BATCH) {
			/* Release a batch of events back to the card */
			WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
				  EVENT_RING_BATCH, FALSE);
			EventsProcessed = 0;
			/*
			 * If we've processed our batch limit, break out of the
			 * loop and return SXG_ISR_EVENT to arrange for us to
			 * be called again
			 */
			if (Batches++ == EVENT_BATCH_LIMIT) {
				SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
					  TRACE_NOISY, "EvtLimit", Batches,
					  adapter->NextEvent, 0, 0);
				ReturnStatus = SXG_ISR_EVENT;
				break;
			}
		}
	}
#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
	/* Indicate any received dumb-nic frames */
	SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
#endif
	/* Release events back to the card. */
	if (EventsProcessed) {
		WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
			  EventsProcessed, FALSE);
	}
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XPrcEvnt",
		  Batches, EventsProcessed, adapter->NextEvent, num_skbs);

	return (ReturnStatus);
}

/*
 * sxg_complete_slow_send - Complete slowpath or dumb-nic sends
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	irq_context	- An integer to denote if we are in interrupt context
 * Return
 *	None
 */
static void sxg_complete_slow_send(struct adapter_t *adapter, int irq_context)
{
	struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
	struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
	u32 *ContextType;
	struct sxg_cmd *XmtCmd;
	unsigned long flags = 0;
	unsigned long sgl_flags = 0;
	unsigned int processed_count = 0;

	/*
	 * NOTE - This lock is dropped and regrabbed in this loop.
	 * This means two different processors can both be running
	 * through this loop. Be *very* careful.
	 */
	if (irq_context) {
		if (!spin_trylock(&adapter->XmtZeroLock))
			goto lock_busy;
	}
	else
		spin_lock_irqsave(&adapter->XmtZeroLock, flags);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
		  adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);

	while ((XmtRingInfo->Tail != *adapter->XmtRingZeroIndex)
	       && processed_count++ < SXG_COMPLETE_SLOW_SEND_LIMIT) {
		/*
		 * Locate the current Cmd (ring descriptor entry), and
		 * associated SGL, and advance the tail
		 */
		SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
		ASSERT(ContextType);
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
			  XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
		/* Clear the SGL field. */
		XmtCmd->Sgl = 0;

		switch (*ContextType) {
		case SXG_SGL_DUMB:
			{
				struct sk_buff *skb;
				struct sxg_scatter_gather *SxgSgl =
				    (struct sxg_scatter_gather *)ContextType;
				dma64_addr_t FirstSgeAddress;
				u32 FirstSgeLength;

				/* Dumb-nic send. Command context is the dumb-nic SGL */
				skb = (struct sk_buff *)ContextType;
				skb = SxgSgl->DumbPacket;
				FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress;
				FirstSgeLength = XmtCmd->Buffer.FirstSgeLength;
				/* Complete the send */
				SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
					  TRACE_IMPORTANT, "DmSndCmp", skb, 0,
					  0, 0);
				ASSERT(adapter->Stats.XmtQLen);
				/*
				 * Now drop the lock and complete the send
				 * back to Microsoft. We need to drop the lock
				 * because Microsoft can come back with a
				 * chimney send, which results in a double trip
				 * in SxgTcpOutput
				 */
				if (irq_context)
					spin_unlock(&adapter->XmtZeroLock);
				else
					spin_unlock_irqrestore(
						&adapter->XmtZeroLock, flags);

				SxgSgl->DumbPacket = NULL;
				SXG_COMPLETE_DUMB_SEND(adapter, skb,
						       FirstSgeAddress,
						       FirstSgeLength);
				SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL,
						    irq_context);
				/* and reacquire.. */
				if (irq_context) {
					if (!spin_trylock(&adapter->XmtZeroLock))
						goto lock_busy;
				}
				else
					spin_lock_irqsave(&adapter->XmtZeroLock, flags);
			}
			break;
		default:
			ASSERT(0);
		}
	}
	if (irq_context)
		spin_unlock(&adapter->XmtZeroLock);
	else
		spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
 lock_busy:
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
		  adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
}

/*
 * sxg_slow_receive
 *
 * Arguments -
 *	adapter		- A pointer to our adapter structure
 *	Event		- Receive event
 *
 * Return - skb
 */
static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
					struct sxg_event *Event)
{
	u32 BufferSize = adapter->ReceiveBufferSize;
	struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
	struct sk_buff *Packet;
	static int read_counter = 0;

	RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle;
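	/*
	 * Pull ucode statistics roughly once every 256 slow-path receives:
	 * the bit test below fires when the counter reaches 0x100, and the
	 * counter is then reset.
	 */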
	if (read_counter++ & 0x100) {
		sxg_collect_statistics(adapter);
		read_counter = 0;
	}
	ASSERT(RcvDataBufferHdr);
	ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD);
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
		  RcvDataBufferHdr, RcvDataBufferHdr->State,
		  /*RcvDataBufferHdr->VirtualAddress*/ 0);
	/* Drop rcv frames in non-running state */
	switch (adapter->State) {
	case SXG_STATE_RUNNING:
		break;
	case SXG_STATE_PAUSING:
	case SXG_STATE_PAUSED:
	case SXG_STATE_HALTING:
		goto drop;
	default:
		ASSERT(0);
		goto drop;
	}

	/*
	 * memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
	 *	  RcvDataBufferHdr->VirtualAddress, Event->Length);
	 */

	/* Change buffer state to UPSTREAM */
	RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
	if (Event->Status & EVENT_STATUS_RCVERR) {
		SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
			  Event, Event->Status, Event->HostHandle, 0);
		/* XXXTODO - Remove this print later */
		DBG_ERROR("SXG: Receive error %x\n", *(u32 *)
			  SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr));
		sxg_process_rcv_error(adapter, *(u32 *)
				      SXG_RECEIVE_DATA_LOCATION
				      (RcvDataBufferHdr));
		goto drop;
	}
#if XXXTODO			/* VLAN stuff */
	/* If there's a VLAN tag, extract it and validate it */
	if (((struct ether_header *)
	     (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->EtherType
	    == ETHERTYPE_VLAN) {
		if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
		    STATUS_SUCCESS) {
			SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY,
				  "BadVlan", Event,
				  SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
				  Event->Length, 0);
			goto drop;
		}
	}
#endif
	/* Dumb-nic frame. See if it passes our mac filter and update stats */

	/*
	 * ASK if (!sxg_mac_filter(adapter,
	 *	SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
	 *	Event->Length)) {
	 *	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
	 *		  Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
	 *		  Event->Length, 0);
	 *	goto drop;
	 * }
	 */

	Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
	SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
	Packet->protocol = eth_type_trans(Packet, adapter->netdev);

	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
		  RcvDataBufferHdr, Packet, Event->Length, 0);
	/*
	 * Detach the skb we are passing upstream and try to attach a fresh
	 * one to this buffer header; if that works, put the header back on
	 * the free list so it can be re-posted to the card.
	 */
	RcvDataBufferHdr->SxgDumbRcvPacket = NULL;
	RcvDataBufferHdr->PhysicalAddress = (dma_addr_t)NULL;
	SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
	if (RcvDataBufferHdr->skb) {
		spin_lock(&adapter->RcvQLock);
		SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
		/* adapter->RcvBuffersOnCard++; */
		spin_unlock(&adapter->RcvQLock);
	}
	return (Packet);

 drop:
	SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DropRcv",
		  RcvDataBufferHdr, Event->Length, 0, 0);
	adapter->stats.rx_dropped++;
	/* adapter->Stats.RcvDiscards++; */
	spin_lock(&adapter->RcvQLock);
	SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
	spin_unlock(&adapter->RcvQLock);
	return (NULL);
}

/*
 * sxg_process_rcv_error - process receive error and update
 * stats
 *
 * Arguments:
 *	adapter		- Adapter structure
 *	ErrorStatus	- 4-byte receive error status
 *
 * Return Value : None
 */
static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus)
{
	u32 Error;

	adapter->stats.rx_errors++;

	if (ErrorStatus & SXG_RCV_STATUS_TRANSPORT_ERROR) {
		Error = ErrorStatus & SXG_RCV_STATUS_TRANSPORT_MASK;
		switch (Error) {
		case SXG_RCV_STATUS_TRANSPORT_CSUM:
			adapter->Stats.TransportCsum++;
			break;
		case SXG_RCV_STATUS_TRANSPORT_UFLOW:
			adapter->Stats.TransportUflow++;
			break;
		case SXG_RCV_STATUS_TRANSPORT_HDRLEN:
			adapter->Stats.TransportHdrLen++;
			break;
		}
	}
	if (ErrorStatus & SXG_RCV_STATUS_NETWORK_ERROR) {
		Error = ErrorStatus & SXG_RCV_STATUS_NETWORK_MASK;
		switch (Error) {
		case SXG_RCV_STATUS_NETWORK_CSUM:
			adapter->Stats.NetworkCsum++;
			break;
		case SXG_RCV_STATUS_NETWORK_UFLOW:
			adapter->Stats.NetworkUflow++;
			break;
		case SXG_RCV_STATUS_NETWORK_HDRLEN:
			adapter->Stats.NetworkHdrLen++;
			break;
		}
	}
	if (ErrorStatus & SXG_RCV_STATUS_PARITY) {
		adapter->Stats.Parity++;
	}
	if (ErrorStatus & SXG_RCV_STATUS_LINK_ERROR) {
		Error = ErrorStatus & SXG_RCV_STATUS_LINK_MASK;
		switch (Error) {
		case SXG_RCV_STATUS_LINK_PARITY:
			adapter->Stats.LinkParity++;
			break;
		case SXG_RCV_STATUS_LINK_EARLY:
			adapter->Stats.LinkEarly++;
			break;
		case SXG_RCV_STATUS_LINK_BUFOFLOW:
			adapter->Stats.LinkBufOflow++;
			break;
		case SXG_RCV_STATUS_LINK_CODE:
			adapter->Stats.LinkCode++;
			break;
		case SXG_RCV_STATUS_LINK_DRIBBLE:
			adapter->Stats.LinkDribble++;
			break;
		case SXG_RCV_STATUS_LINK_CRC:
			adapter->Stats.LinkCrc++;
			break;
		case SXG_RCV_STATUS_LINK_OFLOW:
			adapter->Stats.LinkOflow++;
			break;
		case SXG_RCV_STATUS_LINK_UFLOW:
			adapter->Stats.LinkUflow++;
			break;
		}
	}
}

#if 0	/* Find out if this code will be needed in future */
/*
 * sxg_mac_filter
 *
 * Arguments:
 *	adapter		- Adapter structure
 *	EtherHdr	- Ethernet header
 *	length		- Frame length
 *
 * Return Value : TRUE if the frame is to be allowed
 */
static bool sxg_mac_filter(struct adapter_t *adapter,
			   struct ether_header *EtherHdr, ushort length)
{
	bool EqualAddr;

	if (SXG_MULTICAST_PACKET(EtherHdr)) {
		if (SXG_BROADCAST_PACKET(EtherHdr)) {
			/* broadcast */
			if (adapter->MacFilter & MAC_BCAST) {
				adapter->Stats.DumbRcvBcastPkts++;
				adapter->Stats.DumbRcvBcastBytes += length;
				adapter->Stats.DumbRcvPkts++;
				adapter->Stats.DumbRcvBytes += length;
				return (TRUE);
			}
		} else {
			/* multicast */
			if (adapter->MacFilter & MAC_ALLMCAST) {
				adapter->Stats.DumbRcvMcastPkts++;
				adapter->Stats.DumbRcvMcastBytes += length;
				adapter->Stats.DumbRcvPkts++;
				adapter->Stats.DumbRcvBytes += length;
				return (TRUE);
			}
			if (adapter->MacFilter & MAC_MCAST) {
				struct sxg_multicast_address *MulticastAddrs =
				    adapter->MulticastAddrs;
				while (MulticastAddrs) {
					ETHER_EQ_ADDR(MulticastAddrs->Address,
						      EtherHdr->ether_dhost,
						      EqualAddr);
					if (EqualAddr) {
						adapter->Stats.
						    DumbRcvMcastPkts++;
						adapter->Stats.
						    DumbRcvMcastBytes += length;
						adapter->Stats.DumbRcvPkts++;
						adapter->Stats.DumbRcvBytes +=
						    length;
						return (TRUE);
					}
					MulticastAddrs = MulticastAddrs->Next;
				}
			}
		}
	} else if (adapter->MacFilter & MAC_DIRECTED) {
		/*
		 * Not broadcast or multicast. Must be directed at us or
		 * the card is in promiscuous mode. Either way, consider it
		 * ours if MAC_DIRECTED is set
		 */
		adapter->Stats.DumbRcvUcastPkts++;
		adapter->Stats.DumbRcvUcastBytes += length;
		adapter->Stats.DumbRcvPkts++;
		adapter->Stats.DumbRcvBytes += length;
		return (TRUE);
	}
	if (adapter->MacFilter & MAC_PROMISC) {
		/* Whatever it is, keep it. */
		adapter->Stats.DumbRcvPkts++;
		adapter->Stats.DumbRcvBytes += length;
		return (TRUE);
	}
	adapter->Stats.RcvDiscards++;
	return (FALSE);
}
#endif
1779 static int sxg_register_interrupt(struct adapter_t *adapter)
1780 {
1781 if (!adapter->intrregistered) {
1782 int retval;
1783
1784 DBG_ERROR
1785 ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
1786 __func__, adapter, adapter->netdev->irq, NR_IRQS);
1787
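/*
 * request_irq() can sleep, so the global driver lock (taken by our
 * caller with spin_lock_irqsave) is dropped across the call and
 * re-acquired afterwards.
 */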
1788 spin_unlock_irqrestore(&sxg_global.driver_lock,
1789 sxg_global.flags);
1790
1791 retval = request_irq(adapter->netdev->irq,
1792 &sxg_isr,
1793 IRQF_SHARED,
1794 adapter->netdev->name, adapter->netdev);
1795
1796 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1797
1798 if (retval) {
1799 DBG_ERROR("sxg: request_irq (%s) FAILED [%x]\n",
1800 adapter->netdev->name, retval);
1801 return (retval);
1802 }
1803 adapter->intrregistered = 1;
1804 adapter->IntRegistered = TRUE;
1805 /* Disable RSS with line-based interrupts */
1806 adapter->MsiEnabled = FALSE;
1807 adapter->RssEnabled = FALSE;
1808 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
1809 __func__, adapter, adapter->netdev->irq);
1810 }
1811 return (STATUS_SUCCESS);
1812 }
1813
1814 static void sxg_deregister_interrupt(struct adapter_t *adapter)
1815 {
1816 DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
1817 #if XXXTODO
1818 slic_init_cleanup(adapter);
1819 #endif
1820 memset(&adapter->stats, 0, sizeof(struct net_device_stats));
1821 adapter->error_interrupts = 0;
1822 adapter->rcv_interrupts = 0;
1823 adapter->xmit_interrupts = 0;
1824 adapter->linkevent_interrupts = 0;
1825 adapter->upr_interrupts = 0;
1826 adapter->num_isrs = 0;
1827 adapter->xmit_completes = 0;
1828 adapter->rcv_broadcasts = 0;
1829 adapter->rcv_multicasts = 0;
1830 adapter->rcv_unicasts = 0;
1831 DBG_ERROR("sxg: %s EXIT\n", __func__);
1832 }
1833
1834 /*
1835 * sxg_if_init
1836 *
1837 * Perform initialization of our slic interface.
1838 *
1839 */
1840 static int sxg_if_init(struct adapter_t *adapter)
1841 {
1842 struct net_device *dev = adapter->netdev;
1843 int status = 0;
1844
1845 DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d] flags[%x]\n",
1846 __func__, adapter->netdev->name,
1847 adapter->state,
1848 adapter->linkstate, dev->flags);
1849
1850 /* adapter should be down at this point */
1851 if (adapter->state != ADAPT_DOWN) {
1852 DBG_ERROR("sxg_if_init adapter->state != ADAPT_DOWN\n");
1853 return (-EIO);
1854 }
1855 ASSERT(adapter->linkstate == LINK_DOWN);
1856
1857 adapter->devflags_prev = dev->flags;
1858 adapter->macopts = MAC_DIRECTED;
1859 if (dev->flags) {
1860 DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
1861 adapter->netdev->name);
1862 if (dev->flags & IFF_BROADCAST) {
1863 adapter->macopts |= MAC_BCAST;
1864 DBG_ERROR("BCAST ");
1865 }
1866 if (dev->flags & IFF_PROMISC) {
1867 adapter->macopts |= MAC_PROMISC;
1868 DBG_ERROR("PROMISC ");
1869 }
1870 if (dev->flags & IFF_ALLMULTI) {
1871 adapter->macopts |= MAC_ALLMCAST;
1872 DBG_ERROR("ALL_MCAST ");
1873 }
1874 if (dev->flags & IFF_MULTICAST) {
1875 adapter->macopts |= MAC_MCAST;
1876 DBG_ERROR("MCAST ");
1877 }
1878 DBG_ERROR("\n");
1879 }
1880 status = sxg_register_interrupt(adapter);
1881 if (status != STATUS_SUCCESS) {
1882 DBG_ERROR("sxg_if_init: sxg_register_interrupt FAILED %x\n",
1883 status);
1884 sxg_deregister_interrupt(adapter);
1885 return (status);
1886 }
1887
1888 adapter->state = ADAPT_UP;
1889
1890 /* clear any pending events, then enable interrupts */
1891 DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
1892
1893 return (STATUS_SUCCESS);
1894 }
1895
1896 static int sxg_entry_open(struct net_device *dev)
1897 {
1898 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
1899 int status;
1900
1901 ASSERT(adapter);
1902 DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
1903 adapter->activated);
1904 DBG_ERROR
1905 ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
1906 __func__, adapter->netdev->name, jiffies, smp_processor_id(),
1907 adapter->netdev, adapter, adapter->port);
1908
1909 netif_stop_queue(adapter->netdev);
1910
1911 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1912 if (!adapter->activated) {
1913 sxg_global.num_sxg_ports_active++;
1914 adapter->activated = 1;
1915 }
1916 /* Initialize the adapter */
1917 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
1918 status = sxg_initialize_adapter(adapter);
1919 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
1920 __func__, status);
1921
1922 if (status == STATUS_SUCCESS) {
1923 DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
1924 status = sxg_if_init(adapter);
1925 DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
1926 status);
1927 }
1928
1929 if (status != STATUS_SUCCESS) {
1930 if (adapter->activated) {
1931 sxg_global.num_sxg_ports_active--;
1932 adapter->activated = 0;
1933 }
1934 spin_unlock_irqrestore(&sxg_global.driver_lock,
1935 sxg_global.flags);
1936 return (status);
1937 }
1938 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
1939
1940 /* Enable interrupts */
1941 SXG_ENABLE_ALL_INTERRUPTS(adapter);
1942
1943 DBG_ERROR("sxg: %s EXIT\n", __func__);
1944
1945 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
1946 return STATUS_SUCCESS;
1947 }
1948
1949 static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
1950 {
1951 struct net_device *dev = pci_get_drvdata(pcidev);
1952 u32 mmio_start = 0;
1953 unsigned int mmio_len = 0;
1954 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
1955
1956 ASSERT(adapter);
1957 DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __func__, dev,
1958 adapter);
1959
1960 flush_scheduled_work();
1961
1962 /* Deallocate Resources */
1963 unregister_netdev(dev);
1964 sxg_free_resources(adapter);
1965
1966 mmio_start = pci_resource_start(pcidev, 0);
1967 mmio_len = pci_resource_len(pcidev, 0);
1968
1969 DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __func__,
1970 mmio_start, mmio_len);
1971 release_mem_region(mmio_start, mmio_len);
1972
1973 mmio_start = pci_resource_start(pcidev, 2);
1974 mmio_len = pci_resource_len(pcidev, 2);
1975
1976 DBG_ERROR("sxg: %s rel_region(2) start[%x] len[%x]\n", __func__,
1977 mmio_start, mmio_len);
1978 release_mem_region(mmio_start, mmio_len);
1979
1980 pci_disable_device(pcidev);
1981
1982 DBG_ERROR("sxg: %s deallocate device\n", __func__);
1983 kfree(dev);
1984 DBG_ERROR("sxg: %s EXIT\n", __func__);
1985 }
1986
1987 static int sxg_entry_halt(struct net_device *dev)
1988 {
1989 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
1990
1991 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1992 DBG_ERROR("sxg: %s (%s) ENTER\n", __func__, dev->name);
1993
1994 netif_stop_queue(adapter->netdev);
1995 adapter->state = ADAPT_DOWN;
1996 adapter->linkstate = LINK_DOWN;
1997 adapter->devflags_prev = 0;
1998 DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
1999 __func__, dev->name, adapter, adapter->state);
2000
2001 DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name);
2003
2004 /* Disable interrupts */
2005 SXG_DISABLE_ALL_INTERRUPTS(adapter);
2006
2007 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2008
2009 sxg_deregister_interrupt(adapter);
2010 return (STATUS_SUCCESS);
2011 }
2012
2013 static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2014 {
2015 ASSERT(rq);
2016 /* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev);*/
2017 switch (cmd) {
2018 case SIOCSLICSETINTAGG:
2019 {
2020 /* struct adapter_t *adapter = (struct adapter_t *)
2021 * netdev_priv(dev);
2022 */
2023 u32 data[7];
2024 u32 intagg;
2025
2026 if (copy_from_user(data, rq->ifr_data, sizeof(data))) {
2027 DBG_ERROR("copy_from_user FAILED getting "
2028 "initial params\n");
2029 return -EFAULT;
2030 }
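/* Only data[0] (the aggregation value) is consumed; the remaining
 * words of the 28-byte buffer are currently unused. */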
2031 intagg = data[0];
2032 printk(KERN_INFO
2033 "%s: set interrupt aggregation to %d\n",
2034 __func__, intagg);
2035 return 0;
2036 }
2037
2038 default:
2039 /* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
2040 return -EOPNOTSUPP;
2041 }
2042 return 0;
2043 }
2044
2045 #define NORMAL_ETHFRAME 0
2046
2047 /*
2048 * sxg_send_packets - Send an skb packet
2049 *
2050 * Arguments:
2051 * skb - The packet to send
2052 * dev - Our linux net device that refs our adapter
2053 *
2054 * Return:
2055 * NETDEV_TX_OK regardless of outcome; dropped packets are counted in stats.tx_dropped
2056 */
2057 static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
2058 {
2059 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2060 u32 status = STATUS_SUCCESS;
2061
2062 /*
2063 * DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
2064 * skb);
2065 */
2066 printk("ASK:sxg_send_packets: skb[%p]\n", skb);
2067
2068 /* Check the adapter state */
2069 switch (adapter->State) {
2070 case SXG_STATE_INITIALIZING:
2071 case SXG_STATE_HALTED:
2072 case SXG_STATE_SHUTDOWN:
2073 ASSERT(0); /* unexpected */
2074 /* fall through */
2075 case SXG_STATE_RESETTING:
2076 case SXG_STATE_SLEEP:
2077 case SXG_STATE_BOOTDIAG:
2078 case SXG_STATE_DIAG:
2079 case SXG_STATE_HALTING:
2080 status = STATUS_FAILURE;
2081 break;
2082 case SXG_STATE_RUNNING:
2083 if (adapter->LinkState != SXG_LINK_UP) {
2084 status = STATUS_FAILURE;
2085 }
2086 break;
2087 default:
2088 ASSERT(0);
2089 status = STATUS_FAILURE;
2090 }
2091 if (status != STATUS_SUCCESS) {
2092 goto xmit_fail;
2093 }
2094 /* send a packet */
2095 status = sxg_transmit_packet(adapter, skb);
2096 if (status == STATUS_SUCCESS) {
2097 goto xmit_done;
2098 }
2099
2100 xmit_fail:
2101 /* reject & complete the packet if it can't be sent */
2102 if (status != STATUS_SUCCESS) {
2103 #if XXXTODO
2104 /* sxg_send_packets_fail(adapter, skb, status); */
2105 #else
2106 SXG_DROP_DUMB_SEND(adapter, skb);
2107 adapter->stats.tx_dropped++;
2108 return NETDEV_TX_OK; /* skb consumed above; TX_BUSY would requeue a freed skb */
2109 #endif
2110 }
2111 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
2112 status);
2113
2114 xmit_done:
2115 return NETDEV_TX_OK;
2116 }
2117
2118 /*
2119 * sxg_transmit_packet
2120 *
2121 * This function transmits a single packet.
2122 *
2123 * Arguments -
2124 * adapter - Pointer to our adapter structure
2125 * skb - The packet to be sent
2126 *
2127 * Return - STATUS of send
2128 */
2129 static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
2130 {
2131 struct sxg_x64_sgl *pSgl;
2132 struct sxg_scatter_gather *SxgSgl;
2133 unsigned long sgl_flags;
2134 /* void *SglBuffer; */
2135 /* u32 SglBufferLength; */
2136
2137 /*
2138 * The vast majority of work is done in the shared
2139 * sxg_dumb_sgl routine.
2140 */
2141 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
2142 adapter, skb, 0, 0);
2143
2144 /* Allocate a SGL buffer */
2145 SXG_GET_SGL_BUFFER(adapter, SxgSgl, 0);
2146 if (!SxgSgl) {
2147 adapter->Stats.NoSglBuf++;
2148 adapter->stats.tx_errors++;
2149 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "SndPktF1",
2150 adapter, skb, 0, 0);
2151 return (STATUS_RESOURCES);
2152 }
2153 ASSERT(SxgSgl->adapter == adapter);
2154 /*SglBuffer = SXG_SGL_BUFFER(SxgSgl);
2155 SglBufferLength = SXG_SGL_BUF_SIZE; */
2156 SxgSgl->VlanTag.VlanTci = 0;
2157 SxgSgl->VlanTag.VlanTpid = 0;
2158 SxgSgl->Type = SXG_SGL_DUMB;
2159 SxgSgl->DumbPacket = skb;
2160 pSgl = NULL;
2161
2162 /* Call the common sxg_dumb_sgl routine to complete the send. */
2163 return (sxg_dumb_sgl(pSgl, SxgSgl));
2164 }
2165
2166 /*
2167 * sxg_dumb_sgl
2168 *
2169 * Arguments:
2170 * pSgl -
2171 * SxgSgl - struct sxg_scatter_gather
2172 *
2173 * Return Value:
2174 * Status of send operation.
2175 */
2176 static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
2177 struct sxg_scatter_gather *SxgSgl)
2178 {
2179 struct adapter_t *adapter = SxgSgl->adapter;
2180 struct sk_buff *skb = SxgSgl->DumbPacket;
2181 /* For now, all dumb-nic sends go on RSS queue zero */
2182 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
2183 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
2184 struct sxg_cmd *XmtCmd = NULL;
2185 /* u32 Index = 0; */
2186 u32 DataLength = skb->len;
2187 /* unsigned int BufLen; */
2188 /* u32 SglOffset; */
2189 u64 phys_addr;
2190 unsigned long flags;
2191
2192 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
2193 pSgl, SxgSgl, 0, 0);
2194
2195 /* Set aside a pointer to the sgl */
2196 SxgSgl->pSgl = pSgl;
2197
2198 /* Sanity check our SGL format. NOTE: this assert is currently tautological and always passes. */
2199 ASSERT(sizeof(struct sxg_x64_sge) == sizeof(struct sxg_x64_sge));
2200 /* Shouldn't be a vlan tag on this frame */
2201 ASSERT(SxgSgl->VlanTag.VlanTci == 0);
2202 ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
2203
2204 /*
2205 * From here below we work with the SGL placed in our
2206 * buffer.
2207 */
2208
2209 SxgSgl->Sgl.NumberOfElements = 1;
2210
2211 /* Grab the spinlock and acquire a command */
2212 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2213 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2214 if (XmtCmd == NULL) {
2215 /*
2216 * Call sxg_complete_slow_send to see if we can
2217 * free up any XmtRingZero entries and then try again
2218 */
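/*
 * The lock is dropped first, presumably because
 * sxg_complete_slow_send() acquires XmtZeroLock itself while
 * reaping completed ring entries.
 */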
2219
2220 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2221 sxg_complete_slow_send(adapter, 0);
2222 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2223 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2224 if (XmtCmd == NULL) {
2225 adapter->Stats.XmtZeroFull++;
2226 goto abortcmd;
2227 }
2228 }
2229 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
2230 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
2231 /* Update stats */
2232 adapter->stats.tx_packets++;
2233 adapter->stats.tx_bytes += DataLength;
2234 #if XXXTODO /* Stats stuff */
2235 if (SXG_MULTICAST_PACKET(EtherHdr)) {
2236 if (SXG_BROADCAST_PACKET(EtherHdr)) {
2237 adapter->Stats.DumbXmtBcastPkts++;
2238 adapter->Stats.DumbXmtBcastBytes += DataLength;
2239 } else {
2240 adapter->Stats.DumbXmtMcastPkts++;
2241 adapter->Stats.DumbXmtMcastBytes += DataLength;
2242 }
2243 } else {
2244 adapter->Stats.DumbXmtUcastPkts++;
2245 adapter->Stats.DumbXmtUcastBytes += DataLength;
2246 }
2247 #endif
2248 /*
2249 * Fill in the command
2250 * Copy out the first SGE to the command and adjust for offset
2251 */
2252 phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
2253 PCI_DMA_TODEVICE);
2254 memset(XmtCmd, '\0', sizeof(*XmtCmd));
2255 XmtCmd->Buffer.FirstSgeAddress = phys_addr;
2256 XmtCmd->Buffer.FirstSgeLength = DataLength;
2257 XmtCmd->Buffer.SgeOffset = 0;
2258 XmtCmd->Buffer.TotalLength = DataLength;
2259 XmtCmd->SgEntries = 1;
2260 XmtCmd->Flags = 0;
2261 /*
2262 * Advance the transmit cmd descriptor by 1.
2263 * NOTE - See comments in SxgTcpOutput where we write
2264 * to the XmtCmd register regarding CPU ID values and/or
2265 * multiple commands.
2266 */
2267 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE);
2268 adapter->Stats.XmtQLen++; /* Stats within lock */
2269 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2270 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2271 XmtCmd, pSgl, SxgSgl, 0);
2272 return STATUS_SUCCESS;
2273
2274 abortcmd:
2275 /*
2276 * NOTE - Only jump to this label AFTER grabbing the
2277 * XmtZeroLock, and DO NOT DROP IT between the
2278 * command allocation and the following abort.
2279 */
2280 if (XmtCmd) {
2281 SXG_ABORT_CMD(XmtRingInfo);
2282 }
2283 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2284
2285 /*
2286 * failsgl:
2287 * Jump to this label if failure occurs before the
2288 * XmtZeroLock is grabbed
2289 */
2290 adapter->stats.tx_errors++;
2291 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2292 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
2293 /* SxgSgl->DumbPacket is the skb */
2294 // SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket);
2295
2296 return STATUS_FAILURE;
2297 }
2298
2299 /*
2300 * Link management functions
2301 *
2302 * sxg_initialize_link - Initialize the link stuff
2303 *
2304 * Arguments -
2305 * adapter - A pointer to our adapter structure
2306 *
2307 * Return
2308 * status
2309 */
2310 static int sxg_initialize_link(struct adapter_t *adapter)
2311 {
2312 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2313 u32 Value;
2314 u32 ConfigData;
2315 u32 MaxFrame;
2316 int status;
2317
2318 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
2319 adapter, 0, 0, 0);
2320
2321 /* Reset PHY and XGXS module */
2322 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
2323
2324 /* Reset transmit configuration register */
2325 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
2326
2327 /* Reset receive configuration register */
2328 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
2329
2330 /* Reset all MAC modules */
2331 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
2332
2333 /*
2334 * Link address 0
2335 * XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
2336 * is stored with the first byte (0a) in byte 0
2337 * of the Mac address. Possibly reverse?
2338 */
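/*
 * Illustration (little-endian host): for MAC 0a:0b:0c:0d:0e:0f,
 * the u32 load below yields Value = 0x0d0c0b0a.
 */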
2339 Value = *(u32 *) adapter->macaddr;
2340 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
2341 /* also write the MAC address to the MAC. Endian is reversed. */
2342 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
2343 Value = (*(u16 *)&adapter->macaddr[4] & 0x0000FFFF);
2344 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
2345 /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
2346 Value = ntohl(Value);
2347 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
2348 /* Link address 1 */
2349 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
2350 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
2351 /* Link address 2 */
2352 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
2353 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
2354 /* Link address 3 */
2355 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
2356 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
2357
2358 /* Enable MAC modules */
2359 WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
2360
2361 /* Configure MAC */
2362 WRITE_REG(HwRegs->MacConfig1, (
2363 /* Allow sending of pause */
2364 AXGMAC_CFG1_XMT_PAUSE |
2365 /* Enable XMT */
2366 AXGMAC_CFG1_XMT_EN |
2367 /* Enable detection of pause */
2368 AXGMAC_CFG1_RCV_PAUSE |
2369 /* Enable receive */
2370 AXGMAC_CFG1_RCV_EN |
2371 /* short frame detection */
2372 AXGMAC_CFG1_SHORT_ASSERT |
2373 /* Verify frame length */
2374 AXGMAC_CFG1_CHECK_LEN |
2375 /* Generate FCS */
2376 AXGMAC_CFG1_GEN_FCS |
2377 /* Pad frames to 64 bytes */
2378 AXGMAC_CFG1_PAD_64),
2379 TRUE);
2380
2381 /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
2382 if (adapter->JumboEnabled) {
2383 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
2384 }
2385 /*
2386 * AMIIM Configuration Register -
2387 * The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
2388 * (bottom bits) of this register is used to determine the MDC frequency
2389 * as specified in the A-XGMAC Design Document. This value must not be
2390 * zero. The following value (62 or 0x3E) is based on our MAC transmit
2391 * clock frequency (MTCLK) of 312.5 MHz. Given a maximum MDIO clock
2392 * frequency of 2.5 MHz (see the PHY spec), we get:
2393 * 312.5/(2*(X+1)) < 2.5 ==> X = 62.
2394 * This value happens to be the default value for this register, so we
2395 * really don't have to do this.
2396 */
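/*
 * Check the arithmetic: with X = 62, MDC = 312.5 MHz / (2 * (62 + 1))
 * = ~2.48 MHz, just under the 2.5 MHz limit (X = 61 would give
 * ~2.52 MHz, which is too fast).
 */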
2397 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
2398
2399 /* Power up and enable PHY and XAUI/XGXS/Serdes logic */
2400 WRITE_REG(HwRegs->LinkStatus,
2401 (LS_PHY_CLR_RESET |
2402 LS_XGXS_ENABLE |
2403 LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
2404 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
2405
2406 /*
2407 * Per information given by Aeluros, wait 100 ms after removing reset.
2408 * It's not enough to wait for the self-clearing reset bit in reg 0 to
2409 * clear.
2410 */
2411 mdelay(100);
2412
2413 /* Verify the PHY has come up by checking that the Reset bit has
2414 * cleared.
2415 */
2416 status = sxg_read_mdio_reg(adapter,
2417 MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2418 PHY_PMA_CONTROL1, /* PMA/PMD control register */
2419 &Value);
2420 DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value,
2421 (Value & PMA_CONTROL1_RESET));
2422 if (status != STATUS_SUCCESS)
2423 return (STATUS_FAILURE);
2424 if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
2425 return (STATUS_FAILURE);
2426
2427 /* The SERDES should be initialized by now - confirm */
2428 READ_REG(HwRegs->LinkStatus, Value);
2429 if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */
2430 return (STATUS_FAILURE);
2431
2432 /* The XAUI link should also be up - confirm */
2433 if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */
2434 return (STATUS_FAILURE);
2435
2436 /* Initialize the PHY */
2437 status = sxg_phy_init(adapter);
2438 if (status != STATUS_SUCCESS)
2439 return (STATUS_FAILURE);
2440
2441 /* Enable the Link Alarm */
2442
2443 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2444 * LASI_CONTROL - LASI control register
2445 * LASI_CTL_LS_ALARM_ENABLE - enable link alarm bit
2446 */
2447 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2448 LASI_CONTROL,
2449 LASI_CTL_LS_ALARM_ENABLE);
2450 if (status != STATUS_SUCCESS)
2451 return (STATUS_FAILURE);
2452
2453 /* XXXTODO - temporary - verify bit is set */
2454
2455 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2456 * LASI_CONTROL - LASI control register
2457 */
2458 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2459 LASI_CONTROL,
2460 &Value);
2461
2462 if (status != STATUS_SUCCESS)
2463 return (STATUS_FAILURE);
2464 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
2465 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
2466 }
2467 /* Enable receive */
2468 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
2469 ConfigData = (RCV_CONFIG_ENABLE |
2470 RCV_CONFIG_ENPARSE |
2471 RCV_CONFIG_RCVBAD |
2472 RCV_CONFIG_RCVPAUSE |
2473 RCV_CONFIG_TZIPV6 |
2474 RCV_CONFIG_TZIPV4 |
2475 RCV_CONFIG_HASH_16 |
2476 RCV_CONFIG_SOCKET | RCV_CONFIG_BUFSIZE(MaxFrame));
2477 WRITE_REG(HwRegs->RcvConfig, ConfigData, TRUE);
2478
2479 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
2480
2481 /* Mark the link as down. We'll get a link event when it comes up. */
2482 sxg_link_state(adapter, SXG_LINK_DOWN);
2483
2484 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
2485 adapter, 0, 0, 0);
2486 return (STATUS_SUCCESS);
2487 }
2488
2489 /*
2490 * sxg_phy_init - Initialize the PHY
2491 *
2492 * Arguments -
2493 * adapter - A pointer to our adapter structure
2494 *
2495 * Return
2496 * status
2497 */
2498 static int sxg_phy_init(struct adapter_t *adapter)
2499 {
2500 u32 Value;
2501 struct phy_ucode *p;
2502 int status;
2503
2504 DBG_ERROR("ENTER %s\n", __func__);
2505
2506 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2507 * 0xC205 - PHY ID register (?)
2508 * &Value - XXXTODO - add def
2509 */
2510 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2511 0xC205,
2512 &Value);
2513 if (status != STATUS_SUCCESS)
2514 return (STATUS_FAILURE);
2515
2516 if (Value == 0x0012) {
2517 /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
2518 DBG_ERROR("AEL2005C PHY detected. Downloading PHY \
2519 microcode.\n");
2520
2521 /* Initialize AEL2005C PHY and download PHY microcode */
2522 for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
2523 if (p->Addr == 0) {
2524 /* if address == 0, data == sleep time in ms */
2525 mdelay(p->Data);
2526 } else {
2527 /* write the given data to the specified address */
2528 status = sxg_write_mdio_reg(adapter,
2529 MIIM_DEV_PHY_PMA,
2530 /* PHY address */
2531 p->Addr,
2532 /* PHY data */
2533 p->Data);
2534 if (status != STATUS_SUCCESS)
2535 return (STATUS_FAILURE);
2536 }
2537 }
2538 }
2539 DBG_ERROR("EXIT %s\n", __func__);
2540
2541 return (STATUS_SUCCESS);
2542 }
2543
2544 /*
2545 * sxg_link_event - Process a link event notification from the card
2546 *
2547 * Arguments -
2548 * adapter - A pointer to our adapter structure
2549 *
2550 * Return
2551 * None
2552 */
2553 static void sxg_link_event(struct adapter_t *adapter)
2554 {
2555 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2556 enum SXG_LINK_STATE LinkState;
2557 int status;
2558 u32 Value;
2559
2560 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
2561 adapter, 0, 0, 0);
2562 DBG_ERROR("ENTER %s\n", __func__);
2563
2564 /* Check the Link Status register. We should have a Link Alarm. */
2565 READ_REG(HwRegs->LinkStatus, Value);
2566 if (Value & LS_LINK_ALARM) {
2567 /*
2568 * We got a Link Status alarm. First, pause to let the
2569 * link state settle (it can bounce a number of times)
2570 */
2571 mdelay(10);
2572
2573 /* Now clear the alarm by reading the LASI status register. */
2574 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
2575 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2576 /* LASI status register */
2577 LASI_STATUS,
2578 &Value);
2579 if (status != STATUS_SUCCESS) {
2580 DBG_ERROR("Error reading LASI Status MDIO register!\n");
2581 sxg_link_state(adapter, SXG_LINK_DOWN);
2582 /* ASSERT(0); */
2583 }
2584 ASSERT(Value & LASI_STATUS_LS_ALARM);
2585
2586 /* Now get and set the link state */
2587 LinkState = sxg_get_link_state(adapter);
2588 sxg_link_state(adapter, LinkState);
2589 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
2590 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
2591 } else {
2592 /*
2593 * XXXTODO - Assuming Link Attention is only being generated
2594 * for the Link Alarm pin (and not for a XAUI Link Status change),
2595 * then it's impossible to get here. Yet we've gotten here
2596 * twice (under extreme conditions - bouncing the link up and
2597 * down many times a second). Needs further investigation.
2598 */
2599 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
2600 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
2601 /* ASSERT(0); */
2602 }
2603 DBG_ERROR("EXIT %s\n", __func__);
2604
2605 }
2606
2607 /*
2608 * sxg_get_link_state - Determine if the link is up or down
2609 *
2610 * Arguments -
2611 * adapter - A pointer to our adapter structure
2612 *
2613 * Return
2614 * Link State
2615 */
2616 static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
2617 {
2618 int status;
2619 u32 Value;
2620
2621 DBG_ERROR("ENTER %s\n", __func__);
2622
2623 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
2624 adapter, 0, 0, 0);
2625
2626 /*
2627 * Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
2628 * the following 3 bits (from 3 different MDIO registers) are all true.
2629 */
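/*
 * The three bits checked below:
 *   1. PMA/PMD: PHY_PMA_RCV_DET     -> PMA_RCV_DETECT
 *   2. PCS:     PHY_PCS_10G_STATUS1 -> PCS_10B_BLOCK_LOCK
 *   3. XS:      PHY_XS_LANE_STATUS  -> XS_LANE_ALIGN
 */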
2630
2631 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
2632 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2633 /* PMA/PMD Receive Signal Detect register */
2634 PHY_PMA_RCV_DET,
2635 &Value);
2636 if (status != STATUS_SUCCESS)
2637 goto bad;
2638
2639 /* If PMA/PMD receive signal detect is 0, then the link is down */
2640 if (!(Value & PMA_RCV_DETECT))
2641 return (SXG_LINK_DOWN);
2642
2643 /* MIIM_DEV_PHY_PCS - PHY PCS module */
2644 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS,
2645 /* PCS 10GBASE-R Status 1 register */
2646 PHY_PCS_10G_STATUS1,
2647 &Value);
2648 if (status != STATUS_SUCCESS)
2649 goto bad;
2650
2651 /* If PCS is not locked to receive blocks, then the link is down */
2652 if (!(Value & PCS_10B_BLOCK_LOCK))
2653 return (SXG_LINK_DOWN);
2654
2655 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,/* PHY XS module */
2656 /* XS Lane Status register */
2657 PHY_XS_LANE_STATUS,
2658 &Value);
2659 if (status != STATUS_SUCCESS)
2660 goto bad;
2661
2662 /* If XS transmit lanes are not aligned, then the link is down */
2663 if (!(Value & XS_LANE_ALIGN))
2664 return (SXG_LINK_DOWN);
2665
2666 /* All 3 bits are true, so the link is up */
2667 DBG_ERROR("EXIT %s\n", __func__);
2668
2669 return (SXG_LINK_UP);
2670
2671 bad:
2672 /* An error occurred reading an MDIO register. This shouldn't happen. */
2673 DBG_ERROR("Error reading an MDIO register!\n");
2674 ASSERT(0);
2675 return (SXG_LINK_DOWN);
2676 }
2677
2678 static void sxg_indicate_link_state(struct adapter_t *adapter,
2679 enum SXG_LINK_STATE LinkState)
2680 {
2681 if (adapter->LinkState == SXG_LINK_UP) {
2682 DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
2683 __func__);
2684 netif_start_queue(adapter->netdev);
2685 } else {
2686 DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
2687 __func__);
2688 netif_stop_queue(adapter->netdev);
2689 }
2690 }
2691
2692 /*
2693 * sxg_link_state - Set the link state and if necessary, indicate.
2694 * This routine is the central point of processing for all link state changes.
2695 * Nothing else in the driver should alter the link state or perform
2696 * link state indications
2697 *
2698 * Arguments -
2699 * adapter - A pointer to our adapter structure
2700 * LinkState - The link state
2701 *
2702 * Return
2703 * None
2704 */
2705 static void sxg_link_state(struct adapter_t *adapter,
2706 enum SXG_LINK_STATE LinkState)
2707 {
2708 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
2709 adapter, LinkState, adapter->LinkState, adapter->State);
2710
2711 DBG_ERROR("ENTER %s\n", __func__);
2712
2713 /*
2714 * Hold the adapter lock during this routine. Maybe move
2715 * the lock to the caller.
2716 */
2717 /* IMP TODO : Check if we can survive without taking this lock */
2718 // spin_lock(&adapter->AdapterLock);
2719 if (LinkState == adapter->LinkState) {
2720 /* Nothing changed.. */
2721 // spin_unlock(&adapter->AdapterLock);
2722 DBG_ERROR("EXIT #0 %s. Link status = %d\n",
2723 __func__, LinkState);
2724 return;
2725 }
2726 /* Save the adapter state */
2727 adapter->LinkState = LinkState;
2728
2729 /* Drop the lock and indicate link state */
2730 // spin_unlock(&adapter->AdapterLock);
2731 DBG_ERROR("EXIT #1 %s\n", __func__);
2732
2733 sxg_indicate_link_state(adapter, LinkState);
2734 }
2735
2736 /*
2737 * sxg_write_mdio_reg - Write to a register on the MDIO bus
2738 *
2739 * Arguments -
2740 * adapter - A pointer to our adapter structure
2741 * DevAddr - MDIO device number being addressed
2742 * RegAddr - register address for the specified MDIO device
2743 * Value - value to write to the MDIO register
2744 *
2745 * Return
2746 * status
2747 */
2748 static int sxg_write_mdio_reg(struct adapter_t *adapter,
2749 u32 DevAddr, u32 RegAddr, u32 Value)
2750 {
2751 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2752 /* Address operation (written to MIIM field reg) */
2753 u32 AddrOp;
2754 /* Write operation (written to MIIM field reg) */
2755 u32 WriteOp;
2756 u32 Cmd;/* Command (written to MIIM command reg) */
2757 u32 ValueRead;
2758 u32 Timeout;
2759
2760 /* DBG_ERROR("ENTER %s\n", __func__); */
2761
2762 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
2763 adapter, 0, 0, 0);
2764
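/*
 * AMIIM accesses are two-phase: an address cycle (MIIM_OP_ADDR)
 * selects the target MDIO register, then a data cycle (here
 * MIIM_OP_WRITE) moves the data. Each cycle is started via the
 * command register and polled to completion via the indicator
 * register.
 */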
2765 /* Ensure values don't exceed field width */
2766 DevAddr &= 0x001F; /* 5-bit field */
2767 RegAddr &= 0xFFFF; /* 16-bit field */
2768 Value &= 0xFFFF; /* 16-bit field */
2769
2770 /* Set MIIM field register bits for an MIIM address operation */
2771 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2772 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2773 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2774 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
2775
2776 /* Set MIIM field register bits for an MIIM write operation */
2777 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2778 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2779 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2780 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
2781
2782 /* Set MIIM command register bits to execute an MIIM command */
2783 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
2784
2785 /* Reset the command register command bit (in case it's not 0) */
2786 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2787
2788 /* MIIM write to set the address of the specified MDIO register */
2789 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
2790
2791 /* Write to MIIM Command Register to execute to address operation */
2792 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2793
2794 /* Poll AMIIM Indicator register to wait for completion */
2795 Timeout = SXG_LINK_TIMEOUT;
2796 do {
2797 udelay(100); /* Timeout in 100us units */
2798 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2799 if (--Timeout == 0) {
2800 return (STATUS_FAILURE);
2801 }
2802 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2803
2804 /* Reset the command register command bit */
2805 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2806
2807 /* MIIM write to set up an MDIO write operation */
2808 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
2809
2810 /* Write to MIIM Command Register to execute the write operation */
2811 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2812
2813 /* Poll AMIIM Indicator register to wait for completion */
2814 Timeout = SXG_LINK_TIMEOUT;
2815 do {
2816 udelay(100); /* Timeout in 100us units */
2817 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2818 if (--Timeout == 0) {
2819 return (STATUS_FAILURE);
2820 }
2821 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2822
2823 /* DBG_ERROR("EXIT %s\n", __func__); */
2824
2825 return (STATUS_SUCCESS);
2826 }
2827
2828 /*
2829 * sxg_read_mdio_reg - Read a register on the MDIO bus
2830 *
2831 * Arguments -
2832 * adapter - A pointer to our adapter structure
2833 * DevAddr - MDIO device number being addressed
2834 * RegAddr - register address for the specified MDIO device
2835 * pValue - pointer to where to put data read from the MDIO register
2836 *
2837 * Return
2838 * status
2839 */
2840 static int sxg_read_mdio_reg(struct adapter_t *adapter,
2841 u32 DevAddr, u32 RegAddr, u32 *pValue)
2842 {
2843 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2844 u32 AddrOp; /* Address operation (written to MIIM field reg) */
2845 u32 ReadOp; /* Read operation (written to MIIM field reg) */
2846 u32 Cmd; /* Command (written to MIIM command reg) */
2847 u32 ValueRead;
2848 u32 Timeout;
2849
2850 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RdMDIO",
2851 adapter, 0, 0, 0);
2852 DBG_ERROR("ENTER %s\n", __func__);
2853
2854 /* Ensure values don't exceed field width */
2855 DevAddr &= 0x001F; /* 5-bit field */
2856 RegAddr &= 0xFFFF; /* 16-bit field */
2857
2858 /* Set MIIM field register bits for an MIIM address operation */
2859 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2860 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2861 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2862 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
2863
2864 /* Set MIIM field register bits for an MIIM read operation */
2865 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2866 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2867 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2868 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
2869
2870 /* Set MIIM command register bits to execute an MIIM command */
2871 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
2872
2873 /* Reset the command register command bit (in case it's not 0) */
2874 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2875
2876 /* MIIM write to set the address of the specified MDIO register */
2877 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
2878
2879 /* Write to MIIM Command Register to execute to address operation */
2880 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2881
2882 /* Poll AMIIM Indicator register to wait for completion */
2883 Timeout = SXG_LINK_TIMEOUT;
2884 do {
2885 udelay(100); /* Timeout in 100us units */
2886 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2887 if (--Timeout == 0) {
2888 DBG_ERROR("EXIT %s with STATUS_FAILURE 1\n", __FUNCTION__);
2889
2890 return (STATUS_FAILURE);
2891 }
2892 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2893
2894 /* Reset the command register command bit */
2895 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2896
2897 /* MIIM write to set up an MDIO register read operation */
2898 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
2899
2900 /* Write to MIIM Command Register to execute the read operation */
2901 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2902
2903 /* Poll AMIIM Indicator register to wait for completion */
2904 Timeout = SXG_LINK_TIMEOUT;
2905 do {
2906 udelay(100); /* Timeout in 100us units */
2907 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2908 if (--Timeout == 0) {
2909 DBG_ERROR("EXIT %s with STATUS_FAILURE 2\n", __FUNCTION__);
2910
2911 return (STATUS_FAILURE);
2912 }
2913 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2914
2915 /* Read the MDIO register data back from the field register */
2916 READ_REG(HwRegs->MacAmiimField, *pValue);
2917 *pValue &= 0xFFFF; /* data is in the lower 16 bits */
2918
2919 DBG_ERROR("EXIT %s\n", __FUNCTION__);
2920
2921 return (STATUS_SUCCESS);
2922 }
2923
2924 /*
2925 * Functions to obtain the CRC corresponding to the destination mac address.
2926 * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
2927 * the polynomial:
2928 * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5
2929 * + x^4 + x^2 + x + 1.
2930 *
2931 * After the CRC for the 6 bytes is generated (but before the value is
2932 * complemented), we must then transpose the value and return bits 30-23.
2933 */
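/*
 * Usage sketch (illustrative): the 8-bit hash returned by
 * sxg_mcast_get_mac_hash() is truncated to 6 bits by
 * sxg_mcast_set_bit() and used as a bit index into the 64-bit
 * multicast mask, i.e.:
 *     adapter->MulticastMask |= (u64)1 << (hash & 0x3F);
 */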
2934 static u32 sxg_crc_table[256]; /* Table of CRCs for all possible byte values */
2935 static u32 sxg_crc_init; /* Is table initialized */
2936
2937 /* Construct the CRC32 table */
2938 static void sxg_mcast_init_crc32(void)
2939 {
2940 u32 c; /* CRC shift register */
2941 u32 e = 0; /* Poly X-or pattern */
2942 int i; /* counter */
2943 int k; /* byte being shifted into crc */
2944
2945 static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
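/*
 * p[] lists the exponents of the CRC-32 polynomial above (the x^32
 * term is implicit); reflecting them via (31 - p[i]) builds the
 * reversed polynomial 0xEDB88320 used by the right-shifting loop.
 */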
2946
2947 for (i = 0; i < ARRAY_SIZE(p); i++) {
2948 e |= 1L << (31 - p[i]);
2949 }
2950
2951 for (i = 1; i < 256; i++) {
2952 c = i;
2953 for (k = 8; k; k--) {
2954 c = c & 1 ? (c >> 1) ^ e : c >> 1;
2955 }
2956 sxg_crc_table[i] = c;
2957 }
2958 }
2959
2960 /*
2961 * Return the MAC hash as described above.
2962 */
2963 static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
2964 {
2965 u32 crc;
2966 char *p;
2967 int i;
2968 unsigned char machash = 0;
2969
2970 if (!sxg_crc_init) {
2971 sxg_mcast_init_crc32();
2972 sxg_crc_init = 1;
2973 }
2974
2975 crc = 0xFFFFFFFF; /* Preload shift register, per crc-32 spec */
2976 for (i = 0, p = macaddr; i < 6; ++p, ++i) {
2977 crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
2978 }
2979
2980 /* Return bits 1-8, transposed */
2981 for (i = 1; i < 9; i++) {
2982 machash |= (((crc >> i) & 1) << (8 - i));
2983 }
2984
2985 return (machash);
2986 }
2987
2988 static void sxg_mcast_set_mask(struct adapter_t *adapter)
2989 {
2990 struct sxg_ucode_regs *sxg_regs = adapter->UcodeRegs;
2991
2992 DBG_ERROR("%s ENTER (%s) macopts[%x] mask[%llx]\n", __func__,
2993 adapter->netdev->name, (unsigned int)adapter->MacFilter,
2994 adapter->MulticastMask);
2995
2996 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
2997 /*
2998 * Turn on all multicast addresses. We have to do this for
2999 * promiscuous mode as well as ALLMCAST mode. It saves the
3000 * microcode from having to keep state about the MAC configuration.
3001 */
3002 /* DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n",
3003 * __func__);
3004 */
3005 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
3006 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
3007 /* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high \
3008 * 0xFFFFFFFF\n",__func__, adapter->netdev->name);
3009 */
3010
3011 } else {
3012 /*
3013 * Commit our multicast mask to the SLIC by writing to the
3014 * multicast address mask registers
3015 */
3016 DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
3017 __func__, adapter->netdev->name,
3018 ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
3019 ((ulong)
3020 ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
3021
3022 WRITE_REG(sxg_regs->McastLow,
3023 (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
3024 WRITE_REG(sxg_regs->McastHigh,
3025 (u32) ((adapter->
3026 MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
3027 }
3028 }
3029
3030 /*
3031 * Allocate a mcast_address structure to hold the multicast address.
3032 * Link it in.
3033 */
3034 static int sxg_mcast_add_list(struct adapter_t *adapter, char *address)
3035 {
3036 struct mcast_address *mcaddr, *mlist;
3037 bool equaladdr;
3038
3039 /* Check to see if it already exists */
3040 mlist = adapter->mcastaddrs;
3041 while (mlist) {
3042 ETHER_EQ_ADDR(mlist->address, address, equaladdr);
3043 if (equaladdr) {
3044 return (STATUS_SUCCESS);
3045 }
3046 mlist = mlist->next;
3047 }
3048
3049 /* Doesn't already exist. Allocate a structure to hold it */
3050 mcaddr = kmalloc(sizeof(struct mcast_address), GFP_ATOMIC);
3051 if (mcaddr == NULL)
3052 return 1;
3053
3054 memcpy(mcaddr->address, address, 6);
3055
3056 mcaddr->next = adapter->mcastaddrs;
3057 adapter->mcastaddrs = mcaddr;
3058
3059 return (STATUS_SUCCESS);
3060 }
3061
3062 static void sxg_mcast_set_bit(struct adapter_t *adapter, char *address)
3063 {
3064 unsigned char crcpoly;
3065
3066 /* Get the CRC-based hash for the mac address */
3067 crcpoly = sxg_mcast_get_mac_hash(address);
3068
3069 /*
3070 * We only have space on the SLIC for 64 entries. Lop
3071 * off the top two bits. (2^6 = 64)
3072 */
3073 crcpoly &= 0x3F;
3074
3075 /* OR in the new bit into our 64 bit mask. */
3076 adapter->MulticastMask |= (u64) 1 << crcpoly;
3077 }
3078
3079 static void sxg_mcast_set_list(struct net_device *dev)
3080 {
3081 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
3082
3083 ASSERT(adapter);
3084 if (dev->flags & IFF_PROMISC) {
3085 adapter->MacFilter |= MAC_PROMISC;
3086 }
3087 /* XXX handle other flags as well */
3088 sxg_mcast_set_mask(adapter);
3089 }
3090
3091 static void sxg_unmap_mmio_space(struct adapter_t *adapter)
3092 {
3093 #if LINUX_FREES_ADAPTER_RESOURCES
3094 /*
3095 * if (adapter->Regs) {
3096 * iounmap(adapter->Regs);
3097 * }
3098 * adapter->slic_regs = NULL;
3099 */
3100 #endif
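/*
 * Intentionally a no-op at present; the actual iounmap() calls
 * live in sxg_unmap_resources().
 */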
3101 }
3102
3103 void sxg_free_sgl_buffers(struct adapter_t *adapter)
3104 {
3105 struct list_entry *ple;
3106 struct sxg_scatter_gather *Sgl;
3107
3108 while(!(IsListEmpty(&adapter->AllSglBuffers))) {
3109 ple = RemoveHeadList(&adapter->AllSglBuffers);
3110 Sgl = container_of(ple, struct sxg_scatter_gather, AllList);
3111 kfree(Sgl);
3112 adapter->AllSglBufferCount--;
3113 }
3114 }
3115
3116 void sxg_free_rcvblocks(struct adapter_t *adapter)
3117 {
3118 u32 i;
3119 void *temp_RcvBlock;
3120 struct list_entry *ple;
3121 struct sxg_rcv_block_hdr *RcvBlockHdr;
3122 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3123 ASSERT((adapter->state == SXG_STATE_INITIALIZING) ||
3124 (adapter->state == SXG_STATE_HALTING));
3125 while(!(IsListEmpty(&adapter->AllRcvBlocks))) {
3126
3127 ple = RemoveHeadList(&adapter->AllRcvBlocks);
3128 RcvBlockHdr = container_of(ple, struct sxg_rcv_block_hdr, AllList);
3129
3130 if(RcvBlockHdr->VirtualAddress) {
3131 temp_RcvBlock = RcvBlockHdr->VirtualAddress;
3132
3133 for(i=0; i< SXG_RCV_DESCRIPTORS_PER_BLOCK;
3134 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3135 RcvDataBufferHdr =
3136 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3137 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3138 }
3139 }
3140
3141 pci_free_consistent(adapter->pcidev,
3142 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
3143 RcvBlockHdr->VirtualAddress,
3144 RcvBlockHdr->PhysicalAddress);
3145 adapter->AllRcvBlockCount--;
3146 }
3147 ASSERT(adapter->AllRcvBlockCount == 0);
3148 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3149 adapter, 0, 0, 0);
3150 }
3151 void sxg_free_mcast_addrs(struct adapter_t *adapter)
3152 {
3153 struct sxg_multicast_address *address;
3154 while(adapter->MulticastAddrs) {
3155 address = adapter->MulticastAddrs;
3156 adapter->MulticastAddrs = address->Next;
3157 kfree(address);
3158 }
3159
3160 adapter->MulticastMask = 0;
3161 }
3162
3163 void sxg_unmap_resources(struct adapter_t *adapter)
3164 {
3165 if(adapter->HwRegs) {
3166 iounmap((void *)adapter->HwRegs);
3167 }
3168 if(adapter->UcodeRegs) {
3169 iounmap((void *)adapter->UcodeRegs);
3170 }
3171
3172 ASSERT(adapter->AllRcvBlockCount == 0);
3173 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3174 adapter, 0, 0, 0);
3175 }
3176
3177
3178
3179 /*
3180 * sxg_free_resources - Free everything allocated in SxgAllocateResources
3181 *
3182 * Arguments -
3183 * adapter - A pointer to our adapter structure
3184 *
3185 * Return
3186 * none
3187 */
3188 void sxg_free_resources(struct adapter_t *adapter)
3189 {
3190 u32 RssIds, IsrCount;
3191 struct net_device *netdev = adapter->netdev;
3192 RssIds = SXG_RSS_CPU_COUNT(adapter);
3193 IsrCount = adapter->MsiEnabled ? RssIds : 1;
3194
3195 if (adapter->BasicAllocations == FALSE) {
3196 /*
3197 * No allocations have been made, including spinlocks,
3198 * or listhead initializations. Return.
3199 */
3200 return;
3201 }
3202
3203 /* Free Irq */
3204 free_irq(adapter->netdev->irq, netdev);
3205
3206 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
3207 sxg_free_rcvblocks(adapter);
3208 }
3209 if (!(IsListEmpty(&adapter->AllSglBuffers))) {
3210 sxg_free_sgl_buffers(adapter);
3211 }
3212
3213 if (adapter->XmtRingZeroIndex) {
3214 pci_free_consistent(adapter->pcidev,
3215 sizeof(u32),
3216 adapter->XmtRingZeroIndex,
3217 adapter->PXmtRingZeroIndex);
3218 }
3219 if (adapter->Isr) {
3220 pci_free_consistent(adapter->pcidev,
3221 sizeof(u32) * IsrCount,
3222 adapter->Isr, adapter->PIsr);
3223 }
3224
3225 if (adapter->EventRings) {
3226 pci_free_consistent(adapter->pcidev,
3227 sizeof(struct sxg_event_ring) * RssIds,
3228 adapter->EventRings, adapter->PEventRings);
3229 }
3230 if (adapter->RcvRings) {
3231 pci_free_consistent(adapter->pcidev,
3232 sizeof(struct sxg_rcv_ring) * 1,
3233 adapter->RcvRings,
3234 adapter->PRcvRings);
3235 adapter->RcvRings = NULL;
3236 }
3237
3238 if(adapter->XmtRings) {
3239 pci_free_consistent(adapter->pcidev,
3240 sizeof(struct sxg_xmt_ring) * 1,
3241 adapter->XmtRings,
3242 adapter->PXmtRings);
3243 adapter->XmtRings = NULL;
3244 }
3245
3246 if (adapter->ucode_stats) {
3247 pci_unmap_single(adapter->pcidev, adapter->pucode_stats,
3248 sizeof(struct sxg_ucode_stats), PCI_DMA_FROMDEVICE);
3250 adapter->ucode_stats = NULL;
3251 }
3252
3253
3254 /* Unmap register spaces */
3255 sxg_unmap_resources(adapter);
3256
3257 sxg_free_mcast_addrs(adapter);
3258
3259 adapter->BasicAllocations = FALSE;
3260
3261 }
3262
3263 /*
3264 * sxg_allocate_complete -
3265 *
3266 * This routine is called when a memory allocation has completed.
3267 *
3268 * Arguments -
3269 * struct adapter_t * - Our adapter structure
3270 * VirtualAddress - Memory virtual address
3271 * PhysicalAddress - Memory physical address
3272 * Length - Length of memory allocated (or 0)
3273 * Context - The type of buffer allocated
3274 *
3275 * Return
3276 * None.
3277 */
3278 static void sxg_allocate_complete(struct adapter_t *adapter,
3279 void *VirtualAddress,
3280 dma_addr_t PhysicalAddress,
3281 u32 Length, enum sxg_buffer_type Context)
3282 {
3283 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
3284 adapter, VirtualAddress, Length, Context);
3285 ASSERT(atomic_read(&adapter->pending_allocations));
3286 atomic_dec(&adapter->pending_allocations);
3287
3288 switch (Context) {
3289
3290 case SXG_BUFFER_TYPE_RCV:
3291 sxg_allocate_rcvblock_complete(adapter,
3292 VirtualAddress,
3293 PhysicalAddress, Length);
3294 break;
3295 case SXG_BUFFER_TYPE_SGL:
3296 sxg_allocate_sgl_buffer_complete(adapter, (struct sxg_scatter_gather *)
3297 VirtualAddress,
3298 PhysicalAddress, Length);
3299 break;
3300 }
3301 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp",
3302 adapter, VirtualAddress, Length, Context);
3303 }
3304
3305 /*
3306 * sxg_allocate_buffer_memory - Shared memory allocation routine used for
3307 * synchronous and asynchronous buffer allocations
3308 *
3309 * Arguments -
3310 * adapter - A pointer to our adapter structure
3311 * Size - block size to allocate
3312 * BufferType - Type of buffer to allocate
3313 *
3314 * Return
3315 * int
3316 */
3317 static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
3318 u32 Size, enum sxg_buffer_type BufferType)
3319 {
3320 int status;
3321 void *Buffer;
3322 dma_addr_t pBuffer;
3323
3324 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
3325 adapter, Size, BufferType, 0);
3326 /*
3327 * XXXTODO: grab the adapter lock and check the state; anything other
3328 * than INITIALIZING or RUNNING should fail, to prevent allocations in
3329 * an improper driver state. This check is not currently implemented.
3330 */
3331
3332 atomic_inc(&adapter->pending_allocations);
3333
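/*
 * Receive blocks are shared with the card and need a coherent DMA
 * allocation; SGL buffers live in host memory with no up-front
 * mapping (pBuffer is left NULL for them), so plain kzalloc()
 * suffices there.
 */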
3334 if(BufferType != SXG_BUFFER_TYPE_SGL)
3335 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
3336 else {
3337 Buffer = kzalloc(Size, GFP_ATOMIC);
3338 pBuffer = (dma_addr_t)NULL;
3339 }
3340 if (Buffer == NULL) {
3341 /*
3342 * Decrement the AllocationsPending count while holding
3343 * the lock. Pause processing relies on this
3344 */
3345 atomic_dec(&adapter->pending_allocations);
3346 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
3347 adapter, Size, BufferType, 0);
3348 return (STATUS_RESOURCES);
3349 }
3350 sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType);
3351 status = STATUS_SUCCESS;
3352
3353 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem",
3354 adapter, Size, BufferType, status);
3355 return (status);
3356 }
3357
3358 /*
3359 * sxg_allocate_rcvblock_complete - Complete a receive descriptor
3360 * block allocation
3361 *
3362 * Arguments -
3363 * adapter - A pointer to our adapter structure
3364 * RcvBlock - receive block virtual address
3365 * PhysicalAddress - Physical address
3366 * Length - Memory length
3367 *
3368 * Return
3369 */
3370 static void sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
3371 void *RcvBlock,
3372 dma_addr_t PhysicalAddress,
3373 u32 Length)
3374 {
3375 u32 i;
3376 u32 BufferSize = adapter->ReceiveBufferSize;
3377 u64 Paddr;
3378 void *temp_RcvBlock;
3379 struct sxg_rcv_block_hdr *RcvBlockHdr;
3380 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3381 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
3382 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
3383
3384 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk",
3385 adapter, RcvBlock, Length, 0);
3386 if (RcvBlock == NULL) {
3387 goto fail;
3388 }
3389 memset(RcvBlock, 0, Length);
3390 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
3391 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
3392 ASSERT(Length == SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE));
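/*
 * A receive block packs several regions into a single allocation,
 * located via the offset macros used below: the per-buffer data
 * headers, a struct sxg_rcv_block_hdr, a struct
 * sxg_rcv_descriptor_block, and its header.
 */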
3393 /*
3394 * First, initialize the contained pool of receive data buffers.
3395 * This initialization requires NBL/NB/MDL allocations, if any of them
3396 * fail, free the block and return without queueing the shared memory
3397 */
3398 //RcvDataBuffer = RcvBlock;
3399 temp_RcvBlock = RcvBlock;
3400 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3401 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3402 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
3403 temp_RcvBlock;
3404 /* For FREE macro assertion */
3405 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
3406 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
3407 if (RcvDataBufferHdr->SxgDumbRcvPacket == NULL)
3408 goto fail;
3409
3410 }
3411
3412 /*
3413 * Place this entire block of memory on the AllRcvBlocks queue so it
3414 * can be freed later
3415 */
3416
3417 RcvBlockHdr = (struct sxg_rcv_block_hdr *) ((unsigned char *)RcvBlock +
3418 SXG_RCV_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE));
3419 RcvBlockHdr->VirtualAddress = RcvBlock;
3420 RcvBlockHdr->PhysicalAddress = PhysicalAddress;
3421 spin_lock(&adapter->RcvQLock);
3422 adapter->AllRcvBlockCount++;
3423 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
3424 spin_unlock(&adapter->RcvQLock);
3425
3426 /* Now free the contained receive data buffers that we
3427 * initialized above */
3428 temp_RcvBlock = RcvBlock;
3429 for (i = 0, Paddr = PhysicalAddress;
3430 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3431 i++, Paddr += SXG_RCV_DATA_HDR_SIZE,
3432 temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3433 RcvDataBufferHdr =
3434 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3435 spin_lock(&adapter->RcvQLock);
3436 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3437 spin_unlock(&adapter->RcvQLock);
3438 }
3439
3440 /* Locate the descriptor block and put it on a separate free queue */
3441 RcvDescriptorBlock =
3442 (struct sxg_rcv_descriptor_block *) ((unsigned char *)RcvBlock +
3443 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
3444 (SXG_RCV_DATA_HDR_SIZE));
3445 RcvDescriptorBlockHdr =
3446 (struct sxg_rcv_descriptor_block_hdr *) ((unsigned char *)RcvBlock +
3447 SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET
3448 (SXG_RCV_DATA_HDR_SIZE));
3449 RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock;
3450 RcvDescriptorBlockHdr->PhysicalAddress = Paddr;
3451 spin_lock(&adapter->RcvQLock);
3452 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr);
3453 spin_unlock(&adapter->RcvQLock);
3454 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
3455 adapter, RcvBlock, Length, 0);
3456 return;
3457 fail:
3458 /* Free any allocated resources */
3459 if (RcvBlock) {
3460 temp_RcvBlock = RcvBlock;
3461 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3462 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3463 RcvDataBufferHdr =
3464 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3465 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3466 }
3467 pci_free_consistent(adapter->pcidev,
3468 Length, RcvBlock, PhysicalAddress);
3469 }
3470 DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
3471 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
3472 adapter, adapter->FreeRcvBufferCount,
3473 adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
3474 adapter->Stats.NoMem++;
3475 }
3476
3477 /*
3478 * sxg_allocate_sgl_buffer_complete - Complete a SGL buffer allocation
3479 *
3480 * Arguments -
3481 * adapter - A pointer to our adapter structure
3482 * SxgSgl - struct sxg_scatter_gather buffer
3483 * PhysicalAddress - Physical address
3484 * Length - Memory length
3485 *
3486 * Return
3487 */
3488 static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
3489 struct sxg_scatter_gather *SxgSgl,
3490 dma_addr_t PhysicalAddress,
3491 u32 Length)
3492 {
3493 unsigned long sgl_flags;
3494 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
3495 adapter, SxgSgl, Length, 0);
3496 if(!in_irq())
3497 spin_lock_irqsave(&adapter->SglQLock, sgl_flags);
3498 else
3499 spin_lock(&adapter->SglQLock);
3500 adapter->AllSglBufferCount++;
3501 /* PhysicalAddress; */
3502 SxgSgl->PhysicalAddress = PhysicalAddress;
3503 /* Initialize backpointer once */
3504 SxgSgl->adapter = adapter;
3505 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
3506 if(!in_irq())
3507 spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
3508 else
3509 spin_unlock(&adapter->SglQLock);
3510 SxgSgl->State = SXG_BUFFER_BUSY;
3511 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL, in_irq());
3512 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
3513 adapter, SxgSgl, Length, 0);
3514 }
3515
3516
3517 static int sxg_adapter_set_hwaddr(struct adapter_t *adapter)
3518 {
3519 /*
3520 * DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] \
3521 * funct#[%d]\n", __func__, card->config_set,
3522 * adapter->port, adapter->physport, adapter->functionnumber);
3523 *
3524 * sxg_dbg_macaddrs(adapter);
3525 */
3526 /* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n",
3527 * __FUNCTION__);
3528 */
3529
3530 /* sxg_dbg_macaddrs(adapter); */
3531
3532 struct net_device *dev;
3533
3534 if (!adapter)
3535 return -EBUSY;
3536 dev = adapter->netdev;
3537 if (!dev) {
3538 printk(KERN_ERR "sxg: Dev is Null\n");
3539 return -EBUSY;
3540 }
3541
3542 DBG_ERROR("%s ENTER (%s)\n", __func__, dev->name);
3543 if (netif_running(dev)) {
3544 return -EBUSY;
3545 }
3546
	/* If no current address has been set, fall back to the factory MAC */
	if (is_zero_ether_addr(adapter->currmacaddr))
		memcpy(adapter->currmacaddr, adapter->macaddr, ETH_ALEN);
	memcpy(dev->dev_addr, adapter->currmacaddr, ETH_ALEN);
	memcpy(dev->perm_addr, adapter->currmacaddr, ETH_ALEN);
3558 /* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
3559 sxg_dbg_macaddrs(adapter);
3560
3561 return 0;
3562 }
3563
3564 #if XXXTODO
3565 static int sxg_mac_set_address(struct net_device *dev, void *ptr)
3566 {
	struct adapter_t *adapter = netdev_priv(dev);
	struct sockaddr *addr = ptr;

	if (!adapter)
		return -EINVAL;

	DBG_ERROR("%s ENTER (%s)\n", __func__, dev->name);

	if (netif_running(dev))
		return -EBUSY;
3578 DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
3579 __func__, adapter->netdev->name, adapter->currmacaddr[0],
3580 adapter->currmacaddr[1], adapter->currmacaddr[2],
3581 adapter->currmacaddr[3], adapter->currmacaddr[4],
3582 adapter->currmacaddr[5]);
3583 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3584 memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
3585 DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
3586 __func__, adapter->netdev->name, adapter->currmacaddr[0],
3587 adapter->currmacaddr[1], adapter->currmacaddr[2],
3588 adapter->currmacaddr[3], adapter->currmacaddr[4],
3589 adapter->currmacaddr[5]);
3590
3591 sxg_config_set(adapter, TRUE);
3592 return 0;
3593 }
3594 #endif
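
/*
 * Illustrative only (a sketch, not code present in this driver): when the
 * XXXTODO above is resolved, sxg_mac_set_address() would conventionally
 * validate the address and be wired up at probe time using this driver
 * era's net_device hooks, e.g.:
 *
 *	if (!is_valid_ether_addr(addr->sa_data))
 *		return -EADDRNOTAVAIL;
 *	...
 *	netdev->set_mac_address = sxg_mac_set_address;
 */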
3595
3596 /*
3597 * SXG DRIVER FUNCTIONS (below)
3598 *
3599 * sxg_initialize_adapter - Initialize adapter
3600 *
3601 * Arguments -
3602 * adapter - A pointer to our adapter structure
3603 *
3604 * Return - int
3605 */
3606 static int sxg_initialize_adapter(struct adapter_t *adapter)
3607 {
3608 u32 RssIds, IsrCount;
3609 u32 i;
3610 int status;
3611
3612 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
3613 adapter, 0, 0, 0);
3614
3615 RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
3616 IsrCount = adapter->MsiEnabled ? RssIds : 1;
3617
3618 /*
3619 * Sanity check SXG_UCODE_REGS structure definition to
3620 * make sure the length is correct
3621 */
3622 ASSERT(sizeof(struct sxg_ucode_regs) == SXG_REGISTER_SIZE_PER_CPU);
3623
3624 /* Disable interrupts */
3625 SXG_DISABLE_ALL_INTERRUPTS(adapter);
3626
3627 /* Set MTU */
3628 ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
3629 (adapter->FrameSize == JUMBOMAXFRAME));
3630 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
3631
3632 /* Set event ring base address and size */
3633 WRITE_REG64(adapter,
3634 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
3635 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
3636
3637 /* Per-ISR initialization */
3638 for (i = 0; i < IsrCount; i++) {
3639 u64 Addr;
3640 /* Set interrupt status pointer */
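		/*
		 * Each ISR gets its own 32-bit status word; the words are
		 * packed contiguously starting at PIsr, as the sizeof(u32)
		 * stride below implies.
		 */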
3641 Addr = adapter->PIsr + (i * sizeof(u32));
3642 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
3643 }
3644
3645 /* XMT ring zero index */
3646 WRITE_REG64(adapter,
3647 adapter->UcodeRegs[0].SPSendIndex,
3648 adapter->PXmtRingZeroIndex, 0);
3649
3650 /* Per-RSS initialization */
3651 for (i = 0; i < RssIds; i++) {
3652 /* Release all event ring entries to the Microcode */
3653 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
3654 TRUE);
3655 }
3656
3657 /* Transmit ring base and size */
3658 WRITE_REG64(adapter,
3659 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
3660 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
3661
3662 /* Receive ring base and size */
3663 WRITE_REG64(adapter,
3664 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
3665 WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE);
3666
3667 /* Populate the card with receive buffers */
3668 sxg_stock_rcv_buffers(adapter);
3669
3670 /*
3671 * Initialize checksum offload capabilities. At the moment we always
3672 * enable IP and TCP receive checksums on the card. Depending on the
3673 * checksum configuration specified by the user, we can choose to
3674 * report or ignore the checksum information provided by the card.
3675 */
3676 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
3677 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
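
	/*
	 * Illustrative only: "reporting" the card's checksum result on the
	 * receive path typically amounts to marking the skb so the stack
	 * skips its software verification, e.g.:
	 *
	 *	skb->ip_summed = CHECKSUM_UNNECESSARY;
	 *
	 * That would happen in the receive completion path, not here.
	 */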
3678
3679 /* Initialize the MAC, XAUI */
3680 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
3681 status = sxg_initialize_link(adapter);
3682 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
3683 status);
3684 if (status != STATUS_SUCCESS) {
3685 return (status);
3686 }
3687 /*
3688 * Initialize Dead to FALSE.
3689 * SlicCheckForHang or SlicDumpThread will take it from here.
3690 */
3691 adapter->Dead = FALSE;
3692 adapter->PingOutstanding = FALSE;
3693 adapter->State = SXG_STATE_RUNNING;
3694
3695 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInit",
3696 adapter, 0, 0, 0);
3697 return (STATUS_SUCCESS);
3698 }
3699
3700 /*
3701 * sxg_fill_descriptor_block - Populate a descriptor block and give it to
3702 * the card. The caller should hold the RcvQLock
3703 *
3704 * Arguments -
3705 * adapter - A pointer to our adapter structure
3706 * RcvDescriptorBlockHdr - Descriptor block to fill
3707 *
3708 * Return
3709 * status
3710 */
3711 static int sxg_fill_descriptor_block(struct adapter_t *adapter,
3712 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr)
3713 {
3714 u32 i;
3715 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
3716 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3717 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
3718 struct sxg_cmd *RingDescriptorCmd;
3719 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
3720
3721 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk",
3722 adapter, adapter->RcvBuffersOnCard,
3723 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3724
3725 ASSERT(RcvDescriptorBlockHdr);
3726
3727 /*
3728 * If we don't have the resources to fill the descriptor block,
3729 * return failure
3730 */
3731 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
3732 SXG_RING_FULL(RcvRingInfo)) {
3733 adapter->Stats.NoMem++;
3734 return (STATUS_FAILURE);
3735 }
3736 /* Get a ring descriptor command */
3737 SXG_GET_CMD(RingZero,
3738 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
3739 ASSERT(RingDescriptorCmd);
3740 RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD;
3741 RcvDescriptorBlock = (struct sxg_rcv_descriptor_block *)
3742 RcvDescriptorBlockHdr->VirtualAddress;
3743
3744 /* Fill in the descriptor block */
3745 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
3746 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3747 ASSERT(RcvDataBufferHdr);
		if (!RcvDataBufferHdr->SxgDumbRcvPacket) {
			SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr,
						adapter->ReceiveBufferSize);
			if (RcvDataBufferHdr->skb)
				RcvDataBufferHdr->SxgDumbRcvPacket =
							RcvDataBufferHdr->skb;
			else
				goto no_memory;
		}
3758 SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
3759 RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
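		/*
		 * The card DMAs using the physical address; the virtual
		 * address is recorded so the driver can recover the buffer
		 * header when the descriptor completes.
		 */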
3760 RcvDescriptorBlock->Descriptors[i].VirtualAddress =
3761 (void *)RcvDataBufferHdr;
3762
3763 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
3764 RcvDataBufferHdr->PhysicalAddress;
3765 }
3766 /* Add the descriptor block to receive descriptor ring 0 */
3767 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
3768
	/*
	 * RcvBuffersOnCard is not protected via the receive lock (see
	 * sxg_process_event_queue).  We don't want to grab a lock every
	 * time a buffer is returned to us, so we use atomic interlocked
	 * functions instead.
	 */
3775 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
3776
3777 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
3778 RcvDescriptorBlockHdr,
3779 RingDescriptorCmd, RcvRingInfo->Head, RcvRingInfo->Tail);
3780
	WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 1, TRUE);
3782 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlk",
3783 adapter, adapter->RcvBuffersOnCard,
3784 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3785 return (STATUS_SUCCESS);
3786 no_memory:
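	/*
	 * Note: -ENOMEM here is distinct from STATUS_FAILURE, and callers
	 * compare against STATUS_FAILURE specifically: by this point
	 * SXG_GET_CMD has already attached the descriptor block header to
	 * a ring command slot, so it must not be freed the way it is on
	 * STATUS_FAILURE.
	 */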
3787 return (-ENOMEM);
3788 }
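
/*
 * A minimal sketch (not present in the driver): the "caller should hold
 * the RcvQLock" contract above could be checked at runtime by placing
 *
 *	assert_spin_locked(&adapter->RcvQLock);
 *
 * at the top of sxg_fill_descriptor_block().
 */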
3789
3790 /*
3791 * sxg_stock_rcv_buffers - Stock the card with receive buffers
3792 *
3793 * Arguments -
3794 * adapter - A pointer to our adapter structure
3795 *
3796 * Return
3797 * None
3798 */
3799 static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
3800 {
3801 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
3802
3803 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
3804 adapter, adapter->RcvBuffersOnCard,
3805 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
	/*
	 * First, if we have fewer than our minimum threshold of receive
	 * buffers, no allocation is already in progress, and we have not
	 * exceeded our maximum block count, get another block of buffers.
	 * None of this needs to be SMP safe; these are round numbers.
	 */
3812 if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) &&
3813 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
3814 (atomic_read(&adapter->pending_allocations) == 0)) {
3815 sxg_allocate_buffer_memory(adapter,
3816 SXG_RCV_BLOCK_SIZE
3817 (SXG_RCV_DATA_HDR_SIZE),
3818 SXG_BUFFER_TYPE_RCV);
3819 }
3820 /* Now grab the RcvQLock lock and proceed */
3821 spin_lock(&adapter->RcvQLock);
3822 while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
3823 struct list_entry *_ple;
3824
3825 /* Get a descriptor block */
3826 RcvDescriptorBlockHdr = NULL;
3827 if (adapter->FreeRcvBlockCount) {
3828 _ple = RemoveHeadList(&adapter->FreeRcvBlocks);
3829 RcvDescriptorBlockHdr =
3830 container_of(_ple, struct sxg_rcv_descriptor_block_hdr,
3831 FreeList);
3832 adapter->FreeRcvBlockCount--;
3833 RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY;
3834 }
3835
3836 if (RcvDescriptorBlockHdr == NULL) {
3837 /* Bail out.. */
3838 adapter->Stats.NoMem++;
3839 break;
3840 }
3841 /* Fill in the descriptor block and give it to the card */
3842 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
3843 STATUS_FAILURE) {
3844 /* Free the descriptor block */
3845 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
3846 RcvDescriptorBlockHdr);
3847 break;
3848 }
3849 }
3850 spin_unlock(&adapter->RcvQLock);
3851 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlks",
3852 adapter, adapter->RcvBuffersOnCard,
3853 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3854 }
3855
3856 /*
3857 * sxg_complete_descriptor_blocks - Return descriptor blocks that have been
3858 * completed by the microcode
3859 *
3860 * Arguments -
3861 * adapter - A pointer to our adapter structure
3862 * Index - Where the microcode is up to
3863 *
3864 * Return
3865 * None
3866 */
3867 static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
3868 unsigned char Index)
3869 {
3870 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
3871 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
3872 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
3873 struct sxg_cmd *RingDescriptorCmd;
3874
3875 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
3876 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
3877
3878 /* Now grab the RcvQLock lock and proceed */
3879 spin_lock(&adapter->RcvQLock);
3880 ASSERT(Index != RcvRingInfo->Tail);
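	/*
	 * Walk completed entries, stopping a small guard band (3 slots)
	 * short of the microcode's index, apparently to avoid touching
	 * entries the microcode may still own.
	 */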
3881 while (sxg_ring_get_forward_diff(RcvRingInfo, Index,
3882 RcvRingInfo->Tail) > 3) {
3883 /*
3884 * Locate the current Cmd (ring descriptor entry), and
3885 * associated receive descriptor block, and advance
3886 * the tail
3887 */
3888 SXG_RETURN_CMD(RingZero,
3889 RcvRingInfo,
3890 RingDescriptorCmd, RcvDescriptorBlockHdr);
3891 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlk",
3892 RcvRingInfo->Head, RcvRingInfo->Tail,
3893 RingDescriptorCmd, RcvDescriptorBlockHdr);
3894
3895 /* Clear the SGL field */
3896 RingDescriptorCmd->Sgl = 0;
3897 /*
3898 * Attempt to refill it and hand it right back to the
3899 * card. If we fail to refill it, free the descriptor block
3900 * header. The card will be restocked later via the
3901 * RcvBuffersOnCard test
3902 */
3903 if (sxg_fill_descriptor_block(adapter,
3904 RcvDescriptorBlockHdr) == STATUS_FAILURE)
3905 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
3906 RcvDescriptorBlockHdr);
3907 }
3908 spin_unlock(&adapter->RcvQLock);
3909 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks",
3910 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
3911 }
3912
3913 /*
3914 * Read the statistics which the card has been maintaining.
3915 */
void sxg_collect_statistics(struct adapter_t *adapter)
{
	/* Guard the dereferences below as well as the register write */
	if (adapter->ucode_stats) {
		WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats,
			    adapter->pucode_stats, 0);
		adapter->stats.rx_fifo_errors = adapter->ucode_stats->ERDrops;
		adapter->stats.rx_over_errors = adapter->ucode_stats->NBDrops;
		adapter->stats.tx_fifo_errors = adapter->ucode_stats->XDrops;
	}
}
3925
static struct net_device_stats *sxg_get_stats(struct net_device *dev)
3927 {
3928 struct adapter_t *adapter = netdev_priv(dev);
3929
3930 sxg_collect_statistics(adapter);
3931 return (&adapter->stats);
3932 }
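
/*
 * sxg_get_stats() matches the classic dev->get_stats signature of this
 * driver's era; presumably it is installed on the net_device at probe
 * time, e.g. (sketch, not a line from this file):
 *
 *	netdev->get_stats = sxg_get_stats;
 */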
3933
3934 static struct pci_driver sxg_driver = {
3935 .name = sxg_driver_name,
3936 .id_table = sxg_pci_tbl,
3937 .probe = sxg_entry_probe,
3938 .remove = sxg_entry_remove,
3939 #if SXG_POWER_MANAGEMENT_ENABLED
3940 .suspend = sxgpm_suspend,
3941 .resume = sxgpm_resume,
3942 #endif
3943 /* .shutdown = slic_shutdown, MOOK_INVESTIGATE */
3944 };
3945
3946 static int __init sxg_module_init(void)
3947 {
3948 sxg_init_driver();
3949
3950 if (debug >= 0)
3951 sxg_debug = debug;
3952
3953 return pci_register_driver(&sxg_driver);
3954 }
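
/*
 * A sketch of how the 'debug' knob consumed above is conventionally
 * declared (the real declaration lives earlier in this file; the values
 * shown are assumptions, not copied from it):
 *
 *	static int debug = -1;
 *	module_param(debug, int, 0);
 *	MODULE_PARM_DESC(debug, "Debug level");
 */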
3955
3956 static void __exit sxg_module_cleanup(void)
3957 {
3958 pci_unregister_driver(&sxg_driver);
3959 }
3960
3961 module_init(sxg_module_init);
3962 module_exit(sxg_module_cleanup);