drivers/net/netxen/netxen_nic_main.c
1 /*
2 * Copyright (C) 2003 - 2006 NetXen, Inc.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called LICENSE.
22 *
23 * Contact Information:
24 * info@netxen.com
25 * NetXen,
26 * 3965 Freedom Circle, Fourth floor,
27 * Santa Clara, CA 95054
28 *
29 *
30 * Main source file for NetXen NIC Driver on Linux
31 *
32 */
33
34 #include <linux/vmalloc.h>
35 #include <linux/highmem.h>
36 #include "netxen_nic_hw.h"
37
38 #include "netxen_nic.h"
39 #include "netxen_nic_phan_reg.h"
40
41 #include <linux/dma-mapping.h>
43 #include <net/ip.h>
44
45 MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
46 MODULE_LICENSE("GPL");
47 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
48
49 char netxen_nic_driver_name[] = "netxen-nic";
50 static char netxen_nic_driver_string[] = "NetXen Network Driver version "
51 NETXEN_NIC_LINUX_VERSIONID;
52
53 #define NETXEN_NETDEV_WEIGHT 120
54 #define NETXEN_ADAPTER_UP_MAGIC 777
55 #define NETXEN_NIC_PEG_TUNE 0
56
57 u8 nx_p2_id = NX_P2_C0;
58
59 #define DMA_32BIT_MASK 0x00000000ffffffffULL
60 #define DMA_35BIT_MASK 0x00000007ffffffffULL
61
62 /* Local functions to NetXen NIC driver */
63 static int __devinit netxen_nic_probe(struct pci_dev *pdev,
64 const struct pci_device_id *ent);
65 static void __devexit netxen_nic_remove(struct pci_dev *pdev);
66 static int netxen_nic_open(struct net_device *netdev);
67 static int netxen_nic_close(struct net_device *netdev);
68 static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
69 static void netxen_tx_timeout(struct net_device *netdev);
70 static void netxen_tx_timeout_task(struct work_struct *work);
71 static void netxen_watchdog(unsigned long);
72 static int netxen_handle_int(struct netxen_adapter *, struct net_device *);
73 static int netxen_nic_poll(struct net_device *dev, int *budget);
74 #ifdef CONFIG_NET_POLL_CONTROLLER
75 static void netxen_nic_poll_controller(struct net_device *netdev);
76 #endif
77 static irqreturn_t netxen_intr(int irq, void *data);
78
79 int physical_port[] = {0, 1, 2, 3};
80
81 /* PCI Device ID Table */
82 static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
83 {PCI_DEVICE(0x4040, 0x0001)},
84 {PCI_DEVICE(0x4040, 0x0002)},
85 {PCI_DEVICE(0x4040, 0x0003)},
86 {PCI_DEVICE(0x4040, 0x0004)},
87 {PCI_DEVICE(0x4040, 0x0005)},
88 {PCI_DEVICE(0x4040, 0x0024)},
89 {PCI_DEVICE(0x4040, 0x0025)},
90 {0,}
91 };
92
93 MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
94
95 struct workqueue_struct *netxen_workq;
96 static void netxen_watchdog(unsigned long);
97
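/*
 * netxen_nic_update_cmd_producer()/netxen_nic_update_cmd_consumer():
 * write the TX command ring producer/consumer index into the per-port
 * CRB register so the firmware sees newly posted descriptors.
 */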
98 static inline void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
99 uint32_t crb_producer)
100 {
101 switch (adapter->portnum) {
102 case 0:
103 writel(crb_producer, NETXEN_CRB_NORMALIZE
104 (adapter, CRB_CMD_PRODUCER_OFFSET));
105 return;
106 case 1:
107 writel(crb_producer, NETXEN_CRB_NORMALIZE
108 (adapter, CRB_CMD_PRODUCER_OFFSET_1));
109 return;
110 case 2:
111 writel(crb_producer, NETXEN_CRB_NORMALIZE
112 (adapter, CRB_CMD_PRODUCER_OFFSET_2));
113 return;
114 case 3:
115 writel(crb_producer, NETXEN_CRB_NORMALIZE
116 (adapter, CRB_CMD_PRODUCER_OFFSET_3));
117 return;
118 default:
119 printk(KERN_WARNING "We tried to update "
120 "CRB_CMD_PRODUCER_OFFSET for invalid "
121 "PCI function id %d\n",
122 adapter->portnum);
123 return;
124 }
125 }
126
127 static inline void netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
128 u32 crb_consumer)
129 {
130 switch (adapter->portnum) {
131 case 0:
132 writel(crb_consumer, NETXEN_CRB_NORMALIZE
133 (adapter, CRB_CMD_CONSUMER_OFFSET));
134 return;
135 case 1:
136 writel(crb_consumer, NETXEN_CRB_NORMALIZE
137 (adapter, CRB_CMD_CONSUMER_OFFSET_1));
138 return;
139 case 2:
140 writel(crb_consumer, NETXEN_CRB_NORMALIZE
141 (adapter, CRB_CMD_CONSUMER_OFFSET_2));
142 return;
143 case 3:
144 writel(crb_consumer, NETXEN_CRB_NORMALIZE
145 (adapter, CRB_CMD_CONSUMER_OFFSET_3));
146 return;
147 default:
148 printk(KERN_WARNING "We tried to update "
149 "CRB_CMD_PRODUCER_OFFSET for invalid "
150 "PCI function id %d\n",
151 adapter->portnum);
152 return;
153 }
154 }
155
156 #define ADAPTER_LIST_SIZE 12
157 int netxen_cards_found;
158
159 /*
160 * netxen_nic_probe()
161 *
162 * The Linux system will invoke this after identifying the vendor ID and
163 * device ID in the pci_tbl supported by this module.
164 *
165 * A quad port card has one operational PCI config space (function 0),
166 * which is used to access all four ports.
167 *
168 * This routine will initialize the adapter and set up the global parameters
169 * along with the port's specific structure.
170 */
171 static int __devinit
172 netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
173 {
174 struct net_device *netdev = NULL;
175 struct netxen_adapter *adapter = NULL;
176 void __iomem *mem_ptr0 = NULL;
177 void __iomem *mem_ptr1 = NULL;
178 void __iomem *mem_ptr2 = NULL;
179 unsigned long first_page_group_end;
180 unsigned long first_page_group_start;
181
182
183 u8 __iomem *db_ptr = NULL;
184 unsigned long mem_base, mem_len, db_base, db_len;
185 int pci_using_dac, i = 0, err;
186 int ring;
187 struct netxen_recv_context *recv_ctx = NULL;
188 struct netxen_rcv_desc_ctx *rcv_desc = NULL;
189 struct netxen_cmd_buffer *cmd_buf_arr = NULL;
190 u64 mac_addr[FLASH_NUM_PORTS + 1];
191 int valid_mac = 0;
192 u32 val;
193 int pci_func_id = PCI_FUNC(pdev->devfn);
194
195 printk(KERN_INFO "%s\n", netxen_nic_driver_string);
196
197 if (pdev->class != 0x020000) {
198 printk(KERN_ERR"NetXen function %d, class %x will not"
199 "be enabled.\n",pci_func_id, pdev->class);
200 return -ENODEV;
201 }
202 if ((err = pci_enable_device(pdev)))
203 return err;
204 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
205 err = -ENODEV;
206 goto err_out_disable_pdev;
207 }
208
209 if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
210 goto err_out_disable_pdev;
211
212 pci_set_master(pdev);
213 pci_read_config_byte(pdev, PCI_REVISION_ID, &nx_p2_id);
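/*
 * P2 rev C1 silicon can DMA to 35-bit addresses; everything else is
 * restricted to a 32-bit DMA mask.
 */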
214 if (nx_p2_id == NX_P2_C1 &&
215 (pci_set_dma_mask(pdev, DMA_35BIT_MASK) == 0) &&
216 (pci_set_consistent_dma_mask(pdev, DMA_35BIT_MASK) == 0)) {
217 pci_using_dac = 1;
218 } else {
219 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
220 (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)))
221 goto err_out_free_res;
222
223 pci_using_dac = 0;
224 }
225
226
227 netdev = alloc_etherdev(sizeof(struct netxen_adapter));
228 if (!netdev) {
229 printk(KERN_ERR "%s: Failed to allocate memory for the "
230 "device block. Check system memory resource"
231 " usage.\n", netxen_nic_driver_name);
err = -ENOMEM;
232 goto err_out_free_res;
233 }
234
235 SET_MODULE_OWNER(netdev);
236 SET_NETDEV_DEV(netdev, &pdev->dev);
237
238 adapter = netdev->priv;
239 memset(adapter, 0 , sizeof(struct netxen_adapter));
240
241 adapter->ahw.pdev = pdev;
242 adapter->ahw.pci_func = pci_func_id;
243 spin_lock_init(&adapter->tx_lock);
244 spin_lock_init(&adapter->lock);
245
246 /* remap phys address */
247 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
248 mem_len = pci_resource_len(pdev, 0);
249
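/*
 * BAR 0 is exposed either as a 128MB or a 32MB region; map the page
 * groups that are actually present for the detected layout.
 */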
250 /* 128 Meg of memory */
251 if (mem_len == NETXEN_PCI_128MB_SIZE) {
252 mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
253 mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
254 SECOND_PAGE_GROUP_SIZE);
255 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
256 THIRD_PAGE_GROUP_SIZE);
257 first_page_group_start = FIRST_PAGE_GROUP_START;
258 first_page_group_end = FIRST_PAGE_GROUP_END;
259 } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
260 mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
261 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
262 SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
263 first_page_group_start = 0;
264 first_page_group_end = 0;
265 } else {
266 err = -EIO;
267 goto err_out_free_netdev;
268 }
269
270 if (((mem_ptr0 == 0UL) && (mem_len == NETXEN_PCI_128MB_SIZE)) ||
271 (mem_ptr1 == 0UL) || (mem_ptr2 == 0UL)) {
272 DPRINTK(ERR,
273 "Cannot remap adapter memory aborting.:"
274 "0 -> %p, 1 -> %p, 2 -> %p\n",
275 mem_ptr0, mem_ptr1, mem_ptr2);
276
277 err = -EIO;
278 goto err_out_iounmap;
279 }
280 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
281 db_len = pci_resource_len(pdev, 4);
282
283 if (db_len == 0) {
284 printk(KERN_ERR "%s: doorbell is disabled\n",
285 netxen_nic_driver_name);
286 err = -EIO;
287 goto err_out_iounmap;
288 }
289 DPRINTK(INFO, "doorbell ioremap from %lx a size of %lx\n", db_base,
290 db_len);
291
292 db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
293 if (!db_ptr) {
294 printk(KERN_ERR "%s: Failed to allocate doorbell map.",
295 netxen_nic_driver_name);
296 err = -EIO;
297 goto err_out_iounmap;
298 }
299 DPRINTK(INFO, "doorbell ioremaped at %p\n", db_ptr);
300
301 adapter->ahw.pci_base0 = mem_ptr0;
302 adapter->ahw.first_page_group_start = first_page_group_start;
303 adapter->ahw.first_page_group_end = first_page_group_end;
304 adapter->ahw.pci_base1 = mem_ptr1;
305 adapter->ahw.pci_base2 = mem_ptr2;
306 adapter->ahw.db_base = db_ptr;
307 adapter->ahw.db_len = db_len;
308
309 adapter->netdev = netdev;
310 adapter->pdev = pdev;
311 adapter->portnum = pci_func_id;
312
313 netdev->open = netxen_nic_open;
314 netdev->stop = netxen_nic_close;
315 netdev->hard_start_xmit = netxen_nic_xmit_frame;
316 netdev->get_stats = netxen_nic_get_stats;
317 netdev->set_multicast_list = netxen_nic_set_multi;
318 netdev->set_mac_address = netxen_nic_set_mac;
319 netdev->change_mtu = netxen_nic_change_mtu;
320 netdev->tx_timeout = netxen_tx_timeout;
321 netdev->watchdog_timeo = HZ;
322
323 netxen_nic_change_mtu(netdev, netdev->mtu);
324
325 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
326 netdev->poll = netxen_nic_poll;
327 netdev->weight = NETXEN_NETDEV_WEIGHT;
328 #ifdef CONFIG_NET_POLL_CONTROLLER
329 netdev->poll_controller = netxen_nic_poll_controller;
330 #endif
331 /* ScatterGather support */
332 netdev->features = NETIF_F_SG;
333 netdev->features |= NETIF_F_IP_CSUM;
334 netdev->features |= NETIF_F_TSO;
335
336 if (pci_using_dac)
337 netdev->features |= NETIF_F_HIGHDMA;
338
339 if (pci_enable_msi(pdev)) {
340 adapter->flags &= ~NETXEN_NIC_MSI_ENABLED;
341 printk(KERN_WARNING "%s: unable to allocate MSI interrupt\n",
342 netxen_nic_driver_name);
343 } else
344 adapter->flags |= NETXEN_NIC_MSI_ENABLED;
345
346 netdev->irq = pdev->irq;
347 INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
348
349 /*
350 * Set the CRB window to invalid. If any register in window 0 is
351 * accessed it should set the window to 0 and then reset it to 1.
352 */
353 adapter->curr_window = 255;
354
355 /* initialize the adapter */
356 netxen_initialize_adapter_hw(adapter);
357
358 #ifdef CONFIG_PPC
359 if ((adapter->ahw.boardcfg.board_type ==
360 NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) &&
361 (pci_func_id == 2))
362 goto err_out_free_adapter;
363 #endif /* CONFIG_PPC */
364
365 /*
366 * The adapter in our case is a quad port card, so initialize it before
367 * initializing the ports.
368 */
369
370 netxen_initialize_adapter_ops(adapter);
371
372 adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS_HOST;
373 if ((adapter->ahw.boardcfg.board_type == NETXEN_BRDTYPE_P2_SB35_4G) ||
374 (adapter->ahw.boardcfg.board_type ==
375 NETXEN_BRDTYPE_P2_SB31_2G))
376 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
377 else
378 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS;
379 adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS;
380 adapter->max_lro_rx_desc_count = MAX_LRO_RCV_DESCRIPTORS;
381
382 cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE);
383 if (cmd_buf_arr == NULL) {
384 printk(KERN_ERR
385 "%s: Could not allocate cmd_buf_arr memory:%d\n",
386 netxen_nic_driver_name, (int)TX_RINGSIZE);
387 err = -ENOMEM;
388 goto err_out_free_adapter;
389 }
390 memset(cmd_buf_arr, 0, TX_RINGSIZE);
391 adapter->cmd_buf_arr = cmd_buf_arr;
392
393 for (i = 0; i < MAX_RCV_CTX; ++i) {
394 recv_ctx = &adapter->recv_ctx[i];
395 for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
396 rcv_desc = &recv_ctx->rcv_desc[ring];
397 switch (RCV_DESC_TYPE(ring)) {
398 case RCV_DESC_NORMAL:
399 rcv_desc->max_rx_desc_count =
400 adapter->max_rx_desc_count;
401 rcv_desc->flags = RCV_DESC_NORMAL;
402 rcv_desc->dma_size = RX_DMA_MAP_LEN;
403 rcv_desc->skb_size = MAX_RX_BUFFER_LENGTH;
404 break;
405
406 case RCV_DESC_JUMBO:
407 rcv_desc->max_rx_desc_count =
408 adapter->max_jumbo_rx_desc_count;
409 rcv_desc->flags = RCV_DESC_JUMBO;
410 rcv_desc->dma_size = RX_JUMBO_DMA_MAP_LEN;
411 rcv_desc->skb_size = MAX_RX_JUMBO_BUFFER_LENGTH;
412 break;
413
414 case RCV_RING_LRO:
415 rcv_desc->max_rx_desc_count =
416 adapter->max_lro_rx_desc_count;
417 rcv_desc->flags = RCV_DESC_LRO;
418 rcv_desc->dma_size = RX_LRO_DMA_MAP_LEN;
419 rcv_desc->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
420 break;
421
422 }
423 rcv_desc->rx_buf_arr = (struct netxen_rx_buffer *)
424 vmalloc(RCV_BUFFSIZE);
425
426 if (rcv_desc->rx_buf_arr == NULL) {
427 printk(KERN_ERR "%s: Could not allocate"
428 "rcv_desc->rx_buf_arr memory:%d\n",
429 netxen_nic_driver_name,
430 (int)RCV_BUFFSIZE);
431 err = -ENOMEM;
432 goto err_out_free_rx_buffer;
433 }
434 memset(rcv_desc->rx_buf_arr, 0, RCV_BUFFSIZE);
435 }
436
437 }
438
439 netxen_initialize_adapter_sw(adapter); /* initialize the buffers in adapter */
440
441 /* Mezz cards have PCI function 0,2,3 enabled */
442 if ((adapter->ahw.boardcfg.board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ)
443 && (pci_func_id >= 2))
444 adapter->portnum = pci_func_id - 2;
445
446 #ifdef CONFIG_IA64
447 if (adapter->portnum == 0) {
448 netxen_pinit_from_rom(adapter, 0);
449 udelay(500);
450 netxen_load_firmware(adapter);
451 }
452 #endif
453
454 init_timer(&adapter->watchdog_timer);
455 adapter->ahw.xg_linkup = 0;
456 adapter->watchdog_timer.function = &netxen_watchdog;
457 adapter->watchdog_timer.data = (unsigned long)adapter;
458 INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
459 adapter->ahw.pdev = pdev;
460 adapter->proc_cmd_buf_counter = 0;
461 adapter->ahw.revision_id = nx_p2_id;
462
463 /* make sure Window == 1 */
464 netxen_nic_pci_change_crbwindow(adapter, 1);
465
466 netxen_nic_update_cmd_producer(adapter, 0);
467 netxen_nic_update_cmd_consumer(adapter, 0);
468 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO));
469
470 if (netxen_is_flash_supported(adapter) == 0 &&
471 netxen_get_flash_mac_addr(adapter, mac_addr) == 0)
472 valid_mac = 1;
473 else
474 valid_mac = 0;
475
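/* Copy the flash MAC address into dev_addr, reversing the byte order. */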
476 if (valid_mac) {
477 unsigned char *p = (unsigned char *)&mac_addr[adapter->portnum];
478 netdev->dev_addr[0] = *(p + 5);
479 netdev->dev_addr[1] = *(p + 4);
480 netdev->dev_addr[2] = *(p + 3);
481 netdev->dev_addr[3] = *(p + 2);
482 netdev->dev_addr[4] = *(p + 1);
483 netdev->dev_addr[5] = *(p + 0);
484
485 memcpy(netdev->perm_addr, netdev->dev_addr,
486 netdev->addr_len);
487 if (!is_valid_ether_addr(netdev->perm_addr)) {
488 printk(KERN_ERR "%s: Bad MAC address "
489 "%02x:%02x:%02x:%02x:%02x:%02x.\n",
490 netxen_nic_driver_name,
491 netdev->dev_addr[0],
492 netdev->dev_addr[1],
493 netdev->dev_addr[2],
494 netdev->dev_addr[3],
495 netdev->dev_addr[4],
496 netdev->dev_addr[5]);
497 } else {
498 if (adapter->macaddr_set)
499 adapter->macaddr_set(adapter,
500 netdev->dev_addr);
501 }
502 }
503
504 if (adapter->portnum == 0) {
505 err = netxen_initialize_adapter_offload(adapter);
506 if (err)
507 goto err_out_free_rx_buffer;
508 val = readl(NETXEN_CRB_NORMALIZE(adapter,
509 NETXEN_CAM_RAM(0x1fc)));
510 if (val == 0x55555555) {
511 /* This is the first boot after power up */
512 val = readl(NETXEN_CRB_NORMALIZE(adapter,
513 NETXEN_ROMUSB_GLB_SW_RESET));
514 printk(KERN_INFO"NetXen: read 0x%08x for reset reg.\n",val);
515 if (val != 0x80000f) {
516 /* clear the register for future unloads/loads */
517 writel(0, NETXEN_CRB_NORMALIZE(adapter,
518 NETXEN_CAM_RAM(0x1fc)));
519 printk(KERN_ERR "ERROR in NetXen HW init sequence.\n");
520 err = -ENODEV;
521 goto err_out_free_dev;
522 }
523
524 /* clear the register for future unloads/loads */
525 writel(0, NETXEN_CRB_NORMALIZE(adapter,
526 NETXEN_CAM_RAM(0x1fc)));
527 }
528 printk(KERN_INFO "State: 0x%0x\n",
529 readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)));
530
531 /*
532 * Tell the hardware our version number.
533 */
534 i = (_NETXEN_NIC_LINUX_MAJOR << 16)
535 | ((_NETXEN_NIC_LINUX_MINOR << 8))
536 | (_NETXEN_NIC_LINUX_SUBVERSION);
537 writel(i, NETXEN_CRB_NORMALIZE(adapter, CRB_DRIVER_VERSION));
538
539 /* Unlock the HW, prompting the boot sequence */
540 writel(1,
541 NETXEN_CRB_NORMALIZE(adapter,
542 NETXEN_ROMUSB_GLB_PEGTUNE_DONE));
543 /* Handshake with the card before we register the devices. */
544 netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
545
546 /* leave the hw in the same state as reboot */
547 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
548 netxen_pinit_from_rom(adapter, 0);
549 udelay(500);
550 netxen_load_firmware(adapter);
551 netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
552 }
553
554 /*
555 * See if the firmware gave us a virtual-physical port mapping.
556 */
557 i = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_V2P(adapter->portnum)));
558 if (i != 0x55555555)
559 physical_port[adapter->portnum] = i;
560
561 netif_carrier_off(netdev);
562 netif_stop_queue(netdev);
563
564 if ((err = register_netdev(netdev))) {
565 printk(KERN_ERR "%s: register_netdev failed port #%d"
566 " aborting\n", netxen_nic_driver_name,
567 adapter->portnum);
568 err = -EIO;
569 goto err_out_free_dev;
570 }
571
572 pci_set_drvdata(pdev, adapter);
573
574 switch (adapter->ahw.board_type) {
575 case NETXEN_NIC_GBE:
576 printk(KERN_INFO "%s: QUAD GbE board initialized\n",
577 netxen_nic_driver_name);
578 break;
579
580 case NETXEN_NIC_XGBE:
581 printk(KERN_INFO "%s: XGbE board initialized\n",
582 netxen_nic_driver_name);
583 break;
584 }
585
586 adapter->driver_mismatch = 0;
587
588 return 0;
589
590 err_out_free_dev:
591 if (adapter->portnum == 0)
592 netxen_free_adapter_offload(adapter);
593
594 err_out_free_rx_buffer:
595 for (i = 0; i < MAX_RCV_CTX; ++i) {
596 recv_ctx = &adapter->recv_ctx[i];
597 for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
598 rcv_desc = &recv_ctx->rcv_desc[ring];
599 if (rcv_desc->rx_buf_arr != NULL) {
600 vfree(rcv_desc->rx_buf_arr);
601 rcv_desc->rx_buf_arr = NULL;
602 }
603 }
604 }
605 vfree(cmd_buf_arr);
606
607 err_out_free_adapter:
608 if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
609 pci_disable_msi(pdev);
610
611 pci_set_drvdata(pdev, NULL);
612
613 if (db_ptr)
614 iounmap(db_ptr);
615
616 err_out_iounmap:
617 if (mem_ptr0)
618 iounmap(mem_ptr0);
619 if (mem_ptr1)
620 iounmap(mem_ptr1);
621 if (mem_ptr2)
622 iounmap(mem_ptr2);
623
624 err_out_free_netdev:
625 free_netdev(netdev);
626
627 err_out_free_res:
628 pci_release_regions(pdev);
629
630 err_out_disable_pdev:
631 pci_disable_device(pdev);
632 return err;
633 }
634
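/*
 * netxen_nic_remove() - PCI removal callback.
 * Stops the port, releases the IRQ/MSI, frees posted RX buffers and
 * their DMA mappings, unregisters the netdev and unmaps the BARs.
 */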
635 static void __devexit netxen_nic_remove(struct pci_dev *pdev)
636 {
637 struct netxen_adapter *adapter;
638 struct net_device *netdev;
639 struct netxen_rx_buffer *buffer;
640 struct netxen_recv_context *recv_ctx;
641 struct netxen_rcv_desc_ctx *rcv_desc;
642 int i;
643 int ctxid, ring;
644
645 adapter = pci_get_drvdata(pdev);
646 if (adapter == NULL)
647 return;
648
649 netdev = adapter->netdev;
650
651 netxen_nic_disable_int(adapter);
652 if (adapter->irq)
653 free_irq(adapter->irq, adapter);
654
655 if (adapter->stop_port)
656 adapter->stop_port(adapter);
657
658 if ((adapter->flags & NETXEN_NIC_MSI_ENABLED))
659 pci_disable_msi(pdev);
660
661 if (adapter->portnum == 0)
662 netxen_free_adapter_offload(adapter);
663
664 if (adapter->portnum == 0) {
665 /* leave the hw in the same state as reboot */
666 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
667 netxen_pinit_from_rom(adapter, 0);
668 udelay(500);
669 netxen_load_firmware(adapter);
670 netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
671 }
672
673 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
674 netxen_free_hw_resources(adapter);
675
676 for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
677 recv_ctx = &adapter->recv_ctx[ctxid];
678 for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
679 rcv_desc = &recv_ctx->rcv_desc[ring];
680 for (i = 0; i < rcv_desc->max_rx_desc_count; ++i) {
681 buffer = &(rcv_desc->rx_buf_arr[i]);
682 if (buffer->state == NETXEN_BUFFER_FREE)
683 continue;
684 pci_unmap_single(pdev, buffer->dma,
685 rcv_desc->dma_size,
686 PCI_DMA_FROMDEVICE);
687 if (buffer->skb != NULL)
688 dev_kfree_skb_any(buffer->skb);
689 }
690 vfree(rcv_desc->rx_buf_arr);
691 }
692 }
693
694 unregister_netdev(netdev);
695
696 vfree(adapter->cmd_buf_arr);
697
698 iounmap(adapter->ahw.db_base);
699 iounmap(adapter->ahw.pci_base0);
700 iounmap(adapter->ahw.pci_base1);
701 iounmap(adapter->ahw.pci_base2);
702
703 pci_release_regions(pdev);
704 pci_disable_device(pdev);
705 pci_set_drvdata(pdev, NULL);
706
707 free_netdev(netdev);
708 }
709
710 /*
711 * Called when a network interface is made active
712 * @returns 0 on success, negative value on failure
713 */
714 static int netxen_nic_open(struct net_device *netdev)
715 {
716 struct netxen_adapter *adapter = (struct netxen_adapter *)netdev->priv;
717 int err = 0;
718 int ctx, ring;
719
720 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) {
721 err = netxen_init_firmware(adapter);
722 if (err != 0) {
723 printk(KERN_ERR "Failed to init firmware\n");
724 return -EIO;
725 }
726 netxen_nic_flash_print(adapter);
727
728 /* setup all the resources for the Phantom... */
729 /* this include the descriptors for rcv, tx, and status */
730 netxen_nic_clear_stats(adapter);
731 err = netxen_nic_hw_resources(adapter);
732 if (err) {
733 printk(KERN_ERR "Error in setting hw resources:%d\n",
734 err);
735 return err;
736 }
737 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
738 for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++)
739 netxen_post_rx_buffers(adapter, ctx, ring);
740 }
741 adapter->irq = adapter->ahw.pdev->irq;
742 err = request_irq(adapter->ahw.pdev->irq, netxen_intr,
743 IRQF_SHARED|IRQF_SAMPLE_RANDOM, netdev->name,
744 adapter);
745 if (err) {
746 printk(KERN_ERR "request_irq failed with: %d\n", err);
747 netxen_free_hw_resources(adapter);
748 return err;
749 }
750
751 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
752 }
753 if (!adapter->driver_mismatch)
754 mod_timer(&adapter->watchdog_timer, jiffies);
755
756 netxen_nic_enable_int(adapter);
757
758 /* Set the MAC address again here, in case the Phantom
759 * firmware overwrote it. */
760 if (adapter->macaddr_set)
761 adapter->macaddr_set(adapter, netdev->dev_addr);
762 if (adapter->init_port
763 && adapter->init_port(adapter, adapter->portnum) != 0) {
764 del_timer_sync(&adapter->watchdog_timer);
765 printk(KERN_ERR "%s: Failed to initialize port %d\n",
766 netxen_nic_driver_name, adapter->portnum);
767 return -EIO;
768 }
769
770 netxen_nic_set_link_parameters(adapter);
771
772 netxen_nic_set_multi(netdev);
773 if (adapter->set_mtu)
774 adapter->set_mtu(adapter, netdev->mtu);
775
776 if (!adapter->driver_mismatch)
777 netif_start_queue(netdev);
778
779 return 0;
780 }
781
782 /*
783 * netxen_nic_close - Disables a network interface (device close entry point)
784 */
785 static int netxen_nic_close(struct net_device *netdev)
786 {
787 struct netxen_adapter *adapter = netdev_priv(netdev);
788 int i, j;
789 struct netxen_cmd_buffer *cmd_buff;
790 struct netxen_skb_frag *buffrag;
791
792 netif_carrier_off(netdev);
793 netif_stop_queue(netdev);
794
795 cmd_buff = adapter->cmd_buf_arr;
796 for (i = 0; i < adapter->max_tx_desc_count; i++) {
797 buffrag = cmd_buff->frag_array;
798 if (buffrag->dma) {
799 pci_unmap_single(adapter->pdev, buffrag->dma,
800 buffrag->length, PCI_DMA_TODEVICE);
801 buffrag->dma = (u64) NULL;
802 }
803 for (j = 0; j < cmd_buff->frag_count; j++) {
804 buffrag++;
805 if (buffrag->dma) {
806 pci_unmap_page(adapter->pdev, buffrag->dma,
807 buffrag->length,
808 PCI_DMA_TODEVICE);
809 buffrag->dma = (u64) NULL;
810 }
811 }
812 /* Free the skb we received in netxen_nic_xmit_frame */
813 if (cmd_buff->skb) {
814 dev_kfree_skb_any(cmd_buff->skb);
815 cmd_buff->skb = NULL;
816 }
817 cmd_buff++;
818 }
819 FLUSH_SCHEDULED_WORK();
820 del_timer_sync(&adapter->watchdog_timer);
821
822 return 0;
823 }
824
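/*
 * netxen_nic_xmit_frame() - main transmit path.
 * Maps the skb and its page fragments, fills command descriptors
 * (up to four buffers per descriptor), copies the MAC/IP/TCP headers
 * into the ring for LSO frames, and advances the command producer.
 */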
825 static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
826 {
827 struct netxen_adapter *adapter = netdev_priv(netdev);
828 struct netxen_hardware_context *hw = &adapter->ahw;
829 unsigned int first_seg_len = skb->len - skb->data_len;
830 struct netxen_skb_frag *buffrag;
831 unsigned int i;
832
833 u32 producer = 0;
834 u32 saved_producer = 0;
835 struct cmd_desc_type0 *hwdesc;
836 int k;
837 struct netxen_cmd_buffer *pbuf = NULL;
838 static int dropped_packet = 0;
839 int frag_count;
840 u32 local_producer = 0;
841 u32 max_tx_desc_count = 0;
842 u32 last_cmd_consumer = 0;
843 int no_of_desc;
844
845 adapter->stats.xmitcalled++;
846 frag_count = skb_shinfo(skb)->nr_frags + 1;
847
848 if (unlikely(skb->len <= 0)) {
849 dev_kfree_skb_any(skb);
850 adapter->stats.badskblen++;
851 return NETDEV_TX_OK;
852 }
853
854 if (frag_count > MAX_BUFFERS_PER_CMD) {
855 printk("%s: %s netxen_nic_xmit_frame: frag_count (%d)"
856 "too large, can handle only %d frags\n",
857 netxen_nic_driver_name, netdev->name,
858 frag_count, MAX_BUFFERS_PER_CMD);
859 adapter->stats.txdropped++;
860 if ((++dropped_packet & 0xff) == 0xff)
861 printk("%s: %s droppped packets = %d\n",
862 netxen_nic_driver_name, netdev->name,
863 dropped_packet);
864
865 return NETDEV_TX_OK;
866 }
867
868 /*
869 * Everything is set up. Now, we just need to transmit it out.
870 * Note that we have to copy the contents of the buffer over to the
871 * right place. Later on, this can be optimized out by de-coupling the
872 * producer index from the buffer index.
873 */
874 retry_getting_window:
875 spin_lock_bh(&adapter->tx_lock);
876 if (adapter->total_threads >= MAX_XMIT_PRODUCERS) {
877 spin_unlock_bh(&adapter->tx_lock);
878 /*
879 * Yield CPU
880 */
881 if (!in_atomic())
882 schedule();
883 else {
884 for (i = 0; i < 20; i++)
885 cpu_relax(); /* This is a nop instr on i386 */
886 }
887 goto retry_getting_window;
888 }
889 local_producer = adapter->cmd_producer;
890 /* There are 4 fragments per descriptor */
891 no_of_desc = (frag_count + 3) >> 2;
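/* For TSO, reserve one extra descriptor for the copied headers, and a
 * second one if the MAC+IP+TCP headers do not fit in a single descriptor. */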
892 if (netdev->features & NETIF_F_TSO) {
893 if (skb_shinfo(skb)->gso_size > 0) {
894
895 no_of_desc++;
896 if ((ip_hdrlen(skb) + tcp_hdrlen(skb) +
897 sizeof(struct ethhdr)) >
898 (sizeof(struct cmd_desc_type0) - 2)) {
899 no_of_desc++;
900 }
901 }
902 }
903 k = adapter->cmd_producer;
904 max_tx_desc_count = adapter->max_tx_desc_count;
905 last_cmd_consumer = adapter->last_cmd_consumer;
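/* Stop the queue if the ring does not have room for no_of_desc more
 * descriptors before the producer would catch the consumer. */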
906 if ((k + no_of_desc) >=
907 ((last_cmd_consumer <= k) ? last_cmd_consumer + max_tx_desc_count :
908 last_cmd_consumer)) {
909 netif_stop_queue(netdev);
910 adapter->flags |= NETXEN_NETDEV_STATUS;
911 spin_unlock_bh(&adapter->tx_lock);
912 return NETDEV_TX_BUSY;
913 }
914 k = get_index_range(k, max_tx_desc_count, no_of_desc);
915 adapter->cmd_producer = k;
916 adapter->total_threads++;
917 adapter->num_threads++;
918
919 spin_unlock_bh(&adapter->tx_lock);
920 /* Copy the descriptors into the hardware */
921 producer = local_producer;
922 saved_producer = producer;
923 hwdesc = &hw->cmd_desc_head[producer];
924 memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
925 /* Take skb->data itself */
926 pbuf = &adapter->cmd_buf_arr[producer];
927 if ((netdev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size > 0) {
928 pbuf->mss = skb_shinfo(skb)->gso_size;
929 hwdesc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
930 } else {
931 pbuf->mss = 0;
932 hwdesc->mss = 0;
933 }
934 pbuf->total_length = skb->len;
935 pbuf->skb = skb;
936 pbuf->cmd = TX_ETHER_PKT;
937 pbuf->frag_count = frag_count;
938 pbuf->port = adapter->portnum;
939 buffrag = &pbuf->frag_array[0];
940 buffrag->dma = pci_map_single(adapter->pdev, skb->data, first_seg_len,
941 PCI_DMA_TODEVICE);
942 buffrag->length = first_seg_len;
943 netxen_set_cmd_desc_totallength(hwdesc, skb->len);
944 netxen_set_cmd_desc_num_of_buff(hwdesc, frag_count);
945 netxen_set_cmd_desc_opcode(hwdesc, TX_ETHER_PKT);
946
947 netxen_set_cmd_desc_port(hwdesc, adapter->portnum);
948 netxen_set_cmd_desc_ctxid(hwdesc, adapter->portnum);
949 hwdesc->buffer1_length = cpu_to_le16(first_seg_len);
950 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
951
952 for (i = 1, k = 1; i < frag_count; i++, k++) {
953 struct skb_frag_struct *frag;
954 int len, temp_len;
955 unsigned long offset;
956 dma_addr_t temp_dma;
957
958 /* move to next desc. if there is a need */
959 if ((i & 0x3) == 0) {
960 k = 0;
961 producer = get_next_index(producer,
962 adapter->max_tx_desc_count);
963 hwdesc = &hw->cmd_desc_head[producer];
964 memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
965 }
966 frag = &skb_shinfo(skb)->frags[i - 1];
967 len = frag->size;
968 offset = frag->page_offset;
969
970 temp_len = len;
971 temp_dma = pci_map_page(adapter->pdev, frag->page, offset,
972 len, PCI_DMA_TODEVICE);
973
974 buffrag++;
975 buffrag->dma = temp_dma;
976 buffrag->length = temp_len;
977
978 DPRINTK(INFO, "for loop. i=%d k=%d\n", i, k);
979 switch (k) {
980 case 0:
981 hwdesc->buffer1_length = cpu_to_le16(temp_len);
982 hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
983 break;
984 case 1:
985 hwdesc->buffer2_length = cpu_to_le16(temp_len);
986 hwdesc->addr_buffer2 = cpu_to_le64(temp_dma);
987 break;
988 case 2:
989 hwdesc->buffer3_length = cpu_to_le16(temp_len);
990 hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
991 break;
992 case 3:
993 hwdesc->buffer4_length = cpu_to_le16(temp_len);
994 hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
995 break;
996 }
997 frag++;
998 }
999 producer = get_next_index(producer, adapter->max_tx_desc_count);
1000
1001 /* might change opcode to TX_TCP_LSO */
1002 netxen_tso_check(adapter, &hw->cmd_desc_head[saved_producer], skb);
1003
1004 /* For LSO, we need to copy the MAC/IP/TCP headers into
1005 * the descriptor ring
1006 */
1007 if (netxen_get_cmd_desc_opcode(&hw->cmd_desc_head[saved_producer])
1008 == TX_TCP_LSO) {
1009 int hdr_len, first_hdr_len, more_hdr;
1010 hdr_len = hw->cmd_desc_head[saved_producer].total_hdr_length;
1011 if (hdr_len > (sizeof(struct cmd_desc_type0) - 2)) {
1012 first_hdr_len = sizeof(struct cmd_desc_type0) - 2;
1013 more_hdr = 1;
1014 } else {
1015 first_hdr_len = hdr_len;
1016 more_hdr = 0;
1017 }
1018 /* copy the MAC/IP/TCP headers to the cmd descriptor list */
1019 hwdesc = &hw->cmd_desc_head[producer];
1020
1021 /* copy the first 64 bytes */
1022 memcpy(((void *)hwdesc) + 2,
1023 (void *)(skb->data), first_hdr_len);
1024 producer = get_next_index(producer, max_tx_desc_count);
1025
1026 if (more_hdr) {
1027 hwdesc = &hw->cmd_desc_head[producer];
1028 /* copy the next 64 bytes - should be enough except
1029 * for pathological case
1030 */
1031 skb_copy_from_linear_data_offset(skb, first_hdr_len,
1032 hwdesc,
1033 (hdr_len -
1034 first_hdr_len));
1035 producer = get_next_index(producer, max_tx_desc_count);
1036 }
1037 }
1038
1039 i = netxen_get_cmd_desc_totallength(&hw->cmd_desc_head[saved_producer]);
1040
1041 hw->cmd_desc_head[saved_producer].flags_opcode =
1042 cpu_to_le16(hw->cmd_desc_head[saved_producer].flags_opcode);
1043 hw->cmd_desc_head[saved_producer].num_of_buffers_total_length =
1044 cpu_to_le32(hw->cmd_desc_head[saved_producer].
1045 num_of_buffers_total_length);
1046
1047 spin_lock_bh(&adapter->tx_lock);
1048 adapter->stats.txbytes += i;
1049
1050 /* Code to update the adapter considering how many producer threads
1051 are currently working */
1052 if ((--adapter->num_threads) == 0) {
1053 /* This is the last thread */
1054 u32 crb_producer = adapter->cmd_producer;
1055 netxen_nic_update_cmd_producer(adapter, crb_producer);
1056 wmb();
1057 adapter->total_threads = 0;
1058 }
1059
1060 adapter->stats.xmitfinished++;
1061 spin_unlock_bh(&adapter->tx_lock);
1062
1063 netdev->trans_start = jiffies;
1064
1065 DPRINTK(INFO, "wrote CMD producer %x to phantom\n", producer);
1066
1067 DPRINTK(INFO, "Done. Send\n");
1068 return NETDEV_TX_OK;
1069 }
1070
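/*
 * Watchdog timer callback: runs in timer context, so defer the real
 * work to the shared workqueue.
 */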
1071 static void netxen_watchdog(unsigned long v)
1072 {
1073 struct netxen_adapter *adapter = (struct netxen_adapter *)v;
1074
1075 SCHEDULE_WORK(&adapter->watchdog_task);
1076 }
1077
1078 static void netxen_tx_timeout(struct net_device *netdev)
1079 {
1080 struct netxen_adapter *adapter = (struct netxen_adapter *)
1081 netdev_priv(netdev);
1082 SCHEDULE_WORK(&adapter->tx_timeout_task);
1083 }
1084
1085 static void netxen_tx_timeout_task(struct work_struct *work)
1086 {
1087 struct netxen_adapter *adapter =
1088 container_of(work, struct netxen_adapter, tx_timeout_task);
1089 unsigned long flags;
1090
1091 printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
1092 netxen_nic_driver_name, adapter->netdev->name);
1093
1094 spin_lock_irqsave(&adapter->lock, flags);
1095 netxen_nic_close(adapter->netdev);
1096 netxen_nic_open(adapter->netdev);
1097 spin_unlock_irqrestore(&adapter->lock, flags);
1098 adapter->netdev->trans_start = jiffies;
1099 netif_wake_queue(adapter->netdev);
1100 }
1101
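/*
 * netxen_handle_int() - interrupt service helper.
 * For legacy (non-MSI) interrupts, checks the CRB interrupt vector to
 * see whether the interrupt belongs to this port and masks it; then
 * schedules the NAPI poll if there is RX or TX work pending.
 */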
1102 static int
1103 netxen_handle_int(struct netxen_adapter *adapter, struct net_device *netdev)
1104 {
1105 u32 ret = 0;
1106
1107 DPRINTK(INFO, "Entered handle ISR\n");
1108 adapter->stats.ints++;
1109
1110 if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
1111 int count = 0;
1112 u32 mask;
1113 u32 our_int = 0;
1114 our_int = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR));
1115 /* not our interrupt */
1116 if ((our_int & (0x80 << adapter->portnum)) == 0)
1117 return ret;
1118 netxen_nic_disable_int(adapter);
1119 /* Window = 0 or 1 */
1120 do {
1121 writel(0xffffffff, PCI_OFFSET_SECOND_RANGE(adapter,
1122 ISR_INT_TARGET_STATUS));
1123 mask = readl(pci_base_offset(adapter, ISR_INT_VECTOR));
1124 } while (((mask & 0x80) != 0) && (++count < 32));
1125 if ((mask & 0x80) != 0)
1126 printk("Could not disable interrupt completely\n");
1127
1128 }
1129
1130 if (netxen_nic_rx_has_work(adapter) || netxen_nic_tx_has_work(adapter)) {
1131 if (netif_rx_schedule_prep(netdev)) {
1132 /*
1133 * Interrupts are already disabled.
1134 */
1135 __netif_rx_schedule(netdev);
1136 } else {
1137 static unsigned int intcount = 0;
1138 if ((++intcount & 0xfff) == 0xfff)
1139 printk(KERN_ERR
1140 "%s: %s interrupt %d while in poll\n",
1141 netxen_nic_driver_name, netdev->name,
1142 intcount);
1143 }
1144 ret = 1;
1145 }
1146
1147 if (ret == 0) {
1148 netxen_nic_enable_int(adapter);
1149 }
1150
1151 return ret;
1152 }
1153
1154 /*
1155 * netxen_intr - Interrupt Handler
1156 * @irq: interrupt number
1157 * @data: points to the adapter structure (which may be handling more than 1 port)
1158 */
1159 irqreturn_t netxen_intr(int irq, void *data)
1160 {
1161 struct netxen_adapter *adapter;
1162 struct net_device *netdev;
1163
1164 if (unlikely(!irq)) {
1165 return IRQ_NONE; /* Not our interrupt */
1166 }
1167
1168 adapter = (struct netxen_adapter *)data;
1169 netdev = adapter->netdev;
1170 /* process our status queue (for all 4 ports) */
1171 if (netif_running(netdev))
1172 netxen_handle_int(adapter, netdev);
1173
1174 return IRQ_HANDLED;
1175 }
1176
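/*
 * netxen_nic_poll() - NAPI poll routine (old netdev->poll interface).
 * Splits the budget across the receive contexts, reaps the command
 * (TX completion) ring, and re-enables interrupts once all work is done.
 */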
1177 static int netxen_nic_poll(struct net_device *netdev, int *budget)
1178 {
1179 struct netxen_adapter *adapter = netdev_priv(netdev);
1180 int work_to_do = min(*budget, netdev->quota);
1181 int done = 1;
1182 int ctx;
1183 int this_work_done;
1184 int work_done = 0;
1185
1186 DPRINTK(INFO, "polling for %d descriptors\n", *budget);
1187
1188 work_done = 0;
1189 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
1190 /*
1191 * Fairness issue. This will give undue weight to the
1192 * receive context 0.
1193 */
1194
1195 /*
1196 * To avoid starvation, we give each of our receivers
1197 * a fraction of the quota. Sometimes it might happen that we
1198 * have enough quota to process every packet, but since all the
1199 * packets are on one context, it gets only a fraction of the quota,
1200 * and ends up not processing all of them.
1201 */
1202 this_work_done = netxen_process_rcv_ring(adapter, ctx,
1203 work_to_do /
1204 MAX_RCV_CTX);
1205 work_done += this_work_done;
1206 }
1207
1208 netdev->quota -= work_done;
1209 *budget -= work_done;
1210
1211 if (work_done >= work_to_do && netxen_nic_rx_has_work(adapter) != 0)
1212 done = 0;
1213
1214 if (netxen_process_cmd_ring((unsigned long)adapter) == 0)
1215 done = 0;
1216
1217 DPRINTK(INFO, "new work_done: %d work_to_do: %d\n",
1218 work_done, work_to_do);
1219 if (done) {
1220 netif_rx_complete(netdev);
1221 netxen_nic_enable_int(adapter);
1222 }
1223
1224 return !done;
1225 }
1226
1227 #ifdef CONFIG_NET_POLL_CONTROLLER
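/* Polling 'interrupt': disable the IRQ and invoke the handler directly;
 * used by netpoll/netconsole when normal interrupts are not available. */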
1228 static void netxen_nic_poll_controller(struct net_device *netdev)
1229 {
1230 struct netxen_adapter *adapter = netdev_priv(netdev);
1231 disable_irq(adapter->irq);
1232 netxen_intr(adapter->irq, adapter);
1233 enable_irq(adapter->irq);
1234 }
1235 #endif
1236
1237 static struct pci_driver netxen_driver = {
1238 .name = netxen_nic_driver_name,
1239 .id_table = netxen_pci_tbl,
1240 .probe = netxen_nic_probe,
1241 .remove = __devexit_p(netxen_nic_remove)
1242 };
1243
1244 /* Driver Registration on NetXen card */
1245
1246 static int __init netxen_init_module(void)
1247 {
1248 if ((netxen_workq = create_singlethread_workqueue("netxen")) == 0)
1249 return -ENOMEM;
1250
1251 return pci_register_driver(&netxen_driver);
1252 }
1253
1254 module_init(netxen_init_module);
1255
1256 static void __exit netxen_exit_module(void)
1257 {
1258 /*
1259 * Wait for some time to allow the dma to drain, if any.
1260 */
1261 pci_unregister_driver(&netxen_driver);
1262 destroy_workqueue(netxen_workq);
1263 }
1264
1265 module_exit(netxen_exit_module);