/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.
 *
 * Contact Information:
 * Cupertino, CA 95014-0701
 */
31 #include <linux/vmalloc.h>
32 #include <linux/interrupt.h>
33 #include "netxen_nic_hw.h"
35 #include "netxen_nic.h"
36 #include "netxen_nic_phan_reg.h"
38 #include <linux/dma-mapping.h>
39 #include <linux/if_vlan.h>
41 #include <linux/ipv6.h>
43 MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID
);
47 char netxen_nic_driver_name
[] = "netxen_nic";
48 static char netxen_nic_driver_string
[] = "NetXen Network Driver version "
49 NETXEN_NIC_LINUX_VERSIONID
;
51 static int port_mode
= NETXEN_PORT_MODE_AUTO_NEG
;
53 /* Default to restricted 1G auto-neg mode */
54 static int wol_port_mode
= 5;
56 static int use_msi
= 1;
58 static int use_msi_x
= 1;
60 /* Local functions to NetXen NIC driver */
61 static int __devinit
netxen_nic_probe(struct pci_dev
*pdev
,
62 const struct pci_device_id
*ent
);
63 static void __devexit
netxen_nic_remove(struct pci_dev
*pdev
);
64 static int netxen_nic_open(struct net_device
*netdev
);
65 static int netxen_nic_close(struct net_device
*netdev
);
66 static int netxen_nic_xmit_frame(struct sk_buff
*, struct net_device
*);
67 static void netxen_tx_timeout(struct net_device
*netdev
);
68 static void netxen_tx_timeout_task(struct work_struct
*work
);
69 static void netxen_watchdog(unsigned long);
70 static int netxen_nic_poll(struct napi_struct
*napi
, int budget
);
71 #ifdef CONFIG_NET_POLL_CONTROLLER
72 static void netxen_nic_poll_controller(struct net_device
*netdev
);
74 static irqreturn_t
netxen_intr(int irq
, void *data
);
75 static irqreturn_t
netxen_msi_intr(int irq
, void *data
);
76 static irqreturn_t
netxen_msix_intr(int irq
, void *data
);
78 /* PCI Device ID Table */
79 #define ENTRY(device) \
80 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
81 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
83 static struct pci_device_id netxen_pci_tbl
[] __devinitdata
= {
84 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR
),
85 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4
),
86 ENTRY(PCI_DEVICE_ID_NX2031_4GCU
),
87 ENTRY(PCI_DEVICE_ID_NX2031_IMEZ
),
88 ENTRY(PCI_DEVICE_ID_NX2031_HMEZ
),
89 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT
),
90 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2
),
91 ENTRY(PCI_DEVICE_ID_NX3031
),
95 MODULE_DEVICE_TABLE(pci
, netxen_pci_tbl
);
/* Driver-private workqueue used for watchdog and tx-timeout tasks. */
static struct workqueue_struct *netxen_workq;
#define SCHEDULE_WORK(tp)	queue_work(netxen_workq, tp)
#define FLUSH_SCHEDULED_WORK()	flush_workqueue(netxen_workq)
/* NOTE: the duplicate forward declaration of netxen_watchdog() that used to
 * follow here was removed; it is already declared with the other local
 * prototypes near the top of the file. */
103 static uint32_t crb_cmd_producer
[4] = {
104 CRB_CMD_PRODUCER_OFFSET
, CRB_CMD_PRODUCER_OFFSET_1
,
105 CRB_CMD_PRODUCER_OFFSET_2
, CRB_CMD_PRODUCER_OFFSET_3
109 netxen_nic_update_cmd_producer(struct netxen_adapter
*adapter
,
110 struct nx_host_tx_ring
*tx_ring
, u32 producer
)
112 NXWR32(adapter
, tx_ring
->crb_cmd_producer
, producer
);
115 static uint32_t crb_cmd_consumer
[4] = {
116 CRB_CMD_CONSUMER_OFFSET
, CRB_CMD_CONSUMER_OFFSET_1
,
117 CRB_CMD_CONSUMER_OFFSET_2
, CRB_CMD_CONSUMER_OFFSET_3
121 netxen_nic_update_cmd_consumer(struct netxen_adapter
*adapter
,
122 struct nx_host_tx_ring
*tx_ring
, u32 consumer
)
124 NXWR32(adapter
, tx_ring
->crb_cmd_consumer
, consumer
);
127 static uint32_t msi_tgt_status
[8] = {
128 ISR_INT_TARGET_STATUS
, ISR_INT_TARGET_STATUS_F1
,
129 ISR_INT_TARGET_STATUS_F2
, ISR_INT_TARGET_STATUS_F3
,
130 ISR_INT_TARGET_STATUS_F4
, ISR_INT_TARGET_STATUS_F5
,
131 ISR_INT_TARGET_STATUS_F6
, ISR_INT_TARGET_STATUS_F7
/* Legacy (INTx) interrupt register sets; an entry is picked per PCI
 * function in netxen_setup_intr() below. */
static struct netxen_legacy_intr_set legacy_intr
[] = NX_LEGACY_INTR_CONFIG
;
136 static inline void netxen_nic_disable_int(struct nx_host_sds_ring
*sds_ring
)
138 struct netxen_adapter
*adapter
= sds_ring
->adapter
;
140 NXWR32(adapter
, sds_ring
->crb_intr_mask
, 0);
143 static inline void netxen_nic_enable_int(struct nx_host_sds_ring
*sds_ring
)
145 struct netxen_adapter
*adapter
= sds_ring
->adapter
;
147 NXWR32(adapter
, sds_ring
->crb_intr_mask
, 0x1);
149 if (!NETXEN_IS_MSI_FAMILY(adapter
))
150 adapter
->pci_write_immediate(adapter
,
151 adapter
->legacy_intr
.tgt_mask_reg
, 0xfbff);
155 netxen_alloc_sds_rings(struct netxen_recv_context
*recv_ctx
, int count
)
157 int size
= sizeof(struct nx_host_sds_ring
) * count
;
159 recv_ctx
->sds_rings
= kzalloc(size
, GFP_KERNEL
);
161 return (recv_ctx
->sds_rings
== NULL
);
165 netxen_free_sds_rings(struct netxen_recv_context
*recv_ctx
)
167 if (recv_ctx
->sds_rings
!= NULL
)
168 kfree(recv_ctx
->sds_rings
);
172 netxen_napi_add(struct netxen_adapter
*adapter
, struct net_device
*netdev
)
175 struct nx_host_sds_ring
*sds_ring
;
176 struct netxen_recv_context
*recv_ctx
= &adapter
->recv_ctx
;
178 if ((adapter
->flags
& NETXEN_NIC_MSIX_ENABLED
) &&
179 adapter
->rss_supported
)
180 adapter
->max_sds_rings
= (num_online_cpus() >= 4) ? 4 : 2;
182 adapter
->max_sds_rings
= 1;
184 if (netxen_alloc_sds_rings(recv_ctx
, adapter
->max_sds_rings
))
187 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
188 sds_ring
= &recv_ctx
->sds_rings
[ring
];
189 netif_napi_add(netdev
, &sds_ring
->napi
,
190 netxen_nic_poll
, NETXEN_NETDEV_WEIGHT
);
197 netxen_napi_enable(struct netxen_adapter
*adapter
)
200 struct nx_host_sds_ring
*sds_ring
;
201 struct netxen_recv_context
*recv_ctx
= &adapter
->recv_ctx
;
203 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
204 sds_ring
= &recv_ctx
->sds_rings
[ring
];
205 napi_enable(&sds_ring
->napi
);
206 netxen_nic_enable_int(sds_ring
);
211 netxen_napi_disable(struct netxen_adapter
*adapter
)
214 struct nx_host_sds_ring
*sds_ring
;
215 struct netxen_recv_context
*recv_ctx
= &adapter
->recv_ctx
;
217 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
218 sds_ring
= &recv_ctx
->sds_rings
[ring
];
219 netxen_nic_disable_int(sds_ring
);
220 napi_disable(&sds_ring
->napi
);
224 static int nx_set_dma_mask(struct netxen_adapter
*adapter
, uint8_t revision_id
)
226 struct pci_dev
*pdev
= adapter
->pdev
;
227 uint64_t mask
, cmask
;
229 adapter
->pci_using_dac
= 0;
231 mask
= DMA_BIT_MASK(32);
233 * Consistent DMA mask is set to 32 bit because it cannot be set to
234 * 35 bits. For P3 also leave it at 32 bits for now. Only the rings
235 * come off this pool.
237 cmask
= DMA_BIT_MASK(32);
240 if (revision_id
>= NX_P3_B0
)
241 mask
= DMA_BIT_MASK(39);
242 else if (revision_id
== NX_P2_C1
)
243 mask
= DMA_BIT_MASK(35);
245 if (pci_set_dma_mask(pdev
, mask
) == 0 &&
246 pci_set_consistent_dma_mask(pdev
, cmask
) == 0) {
247 adapter
->pci_using_dac
= 1;
254 /* Update addressable range if firmware supports it */
256 nx_update_dma_mask(struct netxen_adapter
*adapter
)
258 int change
, shift
, err
;
259 uint64_t mask
, old_mask
;
260 struct pci_dev
*pdev
= adapter
->pdev
;
264 shift
= NXRD32(adapter
, CRB_DMA_SHIFT
);
268 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
) && (shift
> 9))
270 else if ((adapter
->ahw
.revision_id
== NX_P2_C1
) && (shift
<= 4))
274 old_mask
= pdev
->dma_mask
;
275 mask
= (1ULL<<(32+shift
)) - 1;
277 err
= pci_set_dma_mask(pdev
, mask
);
279 return pci_set_dma_mask(pdev
, old_mask
);
285 static void netxen_check_options(struct netxen_adapter
*adapter
)
287 if (adapter
->ahw
.port_type
== NETXEN_NIC_XGBE
)
288 adapter
->num_rxd
= MAX_RCV_DESCRIPTORS_10G
;
289 else if (adapter
->ahw
.port_type
== NETXEN_NIC_GBE
)
290 adapter
->num_rxd
= MAX_RCV_DESCRIPTORS_1G
;
292 adapter
->msix_supported
= 0;
293 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
)) {
294 adapter
->msix_supported
= !!use_msi_x
;
295 adapter
->rss_supported
= !!use_msi_x
;
296 } else if (adapter
->fw_version
>= NETXEN_VERSION_CODE(3, 4, 336)) {
297 switch (adapter
->ahw
.board_type
) {
298 case NETXEN_BRDTYPE_P2_SB31_10G
:
299 case NETXEN_BRDTYPE_P2_SB31_10G_CX4
:
300 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ
:
301 adapter
->msix_supported
= !!use_msi_x
;
302 adapter
->rss_supported
= !!use_msi_x
;
309 adapter
->num_txd
= MAX_CMD_DESCRIPTORS_HOST
;
310 adapter
->num_jumbo_rxd
= MAX_JUMBO_RCV_DESCRIPTORS
;
311 adapter
->num_lro_rxd
= MAX_LRO_RCV_DESCRIPTORS
;
317 netxen_check_hw_init(struct netxen_adapter
*adapter
, int first_boot
)
321 if (first_boot
== 0x55555555) {
322 /* This is the first boot after power up */
323 NXWR32(adapter
, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC
);
325 if (!NX_IS_REVISION_P2(adapter
->ahw
.revision_id
))
328 /* PCI bus master workaround */
329 first_boot
= NXRD32(adapter
, NETXEN_PCIE_REG(0x4));
330 if (!(first_boot
& 0x4)) {
332 NXWR32(adapter
, NETXEN_PCIE_REG(0x4), first_boot
);
333 first_boot
= NXRD32(adapter
, NETXEN_PCIE_REG(0x4));
336 /* This is the first boot after power up */
337 first_boot
= NXRD32(adapter
, NETXEN_ROMUSB_GLB_SW_RESET
);
338 if (first_boot
!= 0x80000f) {
339 /* clear the register for future unloads/loads */
340 NXWR32(adapter
, NETXEN_CAM_RAM(0x1fc), 0);
344 /* Start P2 boot loader */
345 val
= NXRD32(adapter
, NETXEN_ROMUSB_GLB_PEGTUNE_DONE
);
346 NXWR32(adapter
, NETXEN_ROMUSB_GLB_PEGTUNE_DONE
, val
| 0x1);
350 val
= NXRD32(adapter
, NETXEN_CAM_RAM(0x1fc));
352 if (++timeout
> 5000)
355 } while (val
== NETXEN_BDINFO_MAGIC
);
360 static void netxen_set_port_mode(struct netxen_adapter
*adapter
)
364 val
= adapter
->ahw
.board_type
;
365 if ((val
== NETXEN_BRDTYPE_P3_HMEZ
) ||
366 (val
== NETXEN_BRDTYPE_P3_XG_LOM
)) {
367 if (port_mode
== NETXEN_PORT_MODE_802_3_AP
) {
368 data
= NETXEN_PORT_MODE_802_3_AP
;
369 NXWR32(adapter
, NETXEN_PORT_MODE_ADDR
, data
);
370 } else if (port_mode
== NETXEN_PORT_MODE_XG
) {
371 data
= NETXEN_PORT_MODE_XG
;
372 NXWR32(adapter
, NETXEN_PORT_MODE_ADDR
, data
);
373 } else if (port_mode
== NETXEN_PORT_MODE_AUTO_NEG_1G
) {
374 data
= NETXEN_PORT_MODE_AUTO_NEG_1G
;
375 NXWR32(adapter
, NETXEN_PORT_MODE_ADDR
, data
);
376 } else if (port_mode
== NETXEN_PORT_MODE_AUTO_NEG_XG
) {
377 data
= NETXEN_PORT_MODE_AUTO_NEG_XG
;
378 NXWR32(adapter
, NETXEN_PORT_MODE_ADDR
, data
);
380 data
= NETXEN_PORT_MODE_AUTO_NEG
;
381 NXWR32(adapter
, NETXEN_PORT_MODE_ADDR
, data
);
384 if ((wol_port_mode
!= NETXEN_PORT_MODE_802_3_AP
) &&
385 (wol_port_mode
!= NETXEN_PORT_MODE_XG
) &&
386 (wol_port_mode
!= NETXEN_PORT_MODE_AUTO_NEG_1G
) &&
387 (wol_port_mode
!= NETXEN_PORT_MODE_AUTO_NEG_XG
)) {
388 wol_port_mode
= NETXEN_PORT_MODE_AUTO_NEG
;
390 NXWR32(adapter
, NETXEN_WOL_PORT_MODE
, wol_port_mode
);
394 static void netxen_set_msix_bit(struct pci_dev
*pdev
, int enable
)
399 pos
= pci_find_capability(pdev
, PCI_CAP_ID_MSIX
);
401 pci_read_config_dword(pdev
, pos
, &control
);
403 control
|= PCI_MSIX_FLAGS_ENABLE
;
406 pci_write_config_dword(pdev
, pos
, control
);
410 static void netxen_init_msix_entries(struct netxen_adapter
*adapter
)
414 for (i
= 0; i
< MSIX_ENTRIES_PER_ADAPTER
; i
++)
415 adapter
->msix_entries
[i
].entry
= i
;
419 netxen_read_mac_addr(struct netxen_adapter
*adapter
)
424 struct net_device
*netdev
= adapter
->netdev
;
425 struct pci_dev
*pdev
= adapter
->pdev
;
427 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
)) {
428 if (netxen_p3_get_mac_addr(adapter
, &mac_addr
) != 0)
431 if (netxen_get_flash_mac_addr(adapter
, &mac_addr
) != 0)
435 p
= (unsigned char *)&mac_addr
;
436 for (i
= 0; i
< 6; i
++)
437 netdev
->dev_addr
[i
] = *(p
+ 5 - i
);
439 memcpy(netdev
->perm_addr
, netdev
->dev_addr
, netdev
->addr_len
);
441 /* set station address */
443 if (!is_valid_ether_addr(netdev
->perm_addr
))
444 dev_warn(&pdev
->dev
, "Bad MAC address %pM.\n", netdev
->dev_addr
);
446 adapter
->macaddr_set(adapter
, netdev
->dev_addr
);
451 static void netxen_set_multicast_list(struct net_device
*dev
)
453 struct netxen_adapter
*adapter
= netdev_priv(dev
);
455 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
))
456 netxen_p3_nic_set_multi(dev
);
458 netxen_p2_nic_set_multi(dev
);
461 static const struct net_device_ops netxen_netdev_ops
= {
462 .ndo_open
= netxen_nic_open
,
463 .ndo_stop
= netxen_nic_close
,
464 .ndo_start_xmit
= netxen_nic_xmit_frame
,
465 .ndo_get_stats
= netxen_nic_get_stats
,
466 .ndo_validate_addr
= eth_validate_addr
,
467 .ndo_set_multicast_list
= netxen_set_multicast_list
,
468 .ndo_set_mac_address
= netxen_nic_set_mac
,
469 .ndo_change_mtu
= netxen_nic_change_mtu
,
470 .ndo_tx_timeout
= netxen_tx_timeout
,
471 #ifdef CONFIG_NET_POLL_CONTROLLER
472 .ndo_poll_controller
= netxen_nic_poll_controller
,
477 netxen_setup_intr(struct netxen_adapter
*adapter
)
479 struct netxen_legacy_intr_set
*legacy_intrp
;
480 struct pci_dev
*pdev
= adapter
->pdev
;
482 adapter
->flags
&= ~(NETXEN_NIC_MSI_ENABLED
| NETXEN_NIC_MSIX_ENABLED
);
484 if (adapter
->ahw
.revision_id
>= NX_P3_B0
)
485 legacy_intrp
= &legacy_intr
[adapter
->ahw
.pci_func
];
487 legacy_intrp
= &legacy_intr
[0];
488 adapter
->legacy_intr
.int_vec_bit
= legacy_intrp
->int_vec_bit
;
489 adapter
->legacy_intr
.tgt_status_reg
= legacy_intrp
->tgt_status_reg
;
490 adapter
->legacy_intr
.tgt_mask_reg
= legacy_intrp
->tgt_mask_reg
;
491 adapter
->legacy_intr
.pci_int_reg
= legacy_intrp
->pci_int_reg
;
493 netxen_set_msix_bit(pdev
, 0);
495 if (adapter
->msix_supported
) {
497 netxen_init_msix_entries(adapter
);
498 if (pci_enable_msix(pdev
, adapter
->msix_entries
,
499 MSIX_ENTRIES_PER_ADAPTER
))
502 adapter
->flags
|= NETXEN_NIC_MSIX_ENABLED
;
503 netxen_set_msix_bit(pdev
, 1);
504 dev_info(&pdev
->dev
, "using msi-x interrupts\n");
508 if (use_msi
&& !pci_enable_msi(pdev
)) {
509 adapter
->flags
|= NETXEN_NIC_MSI_ENABLED
;
510 dev_info(&pdev
->dev
, "using msi interrupts\n");
512 dev_info(&pdev
->dev
, "using legacy interrupts\n");
513 adapter
->msix_entries
[0].vector
= pdev
->irq
;
518 netxen_teardown_intr(struct netxen_adapter
*adapter
)
520 if (adapter
->flags
& NETXEN_NIC_MSIX_ENABLED
)
521 pci_disable_msix(adapter
->pdev
);
522 if (adapter
->flags
& NETXEN_NIC_MSI_ENABLED
)
523 pci_disable_msi(adapter
->pdev
);
527 netxen_cleanup_pci_map(struct netxen_adapter
*adapter
)
529 if (adapter
->ahw
.db_base
!= NULL
)
530 iounmap(adapter
->ahw
.db_base
);
531 if (adapter
->ahw
.pci_base0
!= NULL
)
532 iounmap(adapter
->ahw
.pci_base0
);
533 if (adapter
->ahw
.pci_base1
!= NULL
)
534 iounmap(adapter
->ahw
.pci_base1
);
535 if (adapter
->ahw
.pci_base2
!= NULL
)
536 iounmap(adapter
->ahw
.pci_base2
);
540 netxen_setup_pci_map(struct netxen_adapter
*adapter
)
542 void __iomem
*mem_ptr0
= NULL
;
543 void __iomem
*mem_ptr1
= NULL
;
544 void __iomem
*mem_ptr2
= NULL
;
545 void __iomem
*db_ptr
= NULL
;
547 unsigned long mem_base
, mem_len
, db_base
, db_len
= 0, pci_len0
= 0;
549 struct pci_dev
*pdev
= adapter
->pdev
;
550 int pci_func
= adapter
->ahw
.pci_func
;
555 * Set the CRB window to invalid. If any register in window 0 is
556 * accessed it should set the window to 0 and then reset it to 1.
558 adapter
->curr_window
= 255;
559 adapter
->ahw
.qdr_sn_window
= -1;
560 adapter
->ahw
.ddr_mn_window
= -1;
562 /* remap phys address */
563 mem_base
= pci_resource_start(pdev
, 0); /* 0 is for BAR 0 */
564 mem_len
= pci_resource_len(pdev
, 0);
567 adapter
->hw_write_wx
= netxen_nic_hw_write_wx_128M
;
568 adapter
->hw_read_wx
= netxen_nic_hw_read_wx_128M
;
569 adapter
->pci_read_immediate
= netxen_nic_pci_read_immediate_128M
;
570 adapter
->pci_write_immediate
= netxen_nic_pci_write_immediate_128M
;
571 adapter
->pci_set_window
= netxen_nic_pci_set_window_128M
;
572 adapter
->pci_mem_read
= netxen_nic_pci_mem_read_128M
;
573 adapter
->pci_mem_write
= netxen_nic_pci_mem_write_128M
;
575 /* 128 Meg of memory */
576 if (mem_len
== NETXEN_PCI_128MB_SIZE
) {
577 mem_ptr0
= ioremap(mem_base
, FIRST_PAGE_GROUP_SIZE
);
578 mem_ptr1
= ioremap(mem_base
+ SECOND_PAGE_GROUP_START
,
579 SECOND_PAGE_GROUP_SIZE
);
580 mem_ptr2
= ioremap(mem_base
+ THIRD_PAGE_GROUP_START
,
581 THIRD_PAGE_GROUP_SIZE
);
582 } else if (mem_len
== NETXEN_PCI_32MB_SIZE
) {
583 mem_ptr1
= ioremap(mem_base
, SECOND_PAGE_GROUP_SIZE
);
584 mem_ptr2
= ioremap(mem_base
+ THIRD_PAGE_GROUP_START
-
585 SECOND_PAGE_GROUP_START
, THIRD_PAGE_GROUP_SIZE
);
586 } else if (mem_len
== NETXEN_PCI_2MB_SIZE
) {
587 adapter
->hw_write_wx
= netxen_nic_hw_write_wx_2M
;
588 adapter
->hw_read_wx
= netxen_nic_hw_read_wx_2M
;
589 adapter
->pci_read_immediate
= netxen_nic_pci_read_immediate_2M
;
590 adapter
->pci_write_immediate
=
591 netxen_nic_pci_write_immediate_2M
;
592 adapter
->pci_set_window
= netxen_nic_pci_set_window_2M
;
593 adapter
->pci_mem_read
= netxen_nic_pci_mem_read_2M
;
594 adapter
->pci_mem_write
= netxen_nic_pci_mem_write_2M
;
596 mem_ptr0
= pci_ioremap_bar(pdev
, 0);
597 if (mem_ptr0
== NULL
) {
598 dev_err(&pdev
->dev
, "failed to map PCI bar 0\n");
603 adapter
->ahw
.ddr_mn_window
= 0;
604 adapter
->ahw
.qdr_sn_window
= 0;
606 adapter
->ahw
.mn_win_crb
= 0x100000 + PCIX_MN_WINDOW
+
608 adapter
->ahw
.ms_win_crb
= 0x100000 + PCIX_SN_WINDOW
;
610 adapter
->ahw
.ms_win_crb
+= (pci_func
* 0x20);
612 adapter
->ahw
.ms_win_crb
+=
613 0xA0 + ((pci_func
- 4) * 0x10);
618 dev_info(&pdev
->dev
, "%dMB memory map\n", (int)(mem_len
>>20));
620 adapter
->ahw
.pci_base0
= mem_ptr0
;
621 adapter
->ahw
.pci_len0
= pci_len0
;
622 adapter
->ahw
.pci_base1
= mem_ptr1
;
623 adapter
->ahw
.pci_base2
= mem_ptr2
;
625 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
))
628 db_base
= pci_resource_start(pdev
, 4); /* doorbell is on bar 4 */
629 db_len
= pci_resource_len(pdev
, 4);
632 printk(KERN_ERR
"%s: doorbell is disabled\n",
633 netxen_nic_driver_name
);
638 db_ptr
= ioremap(db_base
, NETXEN_DB_MAPSIZE_BYTES
);
640 printk(KERN_ERR
"%s: Failed to allocate doorbell map.",
641 netxen_nic_driver_name
);
647 adapter
->ahw
.db_base
= db_ptr
;
648 adapter
->ahw
.db_len
= db_len
;
652 netxen_cleanup_pci_map(adapter
);
657 netxen_start_firmware(struct netxen_adapter
*adapter
, int request_fw
)
659 int val
, err
, first_boot
;
660 struct pci_dev
*pdev
= adapter
->pdev
;
662 int first_driver
= 0;
664 if (NX_IS_REVISION_P2(adapter
->ahw
.revision_id
))
665 first_driver
= (adapter
->portnum
== 0);
667 first_driver
= (adapter
->ahw
.pci_func
== 0);
672 first_boot
= NXRD32(adapter
, NETXEN_CAM_RAM(0x1fc));
674 err
= netxen_check_hw_init(adapter
, first_boot
);
676 dev_err(&pdev
->dev
, "error in init HW init sequence\n");
681 netxen_request_firmware(adapter
);
683 if (first_boot
!= 0x55555555) {
684 NXWR32(adapter
, CRB_CMDPEG_STATE
, 0);
685 netxen_pinit_from_rom(adapter
, 0);
689 NXWR32(adapter
, CRB_DMA_SHIFT
, 0x55555555);
690 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
))
691 netxen_set_port_mode(adapter
);
693 netxen_load_firmware(adapter
);
695 if (NX_IS_REVISION_P2(adapter
->ahw
.revision_id
)) {
697 /* Initialize multicast addr pool owners */
699 if (adapter
->ahw
.port_type
== NETXEN_NIC_XGBE
)
701 NXWR32(adapter
, NETXEN_MAC_ADDR_CNTL_REG
, val
);
705 err
= netxen_initialize_adapter_offload(adapter
);
710 * Tell the hardware our version number.
712 val
= (_NETXEN_NIC_LINUX_MAJOR
<< 16)
713 | ((_NETXEN_NIC_LINUX_MINOR
<< 8))
714 | (_NETXEN_NIC_LINUX_SUBVERSION
);
715 NXWR32(adapter
, CRB_DRIVER_VERSION
, val
);
717 /* Handshake with the card before we register the devices. */
718 err
= netxen_phantom_init(adapter
, NETXEN_NIC_PEG_TUNE
);
720 netxen_free_adapter_offload(adapter
);
728 netxen_nic_request_irq(struct netxen_adapter
*adapter
)
730 irq_handler_t handler
;
731 struct nx_host_sds_ring
*sds_ring
;
734 unsigned long flags
= IRQF_SAMPLE_RANDOM
;
735 struct net_device
*netdev
= adapter
->netdev
;
736 struct netxen_recv_context
*recv_ctx
= &adapter
->recv_ctx
;
738 if (adapter
->flags
& NETXEN_NIC_MSIX_ENABLED
)
739 handler
= netxen_msix_intr
;
740 else if (adapter
->flags
& NETXEN_NIC_MSI_ENABLED
)
741 handler
= netxen_msi_intr
;
743 flags
|= IRQF_SHARED
;
744 handler
= netxen_intr
;
746 adapter
->irq
= netdev
->irq
;
748 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
749 sds_ring
= &recv_ctx
->sds_rings
[ring
];
750 sprintf(sds_ring
->name
, "%16s[%d]", netdev
->name
, ring
);
751 err
= request_irq(sds_ring
->irq
, handler
,
752 flags
, sds_ring
->name
, sds_ring
);
761 netxen_nic_free_irq(struct netxen_adapter
*adapter
)
764 struct nx_host_sds_ring
*sds_ring
;
766 struct netxen_recv_context
*recv_ctx
= &adapter
->recv_ctx
;
768 for (ring
= 0; ring
< adapter
->max_sds_rings
; ring
++) {
769 sds_ring
= &recv_ctx
->sds_rings
[ring
];
770 free_irq(sds_ring
->irq
, sds_ring
);
775 netxen_nic_up(struct netxen_adapter
*adapter
, struct net_device
*netdev
)
779 err
= adapter
->init_port(adapter
, adapter
->physical_port
);
781 printk(KERN_ERR
"%s: Failed to initialize port %d\n",
782 netxen_nic_driver_name
, adapter
->portnum
);
785 adapter
->macaddr_set(adapter
, netdev
->dev_addr
);
787 netxen_nic_set_link_parameters(adapter
);
789 netxen_set_multicast_list(netdev
);
790 if (adapter
->set_mtu
)
791 adapter
->set_mtu(adapter
, netdev
->mtu
);
793 adapter
->ahw
.linkup
= 0;
794 mod_timer(&adapter
->watchdog_timer
, jiffies
);
796 netxen_napi_enable(adapter
);
798 if (adapter
->max_sds_rings
> 1)
799 netxen_config_rss(adapter
, 1);
801 if (adapter
->capabilities
& NX_FW_CAPABILITY_LINK_NOTIFICATION
)
802 netxen_linkevent_request(adapter
, 1);
808 netxen_nic_down(struct netxen_adapter
*adapter
, struct net_device
*netdev
)
810 netif_carrier_off(netdev
);
811 netif_stop_queue(netdev
);
812 netxen_napi_disable(adapter
);
814 if (adapter
->stop_port
)
815 adapter
->stop_port(adapter
);
817 netxen_release_tx_buffers(adapter
);
819 FLUSH_SCHEDULED_WORK();
820 del_timer_sync(&adapter
->watchdog_timer
);
825 netxen_nic_attach(struct netxen_adapter
*adapter
)
827 struct net_device
*netdev
= adapter
->netdev
;
828 struct pci_dev
*pdev
= adapter
->pdev
;
830 struct nx_host_rds_ring
*rds_ring
;
831 struct nx_host_tx_ring
*tx_ring
;
833 err
= netxen_init_firmware(adapter
);
835 printk(KERN_ERR
"Failed to init firmware\n");
839 if (adapter
->fw_major
< 4)
840 adapter
->max_rds_rings
= 3;
842 adapter
->max_rds_rings
= 2;
844 err
= netxen_alloc_sw_resources(adapter
);
846 printk(KERN_ERR
"%s: Error in setting sw resources\n",
851 netxen_nic_clear_stats(adapter
);
853 err
= netxen_alloc_hw_resources(adapter
);
855 printk(KERN_ERR
"%s: Error in setting hw resources\n",
857 goto err_out_free_sw
;
860 if (adapter
->fw_major
< 4) {
861 tx_ring
= adapter
->tx_ring
;
862 tx_ring
->crb_cmd_producer
= crb_cmd_producer
[adapter
->portnum
];
863 tx_ring
->crb_cmd_consumer
= crb_cmd_consumer
[adapter
->portnum
];
865 netxen_nic_update_cmd_producer(adapter
, tx_ring
, 0);
866 netxen_nic_update_cmd_consumer(adapter
, tx_ring
, 0);
869 for (ring
= 0; ring
< adapter
->max_rds_rings
; ring
++) {
870 rds_ring
= &adapter
->recv_ctx
.rds_rings
[ring
];
871 netxen_post_rx_buffers(adapter
, ring
, rds_ring
);
874 err
= netxen_nic_request_irq(adapter
);
876 dev_err(&pdev
->dev
, "%s: failed to setup interrupt\n",
878 goto err_out_free_rxbuf
;
881 adapter
->is_up
= NETXEN_ADAPTER_UP_MAGIC
;
885 netxen_release_rx_buffers(adapter
);
886 netxen_free_hw_resources(adapter
);
888 netxen_free_sw_resources(adapter
);
893 netxen_nic_detach(struct netxen_adapter
*adapter
)
895 netxen_nic_free_irq(adapter
);
897 netxen_release_rx_buffers(adapter
);
898 netxen_free_hw_resources(adapter
);
899 netxen_free_sw_resources(adapter
);
905 netxen_nic_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
907 struct net_device
*netdev
= NULL
;
908 struct netxen_adapter
*adapter
= NULL
;
910 int pci_func_id
= PCI_FUNC(pdev
->devfn
);
913 if (pdev
->class != 0x020000) {
914 printk(KERN_DEBUG
"NetXen function %d, class %x will not "
915 "be enabled.\n",pci_func_id
, pdev
->class);
919 if (pdev
->revision
>= NX_P3_A0
&& pdev
->revision
< NX_P3_B1
) {
920 printk(KERN_WARNING
"NetXen chip revisions between 0x%x-0x%x"
921 "will not be enabled.\n",
926 if ((err
= pci_enable_device(pdev
)))
929 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
931 goto err_out_disable_pdev
;
934 if ((err
= pci_request_regions(pdev
, netxen_nic_driver_name
)))
935 goto err_out_disable_pdev
;
937 pci_set_master(pdev
);
939 netdev
= alloc_etherdev(sizeof(struct netxen_adapter
));
941 printk(KERN_ERR
"%s: Failed to allocate memory for the "
942 "device block.Check system memory resource"
943 " usage.\n", netxen_nic_driver_name
);
944 goto err_out_free_res
;
947 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
949 adapter
= netdev_priv(netdev
);
950 adapter
->netdev
= netdev
;
951 adapter
->pdev
= pdev
;
952 adapter
->ahw
.pci_func
= pci_func_id
;
954 revision_id
= pdev
->revision
;
955 adapter
->ahw
.revision_id
= revision_id
;
957 err
= nx_set_dma_mask(adapter
, revision_id
);
959 goto err_out_free_netdev
;
961 rwlock_init(&adapter
->adapter_lock
);
962 spin_lock_init(&adapter
->tx_clean_lock
);
964 err
= netxen_setup_pci_map(adapter
);
966 goto err_out_free_netdev
;
968 /* This will be reset for mezz cards */
969 adapter
->portnum
= pci_func_id
;
970 adapter
->rx_csum
= 1;
971 adapter
->mc_enabled
= 0;
972 if (NX_IS_REVISION_P3(revision_id
))
973 adapter
->max_mc_count
= 38;
975 adapter
->max_mc_count
= 16;
977 netdev
->netdev_ops
= &netxen_netdev_ops
;
978 netdev
->watchdog_timeo
= 2*HZ
;
980 netxen_nic_change_mtu(netdev
, netdev
->mtu
);
982 SET_ETHTOOL_OPS(netdev
, &netxen_nic_ethtool_ops
);
984 netdev
->features
|= (NETIF_F_SG
| NETIF_F_IP_CSUM
| NETIF_F_TSO
);
985 netdev
->features
|= (NETIF_F_GRO
);
986 netdev
->vlan_features
|= (NETIF_F_SG
| NETIF_F_IP_CSUM
| NETIF_F_TSO
);
988 if (NX_IS_REVISION_P3(revision_id
)) {
989 netdev
->features
|= (NETIF_F_IPV6_CSUM
| NETIF_F_TSO6
);
990 netdev
->vlan_features
|= (NETIF_F_IPV6_CSUM
| NETIF_F_TSO6
);
993 if (adapter
->pci_using_dac
) {
994 netdev
->features
|= NETIF_F_HIGHDMA
;
995 netdev
->vlan_features
|= NETIF_F_HIGHDMA
;
998 if (netxen_nic_get_board_info(adapter
) != 0) {
999 printk("%s: Error getting board config info.\n",
1000 netxen_nic_driver_name
);
1002 goto err_out_iounmap
;
1005 netxen_initialize_adapter_ops(adapter
);
1007 /* Mezz cards have PCI function 0,2,3 enabled */
1008 switch (adapter
->ahw
.board_type
) {
1009 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ
:
1010 case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ
:
1011 if (pci_func_id
>= 2)
1012 adapter
->portnum
= pci_func_id
- 2;
1018 err
= netxen_start_firmware(adapter
, 1);
1020 goto err_out_iounmap
;
1022 nx_update_dma_mask(adapter
);
1024 netxen_nic_get_firmware_info(adapter
);
1027 * See if the firmware gave us a virtual-physical port mapping.
1029 adapter
->physical_port
= adapter
->portnum
;
1030 if (adapter
->fw_major
< 4) {
1031 i
= NXRD32(adapter
, CRB_V2P(adapter
->portnum
));
1032 if (i
!= 0x55555555)
1033 adapter
->physical_port
= i
;
1036 netxen_check_options(adapter
);
1038 netxen_setup_intr(adapter
);
1040 netdev
->irq
= adapter
->msix_entries
[0].vector
;
1042 if (netxen_napi_add(adapter
, netdev
))
1043 goto err_out_disable_msi
;
1045 init_timer(&adapter
->watchdog_timer
);
1046 adapter
->watchdog_timer
.function
= &netxen_watchdog
;
1047 adapter
->watchdog_timer
.data
= (unsigned long)adapter
;
1048 INIT_WORK(&adapter
->watchdog_task
, netxen_watchdog_task
);
1049 INIT_WORK(&adapter
->tx_timeout_task
, netxen_tx_timeout_task
);
1051 err
= netxen_read_mac_addr(adapter
);
1053 dev_warn(&pdev
->dev
, "failed to read mac addr\n");
1055 netif_carrier_off(netdev
);
1056 netif_stop_queue(netdev
);
1058 if ((err
= register_netdev(netdev
))) {
1059 printk(KERN_ERR
"%s: register_netdev failed port #%d"
1060 " aborting\n", netxen_nic_driver_name
,
1063 goto err_out_disable_msi
;
1066 pci_set_drvdata(pdev
, adapter
);
1068 switch (adapter
->ahw
.port_type
) {
1069 case NETXEN_NIC_GBE
:
1070 dev_info(&adapter
->pdev
->dev
, "%s: GbE port initialized\n",
1071 adapter
->netdev
->name
);
1073 case NETXEN_NIC_XGBE
:
1074 dev_info(&adapter
->pdev
->dev
, "%s: XGbE port initialized\n",
1075 adapter
->netdev
->name
);
1081 err_out_disable_msi
:
1082 netxen_teardown_intr(adapter
);
1084 netxen_free_adapter_offload(adapter
);
1087 netxen_cleanup_pci_map(adapter
);
1089 err_out_free_netdev
:
1090 free_netdev(netdev
);
1093 pci_release_regions(pdev
);
1095 err_out_disable_pdev
:
1096 pci_set_drvdata(pdev
, NULL
);
1097 pci_disable_device(pdev
);
1101 static void __devexit
netxen_nic_remove(struct pci_dev
*pdev
)
1103 struct netxen_adapter
*adapter
;
1104 struct net_device
*netdev
;
1106 adapter
= pci_get_drvdata(pdev
);
1107 if (adapter
== NULL
)
1110 netdev
= adapter
->netdev
;
1112 unregister_netdev(netdev
);
1114 if (adapter
->is_up
== NETXEN_ADAPTER_UP_MAGIC
) {
1115 netxen_nic_detach(adapter
);
1117 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
))
1118 netxen_p3_free_mac_list(adapter
);
1121 if (adapter
->portnum
== 0)
1122 netxen_free_adapter_offload(adapter
);
1124 netxen_teardown_intr(adapter
);
1125 netxen_free_sds_rings(&adapter
->recv_ctx
);
1127 netxen_cleanup_pci_map(adapter
);
1129 netxen_release_firmware(adapter
);
1131 pci_release_regions(pdev
);
1132 pci_disable_device(pdev
);
1133 pci_set_drvdata(pdev
, NULL
);
1135 free_netdev(netdev
);
1139 netxen_nic_suspend(struct pci_dev
*pdev
, pm_message_t state
)
1142 struct netxen_adapter
*adapter
= pci_get_drvdata(pdev
);
1143 struct net_device
*netdev
= adapter
->netdev
;
1145 netif_device_detach(netdev
);
1147 if (netif_running(netdev
))
1148 netxen_nic_down(adapter
, netdev
);
1150 if (adapter
->is_up
== NETXEN_ADAPTER_UP_MAGIC
)
1151 netxen_nic_detach(adapter
);
1153 pci_save_state(pdev
);
1155 if (netxen_nic_wol_supported(adapter
)) {
1156 pci_enable_wake(pdev
, PCI_D3cold
, 1);
1157 pci_enable_wake(pdev
, PCI_D3hot
, 1);
1160 pci_disable_device(pdev
);
1161 pci_set_power_state(pdev
, pci_choose_state(pdev
, state
));
1167 netxen_nic_resume(struct pci_dev
*pdev
)
1169 struct netxen_adapter
*adapter
= pci_get_drvdata(pdev
);
1170 struct net_device
*netdev
= adapter
->netdev
;
1173 pci_set_power_state(pdev
, PCI_D0
);
1174 pci_restore_state(pdev
);
1176 err
= pci_enable_device(pdev
);
1180 adapter
->curr_window
= 255;
1182 err
= netxen_start_firmware(adapter
, 0);
1184 dev_err(&pdev
->dev
, "failed to start firmware\n");
1188 if (netif_running(netdev
)) {
1189 err
= netxen_nic_attach(adapter
);
1193 err
= netxen_nic_up(adapter
, netdev
);
1197 netif_device_attach(netdev
);
1203 static int netxen_nic_open(struct net_device
*netdev
)
1205 struct netxen_adapter
*adapter
= netdev_priv(netdev
);
1208 if (adapter
->driver_mismatch
)
1211 if (adapter
->is_up
!= NETXEN_ADAPTER_UP_MAGIC
) {
1212 err
= netxen_nic_attach(adapter
);
1217 err
= netxen_nic_up(adapter
, netdev
);
1221 netif_start_queue(netdev
);
1226 netxen_nic_detach(adapter
);
/*
 * netxen_nic_close - Disables a network interface entry point
 */
static int netxen_nic_close(struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);

	netxen_nic_down(adapter, netdev);

	return 0;
}
1241 static bool netxen_tso_check(struct net_device
*netdev
,
1242 struct cmd_desc_type0
*desc
, struct sk_buff
*skb
)
1245 u8 opcode
= TX_ETHER_PKT
;
1246 __be16 protocol
= skb
->protocol
;
1249 if (protocol
== cpu_to_be16(ETH_P_8021Q
)) {
1250 struct vlan_ethhdr
*vh
= (struct vlan_ethhdr
*)skb
->data
;
1251 protocol
= vh
->h_vlan_encapsulated_proto
;
1252 flags
= FLAGS_VLAN_TAGGED
;
1255 if ((netdev
->features
& (NETIF_F_TSO
| NETIF_F_TSO6
)) &&
1256 skb_shinfo(skb
)->gso_size
> 0) {
1258 desc
->mss
= cpu_to_le16(skb_shinfo(skb
)->gso_size
);
1259 desc
->total_hdr_length
=
1260 skb_transport_offset(skb
) + tcp_hdrlen(skb
);
1262 opcode
= (protocol
== cpu_to_be16(ETH_P_IPV6
)) ?
1263 TX_TCP_LSO6
: TX_TCP_LSO
;
1266 } else if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
1269 if (protocol
== cpu_to_be16(ETH_P_IP
)) {
1270 l4proto
= ip_hdr(skb
)->protocol
;
1272 if (l4proto
== IPPROTO_TCP
)
1273 opcode
= TX_TCP_PKT
;
1274 else if(l4proto
== IPPROTO_UDP
)
1275 opcode
= TX_UDP_PKT
;
1276 } else if (protocol
== cpu_to_be16(ETH_P_IPV6
)) {
1277 l4proto
= ipv6_hdr(skb
)->nexthdr
;
1279 if (l4proto
== IPPROTO_TCP
)
1280 opcode
= TX_TCPV6_PKT
;
1281 else if(l4proto
== IPPROTO_UDP
)
1282 opcode
= TX_UDPV6_PKT
;
1285 desc
->tcp_hdr_offset
= skb_transport_offset(skb
);
1286 desc
->ip_hdr_offset
= skb_network_offset(skb
);
1287 netxen_set_tx_flags_opcode(desc
, flags
, opcode
);
1292 netxen_clean_tx_dma_mapping(struct pci_dev
*pdev
,
1293 struct netxen_cmd_buffer
*pbuf
, int last
)
1296 struct netxen_skb_frag
*buffrag
;
1298 buffrag
= &pbuf
->frag_array
[0];
1299 pci_unmap_single(pdev
, buffrag
->dma
,
1300 buffrag
->length
, PCI_DMA_TODEVICE
);
1302 for (k
= 1; k
< last
; k
++) {
1303 buffrag
= &pbuf
->frag_array
[k
];
1304 pci_unmap_page(pdev
, buffrag
->dma
,
1305 buffrag
->length
, PCI_DMA_TODEVICE
);
1310 netxen_clear_cmddesc(u64
*desc
)
1313 for (i
= 0; i
< 8; i
++)
1318 netxen_nic_xmit_frame(struct sk_buff
*skb
, struct net_device
*netdev
)
1320 struct netxen_adapter
*adapter
= netdev_priv(netdev
);
1321 struct nx_host_tx_ring
*tx_ring
= adapter
->tx_ring
;
1322 unsigned int first_seg_len
= skb
->len
- skb
->data_len
;
1323 struct netxen_cmd_buffer
*pbuf
;
1324 struct netxen_skb_frag
*buffrag
;
1325 struct cmd_desc_type0
*hwdesc
;
1326 struct pci_dev
*pdev
= adapter
->pdev
;
1327 dma_addr_t temp_dma
;
1330 u32 producer
, consumer
;
1331 int frag_count
, no_of_desc
;
1332 u32 num_txd
= tx_ring
->num_desc
;
1333 bool is_tso
= false;
1335 frag_count
= skb_shinfo(skb
)->nr_frags
+ 1;
1337 /* 4 fragments per cmd des */
1338 no_of_desc
= (frag_count
+ 3) >> 2;
1340 producer
= tx_ring
->producer
;
1342 consumer
= tx_ring
->sw_consumer
;
1343 if ((no_of_desc
+2) > find_diff_among(producer
, consumer
, num_txd
)) {
1344 netif_stop_queue(netdev
);
1346 return NETDEV_TX_BUSY
;
1349 hwdesc
= &tx_ring
->desc_head
[producer
];
1350 netxen_clear_cmddesc((u64
*)hwdesc
);
1351 pbuf
= &tx_ring
->cmd_buf_arr
[producer
];
1353 is_tso
= netxen_tso_check(netdev
, hwdesc
, skb
);
1356 pbuf
->frag_count
= frag_count
;
1357 buffrag
= &pbuf
->frag_array
[0];
1358 temp_dma
= pci_map_single(pdev
, skb
->data
, first_seg_len
,
1360 if (pci_dma_mapping_error(pdev
, temp_dma
))
1363 buffrag
->dma
= temp_dma
;
1364 buffrag
->length
= first_seg_len
;
1365 netxen_set_tx_frags_len(hwdesc
, frag_count
, skb
->len
);
1366 netxen_set_tx_port(hwdesc
, adapter
->portnum
);
1368 hwdesc
->buffer_length
[0] = cpu_to_le16(first_seg_len
);
1369 hwdesc
->addr_buffer1
= cpu_to_le64(buffrag
->dma
);
1371 for (i
= 1, k
= 1; i
< frag_count
; i
++, k
++) {
1372 struct skb_frag_struct
*frag
;
1374 unsigned long offset
;
1376 /* move to next desc. if there is a need */
1377 if ((i
& 0x3) == 0) {
1379 producer
= get_next_index(producer
, num_txd
);
1380 hwdesc
= &tx_ring
->desc_head
[producer
];
1381 netxen_clear_cmddesc((u64
*)hwdesc
);
1382 pbuf
= &tx_ring
->cmd_buf_arr
[producer
];
1385 frag
= &skb_shinfo(skb
)->frags
[i
- 1];
1387 offset
= frag
->page_offset
;
1390 temp_dma
= pci_map_page(pdev
, frag
->page
, offset
,
1391 len
, PCI_DMA_TODEVICE
);
1392 if (pci_dma_mapping_error(pdev
, temp_dma
)) {
1393 netxen_clean_tx_dma_mapping(pdev
, pbuf
, i
);
1398 buffrag
->dma
= temp_dma
;
1399 buffrag
->length
= temp_len
;
1401 hwdesc
->buffer_length
[k
] = cpu_to_le16(temp_len
);
1404 hwdesc
->addr_buffer1
= cpu_to_le64(temp_dma
);
1407 hwdesc
->addr_buffer2
= cpu_to_le64(temp_dma
);
1410 hwdesc
->addr_buffer3
= cpu_to_le64(temp_dma
);
1413 hwdesc
->addr_buffer4
= cpu_to_le64(temp_dma
);
1418 producer
= get_next_index(producer
, num_txd
);
1420 /* For LSO, we need to copy the MAC/IP/TCP headers into
1421 * the descriptor ring
1424 int hdr_len
, first_hdr_len
, more_hdr
;
1425 hdr_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
);
1426 if (hdr_len
> (sizeof(struct cmd_desc_type0
) - 2)) {
1427 first_hdr_len
= sizeof(struct cmd_desc_type0
) - 2;
1430 first_hdr_len
= hdr_len
;
1433 /* copy the MAC/IP/TCP headers to the cmd descriptor list */
1434 hwdesc
= &tx_ring
->desc_head
[producer
];
1435 pbuf
= &tx_ring
->cmd_buf_arr
[producer
];
1438 /* copy the first 64 bytes */
1439 memcpy(((void *)hwdesc
) + 2,
1440 (void *)(skb
->data
), first_hdr_len
);
1441 producer
= get_next_index(producer
, num_txd
);
1444 hwdesc
= &tx_ring
->desc_head
[producer
];
1445 pbuf
= &tx_ring
->cmd_buf_arr
[producer
];
1447 /* copy the next 64 bytes - should be enough except
1448 * for pathological case
1450 skb_copy_from_linear_data_offset(skb
, first_hdr_len
,
1454 producer
= get_next_index(producer
, num_txd
);
1458 tx_ring
->producer
= producer
;
1459 adapter
->stats
.txbytes
+= skb
->len
;
1461 netxen_nic_update_cmd_producer(adapter
, tx_ring
, producer
);
1463 adapter
->stats
.xmitcalled
++;
1464 netdev
->trans_start
= jiffies
;
1466 return NETDEV_TX_OK
;
1469 adapter
->stats
.txdropped
++;
1470 dev_kfree_skb_any(skb
);
1471 return NETDEV_TX_OK
;
1474 static int netxen_nic_check_temp(struct netxen_adapter
*adapter
)
1476 struct net_device
*netdev
= adapter
->netdev
;
1477 uint32_t temp
, temp_state
, temp_val
;
1480 temp
= NXRD32(adapter
, CRB_TEMP_STATE
);
1482 temp_state
= nx_get_temp_state(temp
);
1483 temp_val
= nx_get_temp_val(temp
);
1485 if (temp_state
== NX_TEMP_PANIC
) {
1487 "%s: Device temperature %d degrees C exceeds"
1488 " maximum allowed. Hardware has been shut down.\n",
1489 netxen_nic_driver_name
, temp_val
);
1491 netif_carrier_off(netdev
);
1492 netif_stop_queue(netdev
);
1494 } else if (temp_state
== NX_TEMP_WARN
) {
1495 if (adapter
->temp
== NX_TEMP_NORMAL
) {
1497 "%s: Device temperature %d degrees C "
1498 "exceeds operating range."
1499 " Immediate action needed.\n",
1500 netxen_nic_driver_name
, temp_val
);
1503 if (adapter
->temp
== NX_TEMP_WARN
) {
1505 "%s: Device temperature is now %d degrees C"
1506 " in normal range.\n", netxen_nic_driver_name
,
1510 adapter
->temp
= temp_state
;
1514 void netxen_advert_link_change(struct netxen_adapter
*adapter
, int linkup
)
1516 struct net_device
*netdev
= adapter
->netdev
;
1518 if (adapter
->ahw
.linkup
&& !linkup
) {
1519 printk(KERN_INFO
"%s: %s NIC Link is down\n",
1520 netxen_nic_driver_name
, netdev
->name
);
1521 adapter
->ahw
.linkup
= 0;
1522 if (netif_running(netdev
)) {
1523 netif_carrier_off(netdev
);
1524 netif_stop_queue(netdev
);
1527 if (!adapter
->has_link_events
)
1528 netxen_nic_set_link_parameters(adapter
);
1530 } else if (!adapter
->ahw
.linkup
&& linkup
) {
1531 printk(KERN_INFO
"%s: %s NIC Link is up\n",
1532 netxen_nic_driver_name
, netdev
->name
);
1533 adapter
->ahw
.linkup
= 1;
1534 if (netif_running(netdev
)) {
1535 netif_carrier_on(netdev
);
1536 netif_wake_queue(netdev
);
1539 if (!adapter
->has_link_events
)
1540 netxen_nic_set_link_parameters(adapter
);
1544 static void netxen_nic_handle_phy_intr(struct netxen_adapter
*adapter
)
1546 u32 val
, port
, linkup
;
1548 port
= adapter
->physical_port
;
1550 if (NX_IS_REVISION_P3(adapter
->ahw
.revision_id
)) {
1551 val
= NXRD32(adapter
, CRB_XG_STATE_P3
);
1552 val
= XG_LINK_STATE_P3(adapter
->ahw
.pci_func
, val
);
1553 linkup
= (val
== XG_LINK_UP_P3
);
1555 val
= NXRD32(adapter
, CRB_XG_STATE
);
1556 if (adapter
->ahw
.port_type
== NETXEN_NIC_GBE
)
1557 linkup
= (val
>> port
) & 1;
1559 val
= (val
>> port
*8) & 0xff;
1560 linkup
= (val
== XG_LINK_UP
);
1564 netxen_advert_link_change(adapter
, linkup
);
1567 static void netxen_watchdog(unsigned long v
)
1569 struct netxen_adapter
*adapter
= (struct netxen_adapter
*)v
;
1571 SCHEDULE_WORK(&adapter
->watchdog_task
);
1574 void netxen_watchdog_task(struct work_struct
*work
)
1576 struct netxen_adapter
*adapter
=
1577 container_of(work
, struct netxen_adapter
, watchdog_task
);
1579 if ((adapter
->portnum
== 0) && netxen_nic_check_temp(adapter
))
1582 if (!adapter
->has_link_events
)
1583 netxen_nic_handle_phy_intr(adapter
);
1585 if (netif_running(adapter
->netdev
))
1586 mod_timer(&adapter
->watchdog_timer
, jiffies
+ 2 * HZ
);
1589 static void netxen_tx_timeout(struct net_device
*netdev
)
1591 struct netxen_adapter
*adapter
= (struct netxen_adapter
*)
1592 netdev_priv(netdev
);
1593 SCHEDULE_WORK(&adapter
->tx_timeout_task
);
1596 static void netxen_tx_timeout_task(struct work_struct
*work
)
1598 struct netxen_adapter
*adapter
=
1599 container_of(work
, struct netxen_adapter
, tx_timeout_task
);
1601 printk(KERN_ERR
"%s %s: transmit timeout, resetting.\n",
1602 netxen_nic_driver_name
, adapter
->netdev
->name
);
1604 netxen_napi_disable(adapter
);
1606 adapter
->netdev
->trans_start
= jiffies
;
1608 netxen_napi_enable(adapter
);
1609 netif_wake_queue(adapter
->netdev
);
1612 struct net_device_stats
*netxen_nic_get_stats(struct net_device
*netdev
)
1614 struct netxen_adapter
*adapter
= netdev_priv(netdev
);
1615 struct net_device_stats
*stats
= &adapter
->net_stats
;
1617 memset(stats
, 0, sizeof(*stats
));
1619 stats
->rx_packets
= adapter
->stats
.no_rcv
;
1620 stats
->tx_packets
= adapter
->stats
.xmitfinished
;
1621 stats
->rx_bytes
= adapter
->stats
.rxbytes
;
1622 stats
->tx_bytes
= adapter
->stats
.txbytes
;
1623 stats
->rx_dropped
= adapter
->stats
.rxdropped
;
1624 stats
->tx_dropped
= adapter
->stats
.txdropped
;
1629 static irqreturn_t
netxen_intr(int irq
, void *data
)
1631 struct nx_host_sds_ring
*sds_ring
= data
;
1632 struct netxen_adapter
*adapter
= sds_ring
->adapter
;
1635 status
= adapter
->pci_read_immediate(adapter
, ISR_INT_VECTOR
);
1637 if (!(status
& adapter
->legacy_intr
.int_vec_bit
))
1640 if (adapter
->ahw
.revision_id
>= NX_P3_B1
) {
1641 /* check interrupt state machine, to be sure */
1642 status
= adapter
->pci_read_immediate(adapter
,
1644 if (!ISR_LEGACY_INT_TRIGGERED(status
))
1648 unsigned long our_int
= 0;
1650 our_int
= NXRD32(adapter
, CRB_INT_VECTOR
);
1652 /* not our interrupt */
1653 if (!test_and_clear_bit((7 + adapter
->portnum
), &our_int
))
1656 /* claim interrupt */
1657 NXWR32(adapter
, CRB_INT_VECTOR
, (our_int
& 0xffffffff));
1660 /* clear interrupt */
1661 if (adapter
->fw_major
< 4)
1662 netxen_nic_disable_int(sds_ring
);
1664 adapter
->pci_write_immediate(adapter
,
1665 adapter
->legacy_intr
.tgt_status_reg
,
1667 /* read twice to ensure write is flushed */
1668 adapter
->pci_read_immediate(adapter
, ISR_INT_VECTOR
);
1669 adapter
->pci_read_immediate(adapter
, ISR_INT_VECTOR
);
1671 napi_schedule(&sds_ring
->napi
);
1676 static irqreturn_t
netxen_msi_intr(int irq
, void *data
)
1678 struct nx_host_sds_ring
*sds_ring
= data
;
1679 struct netxen_adapter
*adapter
= sds_ring
->adapter
;
1681 /* clear interrupt */
1682 adapter
->pci_write_immediate(adapter
,
1683 msi_tgt_status
[adapter
->ahw
.pci_func
], 0xffffffff);
1685 napi_schedule(&sds_ring
->napi
);
1689 static irqreturn_t
netxen_msix_intr(int irq
, void *data
)
1691 struct nx_host_sds_ring
*sds_ring
= data
;
1693 napi_schedule(&sds_ring
->napi
);
1697 static int netxen_nic_poll(struct napi_struct
*napi
, int budget
)
1699 struct nx_host_sds_ring
*sds_ring
=
1700 container_of(napi
, struct nx_host_sds_ring
, napi
);
1702 struct netxen_adapter
*adapter
= sds_ring
->adapter
;
1707 tx_complete
= netxen_process_cmd_ring(adapter
);
1709 work_done
= netxen_process_rcv_ring(sds_ring
, budget
);
1711 if ((work_done
< budget
) && tx_complete
) {
1712 napi_complete(&sds_ring
->napi
);
1713 netxen_nic_enable_int(sds_ring
);
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netxen_nic_poll_controller - netpoll entry point; invoke the
 * interrupt handler with the device IRQ masked.
 */
static void netxen_nic_poll_controller(struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);
	disable_irq(adapter->irq);
	netxen_intr(adapter->irq, adapter);
	enable_irq(adapter->irq);
}
#endif
1729 static struct pci_driver netxen_driver
= {
1730 .name
= netxen_nic_driver_name
,
1731 .id_table
= netxen_pci_tbl
,
1732 .probe
= netxen_nic_probe
,
1733 .remove
= __devexit_p(netxen_nic_remove
),
1734 .suspend
= netxen_nic_suspend
,
1735 .resume
= netxen_nic_resume
1738 /* Driver Registration on NetXen card */
1740 static int __init
netxen_init_module(void)
1742 printk(KERN_INFO
"%s\n", netxen_nic_driver_string
);
1744 if ((netxen_workq
= create_singlethread_workqueue("netxen")) == NULL
)
1747 return pci_register_driver(&netxen_driver
);
1750 module_init(netxen_init_module
);
1752 static void __exit
netxen_exit_module(void)
1754 pci_unregister_driver(&netxen_driver
);
1755 destroy_workqueue(netxen_workq
);
1758 module_exit(netxen_exit_module
);