/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-main.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *              Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 * vlan_tag_strip:
 *	Strip VLAN Tag enable/disable. Instructs the device to remove
 *	the VLAN tag from all received tagged frames that are not
 *	replicated at the internal L2 switch.
 *		0 - Do not strip the VLAN tag.
 *		1 - Strip the VLAN tag.
 *
 * addr_learn_en:
 *	Enable learning the MAC address of the guest OS interface in
 *	a virtualization environment.
 *
 * max_config_port:
 *	Maximum number of ports to be supported.
 *
 * max_config_vpath:
 *	This configures the maximum number of VPATHs for each
 *	device function.
 *		MIN - 1 and MAX - 17
 *
 * max_config_dev:
 *	This configures the maximum number of device functions to be enabled.
 *		MIN - 1 and MAX - 17
 ******************************************************************************/
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "vxge-main.h"
#include "vxge-reg.h"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
	"Virtualized Server Adapter");
static struct pci_device_id vxge_id_table[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
	PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
	PCI_ANY_ID},
	{0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);
VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
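
/*
 * Usage sketch (editor's note, illustrative only -- the parameter names are
 * the ones declared above, the values shown are arbitrary examples):
 *
 *	modprobe vxge vlan_tag_strip=1 addr_learn_en=0 max_config_vpath=4
 *
 * Each VXGE_MODULE_PARAM_INT() above exposes the named variable through
 * /sys/module/vxge/parameters/ with the stated default.
 */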
static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
		{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};

static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);
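
/*
 * Editor's note on vpath_selector[] (illustrative): each entry is a power-of-
 * two mask covering n configured vpaths, used by vxge_get_vpath_no() below to
 * fold a TCP port sum into a vpath index. For example, with no_of_vpath = 5
 * the mask is vpath_selector[4] = 7, so a flow whose port sum is 32928 maps
 * to 32928 & 7 = 0; masked results >= 5 are clamped to 4.
 */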
static struct vxge_drv_config *driver_config;
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}
static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
	unsigned long flags = 0;
	struct sk_buff *skb_ptr = NULL;
	struct sk_buff **temp, *head, *skb;

	if (spin_trylock_irqsave(&fifo->tx_lock, flags)) {
		vxge_hw_vpath_poll_tx(fifo->handle, (void **)&skb_ptr);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
	}

	/* free SKBs */
	head = skb_ptr;
	while (head) {
		skb = head;
		temp = (struct sk_buff **)&skb->cb;
		head = *temp;
		*temp = NULL;
		dev_kfree_skb_irq(skb);
	}
}
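
/*
 * Editor's note: completed skbs come back from the HW layer as a singly
 * linked list threaded through each skb's control buffer (skb->cb), which is
 * free for the owning layer to use. vxge_xmit_compl() below builds the chain
 * the same way; the loop above simply walks it and frees each node.
 */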
static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
	int i;

	/* Complete all transmits */
	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}
static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
	int i;
	struct vxge_ring *ring;

	/* Complete all receives */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		vxge_hw_vpath_poll_rx(ring->handle);
	}
}
/*
 * MultiQ manipulation helper functions
 */
void vxge_stop_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_STOP;
	}
	netif_tx_stop_all_queues(dev);
}
void vxge_stop_tx_queue(struct vxge_fifo *fifo)
{
	struct net_device *dev = fifo->ndev;
	struct netdev_queue *txq = NULL;

	if (fifo->tx_steering_type == TX_MULTIQ_STEERING)
		txq = netdev_get_tx_queue(dev, fifo->driver_id);
	else {
		txq = netdev_get_tx_queue(dev, 0);
		fifo->queue_state = VPATH_QUEUE_STOP;
	}

	netif_tx_stop_queue(txq);
}
void vxge_start_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
	}
	netif_tx_start_all_queues(dev);
}
static void vxge_wake_all_tx_queue(struct vxgedev *vdev)
{
	int i;
	struct net_device *dev = vdev->ndev;

	if (vdev->config.tx_steering_type != TX_MULTIQ_STEERING) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			vdev->vpaths[i].fifo.queue_state = VPATH_QUEUE_START;
	}
	netif_tx_wake_all_queues(dev);
}
void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb)
{
	struct net_device *dev = fifo->ndev;

	int vpath_no = fifo->driver_id;
	struct netdev_queue *txq = NULL;

	if (fifo->tx_steering_type == TX_MULTIQ_STEERING) {
		txq = netdev_get_tx_queue(dev, vpath_no);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	} else {
		txq = netdev_get_tx_queue(dev, 0);
		if (fifo->queue_state == VPATH_QUEUE_STOP)
			if (netif_tx_queue_stopped(txq)) {
				fifo->queue_state = VPATH_QUEUE_START;
				netif_tx_wake_queue(txq);
			}
	}
}
/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.
 */
void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
	vdev->stats.link_up++;

	netif_carrier_on(vdev->ndev);
	vxge_wake_all_tx_queue(vdev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}
/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.
 */
void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
	printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);

	vdev->stats.link_down++;
	netif_carrier_off(vdev->ndev);
	vxge_stop_all_tx_queue(vdev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}
static struct sk_buff *
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;

	dev = ring->ndev;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	/* try to allocate skb first. this one may fail */
	skb = netdev_alloc_skb(dev, skb_size +
	VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (skb == NULL) {
		vxge_debug_mem(VXGE_ERR,
			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;
		return NULL;
	}

	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);

	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

	rx_priv->skb = skb;
	rx_priv->data_size = skb_size;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return skb;
}
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
	struct vxge_rx_priv *rx_priv;
	dma_addr_t dma_addr;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	dma_addr = pci_map_single(ring->pdev, rx_priv->skb->data,
				rx_priv->data_size, PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;
		return -EIO;
	}
	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

	rx_priv->data_dma = dma_addr;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return 0;
}
/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,
		VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
		return VXGE_HW_FAIL;

	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		return VXGE_HW_FAIL;
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}
static void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);
	skb->protocol = eth_type_trans(skb, ring->ndev);

	ring->stats.rx_frms++;
	ring->stats.rx_bytes += pkt_length;

	if (skb->pkt_type == PACKET_MULTICAST)
		ring->stats.rx_mcast++;

	vxge_debug_rx(VXGE_TRACE,
		"%s: %s:%d skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);

	if (ring->gro_enable) {
		if (ring->vlgrp && ext_info->vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_gro_receive(&ring->napi, ring->vlgrp,
					ext_info->vlan, skb);
		else
			napi_gro_receive(&ring->napi, skb);
	} else {
		if (ring->vlgrp && vlan &&
			(ring->vlan_tag_strip ==
				VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
			vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
		else
			netif_receive_skb(skb);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}
static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
				    struct vxge_rx_priv *rx_priv)
{
	pci_dma_sync_single_for_device(ring->pdev,
		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}
static inline void vxge_post(int *dtr_cnt, void **first_dtr,
			     void *post_dtr, struct __vxge_hw_ring *ringh)
{
	int dtr_count = *dtr_cnt;
	if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
		if (*first_dtr)
			vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
		*first_dtr = post_dtr;
	} else
		vxge_hw_ring_rxd_post_post(ringh, post_dtr);
	dtr_count++;
	*dtr_cnt = dtr_count;
}
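
/*
 * Editor's note (illustrative): vxge_post() batches RxD doorbells. Only
 * every VXGE_HW_RXSYNC_FREQ_CNT-th descriptor is posted with a write memory
 * barrier (the _wmb variant); the descriptor that opens each batch is
 * remembered in *first_dtr and flushed either here or at the end of
 * vxge_rx_1b_compl(), so the device sees whole batches rather than one
 * ordered MMIO write per frame.
 */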
/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 */
enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
		 u8 t_code, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct net_device *dev = ring->ndev;
	unsigned int dma_sizes;
	void *first_dtr = NULL;
	int dtr_cnt = 0;
	int data_size;
	dma_addr_t data_dma;
	int pkt_length;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;
	struct vxge_hw_ring_rxd_info ext_info;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	ring->pkts_processed = 0;

	vxge_hw_ring_replenish(ringh, 0);

	do {
		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
		skb = rx_priv->skb;
		data_size = rx_priv->data_size;
		data_dma = rx_priv->data_dma;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

		/* check skb validity */
		vxge_assert(skb);

		prefetch((char *)skb + L1_CACHE_BYTES);
		if (unlikely(t_code)) {

			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
				VXGE_HW_OK) {

				ring->stats.rx_errors++;
				vxge_debug_rx(VXGE_TRACE,
					"%s: %s :%d Rx T_code is %d",
					ring->ndev->name, __func__,
					__LINE__, t_code);

				/* If the t_code is not supported and if the
				 * t_code is other than 0x5 (unparseable packet
				 * such as unknown UPV6 header), Drop it !!!
				 */
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		}

		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {

			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {

				if (!vxge_rx_map(dtr, ring)) {
					skb_put(skb, pkt_length);

					pci_unmap_single(ring->pdev, data_dma,
						data_size, PCI_DMA_FROMDEVICE);

					vxge_hw_ring_rxd_pre_post(ringh, dtr);
					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
				} else {
					dev_kfree_skb(rx_priv->skb);
					rx_priv->skb = skb;
					rx_priv->data_size = data_size;
					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
					ring->stats.rx_dropped++;
					continue;
				}
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		} else {
			struct sk_buff *skb_up;

			skb_up = netdev_alloc_skb(dev, pkt_length +
				VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
			if (skb_up != NULL) {
				skb_reserve(skb_up,
				    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

				pci_dma_sync_single_for_cpu(ring->pdev,
					data_dma, data_size,
					PCI_DMA_FROMDEVICE);

				vxge_debug_mem(VXGE_TRACE,
					"%s: %s:%d skb_up = %p",
					ring->ndev->name, __func__,
					__LINE__, skb);
				memcpy(skb_up->data, skb->data, pkt_length);

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr,
					ringh);
				/* will netif_rx small SKB instead */
				skb = skb_up;
				skb_put(skb, pkt_length);
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				vxge_debug_rx(VXGE_ERR,
					"%s: vxge_rx_1b_compl: out of "
					"memory", dev->name);
				ring->stats.skb_alloc_fail++;
				break;
			}
		}

		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    ring->rx_csum && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		vxge_rx_complete(ring, skb, ext_info.vlan,
			pkt_length, &ext_info);

		ring->budget--;
		ring->pkts_processed++;
		if (!ring->budget)
			break;

	} while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
		&t_code) == VXGE_HW_OK);

	if (first_dtr)
		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

	dev->last_rx = jiffies;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
	return VXGE_HW_OK;
}
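
/*
 * Editor's note (illustrative): frames larger than VXGE_LL_RX_COPY_THRESHOLD
 * keep the DMA-mapped skb and a fresh buffer is allocated for the ring slot;
 * smaller frames are memcpy'd into a right-sized skb_up so the original
 * buffer can be re-posted immediately without an unmap/remap cycle.
 */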
/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data have already DMA'ed into the NICs
 * internal memory.
 */
enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
		enum vxge_hw_fifo_tcode t_code, void *userdata,
		void **skb_ptr)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	struct sk_buff *skb, *head = NULL;
	struct sk_buff **temp;
	int pkt_cnt = 0;

	vxge_debug_entryexit(VXGE_TRACE,
			"%s:%d Entered....", __func__, __LINE__);

	do {
		int frg_cnt;
		skb_frag_t *frag;
		int i = 0, j;
		struct vxge_tx_priv *txd_priv =
			vxge_hw_fifo_txdl_private_get(dtr);

		skb = txd_priv->skb;
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

		vxge_debug_tx(VXGE_TRACE,
				"%s: %s:%d fifo_hw = %p dtr = %p "
				"tcode = 0x%x", fifo->ndev->name, __func__,
				__LINE__, fifo_hw, dtr, t_code);
		/* check skb validity */
		vxge_assert(skb);
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);
		if (unlikely(t_code)) {
			fifo->stats.tx_errors++;
			vxge_debug_tx(VXGE_ERR,
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,
				dtr, t_code);
			vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
		}

		/* for unfragmented skb */
		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
				skb_headlen(skb), PCI_DMA_TODEVICE);

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,
					txd_priv->dma_buffers[i++],
					frag->size, PCI_DMA_TODEVICE);
			frag += 1;
		}

		vxge_hw_fifo_txdl_free(fifo_hw, dtr);

		/* Updating the statistics block */
		fifo->stats.tx_frms++;
		fifo->stats.tx_bytes += skb->len;

		temp = (struct sk_buff **)&skb->cb;
		*temp = head;
		head = skb;

		pkt_cnt++;
		if (pkt_cnt > fifo->indicate_max_pkts)
			break;

	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
				&dtr, &t_code) == VXGE_HW_OK);

	vxge_wake_tx_queue(fifo, skb);

	if (skb_ptr)
		*skb_ptr = (void *)head;

	vxge_debug_entryexit(VXGE_TRACE,
				"%s: %s:%d Exiting...",
				fifo->ndev->name, __func__, __LINE__);
	return VXGE_HW_OK;
}
/* select a vpath to transmit the packet */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb,
	int *do_lock)
{
	u16 queue_len, counter = 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;
		struct tcphdr *th;

		ip = ip_hdr(skb);

		if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
			th = (struct tcphdr *)(((unsigned char *)ip) +
					ip->ihl*4);

			queue_len = vdev->no_of_vpath;
			counter = (ntohs(th->source) +
				ntohs(th->dest)) &
				vdev->vpath_selector[queue_len - 1];
			if (counter >= queue_len)
				counter = queue_len - 1;

			if (ip->protocol == IPPROTO_UDP) {
#ifdef NETIF_F_LLTX
				*do_lock = 0;
#endif
			}
		}
	}
	return counter;
}
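
/*
 * Editor's note (illustrative): with no_of_vpath = 4 the mask is
 * vpath_selector[3] = 3, so a TCP flow with source port 49152 and dest port
 * 443 steers to (49152 + 443) & 3 = 3. For UDP under LLTX the caller is told
 * (via *do_lock) to use spin_trylock instead of spin_lock in vxge_xmit().
 */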
static enum vxge_hw_status vxge_search_mac_addr_in_list(
	struct vxge_vpath *vpath, u64 del_mac)
{
	struct list_head *entry, *next;
	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
			return TRUE;
	}
	return FALSE;
}
static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	u64 mac_addr = 0, vpath_vector = 0;
	int vpath_idx = 0;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = NULL;
	struct __vxge_hw_device *hldev;

	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);

	mac_address = (u8 *)&mac_addr;
	memcpy(mac_address, mac_header, ETH_ALEN);

	/* Is this mac address already in the list? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
			return vpath_idx;
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

	/* Any vpath has room to add mac address to its da table? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
			/* Add this mac address to this vpath */
			mac_info.vpath_no = vpath_idx;
			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			status = vxge_add_mac_addr(vdev, &mac_info);
			if (status != VXGE_HW_OK)
				return -EPERM;
			return vpath_idx;
		}
	}

	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
	vpath_idx = 0;
	mac_info.vpath_no = vpath_idx;
	/* Is the first vpath already selected as catch-basin ? */
	vpath = &vdev->vpaths[vpath_idx];
	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
		/* Add this mac address to this vpath */
		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
			return -EPERM;
		return vpath_idx;
	}

	/* Select first vpath as catch-basin */
	vpath_vector = vxge_mBIT(vpath->device_id);
	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				vpath_vector);
	if (status != VXGE_HW_OK) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Unable to set the vpath-%d in catch-basin mode",
			VXGE_DRIVER_NAME, vpath->device_id);
		return -EPERM;
	}

	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
		return -EPERM;

	return vpath_idx;
}
/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. Neterion NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when device can't queue the pkt, just the trans_start variable will
 * not be updated.
 */
static int
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxge_fifo *fifo = NULL;
	void *dtr_priv;
	void *dtr = NULL;
	struct vxgedev *vdev = NULL;
	enum vxge_hw_status status;
	int frg_cnt, first_frg_len;
	skb_frag_t *frag;
	int i = 0, j = 0, avail;
	u64 dma_pointer;
	struct vxge_tx_priv *txdl_priv = NULL;
	struct __vxge_hw_fifo *fifo_hw;
	int offload_type;
	unsigned long flags = 0;
	int vpath_no = 0;
	int do_spin_tx_lock = 1;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
			dev->name, __func__, __LINE__);

	/* A buffer with no data will be dropped */
	if (unlikely(skb->len <= 0)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Buffer has no data..", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	vdev = (struct vxgedev *)netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
		vxge_debug_tx(VXGE_ERR,
			"%s: vdev not initialized", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (vdev->config.addr_learn_en) {
		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
		if (vpath_no == -EPERM) {
			vxge_debug_tx(VXGE_ERR,
				"%s: Failed to store the mac address",
				dev->name);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		vpath_no = skb_get_queue_mapping(skb);
	else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
		vpath_no = vxge_get_vpath_no(vdev, skb, &do_spin_tx_lock);

	vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);

	if (vpath_no >= vdev->no_of_vpath)
		vpath_no = 0;

	fifo = &vdev->vpaths[vpath_no].fifo;
	fifo_hw = fifo->handle;

	if (do_spin_tx_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) {
		if (netif_subqueue_stopped(dev, skb)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == VPATH_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
	if (avail == 0) {
		vxge_debug_tx(VXGE_ERR,
			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;
		vxge_stop_tx_queue(fifo);
		goto _exit2;
	}

	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	if (unlikely(status != VXGE_HW_OK)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Out of descriptors .", dev->name);
		fifo->stats.txd_out_of_desc++;
		vxge_stop_tx_queue(fifo);
		goto _exit2;
	}

	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

	if (vdev->vlgrp && vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
	}

	first_frg_len = skb_headlen(skb);

	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
				PCI_DMA_TODEVICE);

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
		vxge_stop_tx_queue(fifo);
		fifo->stats.pci_map_fail++;
		goto _exit2;
	}

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
	txdl_priv->skb = skb;
	txdl_priv->dma_buffers[j] = dma_pointer;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p txdl_priv = %p "
			"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
			__func__, __LINE__, skb, txdl_priv,
			frg_cnt, (unsigned long long)dma_pointer);

	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
		first_frg_len);

	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		/* ignore 0 length fragment */
		if (!frag->size)
			continue;

		dma_pointer =
			(u64)pci_map_page(fifo->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE);

		if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
			goto _exit0;
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
				dev->name, __func__, __LINE__, i,
				(unsigned long long)dma_pointer);

		txdl_priv->dma_buffers[j] = dma_pointer;
		vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
					frag->size);
		frag += 1;
	}

	offload_type = vxge_offload_type(skb);

	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {

		int mss = vxge_tcp_mss(skb);
		if (mss) {
			int max_mss = dev->mtu + ETH_HLEN -
				VXGE_HW_TCPIP_HEADER_MAX_SIZE;
			if (mss > max_mss)
				mss = max_mss;
			vxge_debug_tx(VXGE_TRACE,
				"%s: %s:%d mss = %d",
				dev->name, __func__, __LINE__, mss);
			vxge_hw_fifo_txdl_mss_set(dtr, mss);
		} else
			vxge_assert(skb->len <=
				dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr,
					VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

	vxge_hw_fifo_txdl_post(fifo_hw, dtr);

	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */

	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	VXGE_COMPLETE_VPATH_TX(fifo);
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
	return NETDEV_TX_OK;

_exit0:
	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);

	j = 0;
	frag = &skb_shinfo(skb)->frags[0];

	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
			skb_headlen(skb), PCI_DMA_TODEVICE);

	for (; j < i; j++) {
		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
			frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit2:
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	VXGE_COMPLETE_VPATH_TX(fifo);

	return NETDEV_TX_OK;
}
/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv =
		vxge_hw_ring_rxd_private_get(dtrh);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (state != VXGE_HW_RXD_STATE_POSTED)
		return;

	pci_unmap_single(ring->pdev, rx_priv->data_dma,
		rx_priv->data_size, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(rx_priv->skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		ring->ndev->name, __func__, __LINE__);
}
/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	skb_frag_t *frag;
	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
	struct sk_buff *skb = txd_priv->skb;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* check skb validity */
	vxge_assert(skb);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	/* for unfragmented skb */
	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (j = 0; j < frg_cnt; j++) {
		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
			       frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	dev_kfree_skb(skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flag, we
 * determine if multicast addresses must be enabled or if promiscuous mode
 * is to be disabled, etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *mclist;
	struct vxgedev *vdev;
	int i, mcast_cnt = 0;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	int vpath_idx = 0;
	struct vxge_mac_addrs *mac_entry;
	struct list_head *list_head;
	struct list_head *entry, *next;
	u8 *mac_address = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)vdev->devh;

	if (unlikely(!is_vxge_card_up(vdev)))
		return;

	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_enable(
						vdev->vpaths[i].handle);
			vdev->all_multi_flg = 1;
		}
	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_disable(
						vdev->vpaths[i].handle);
			vdev->all_multi_flg = 0;
		}
	}

	if (status != VXGE_HW_OK)
		vxge_debug_init(VXGE_ERR,
			"failed to %s multicast, status %d",
			dev->flags & IFF_ALLMULTI ?
			"enable" : "disable", status);

	if (!vdev->config.addr_learn_en) {
		if (dev->flags & IFF_PROMISC) {
			for (i = 0; i < vdev->no_of_vpath; i++) {
				vxge_assert(vdev->vpaths[i].is_open);
				status = vxge_hw_vpath_promisc_enable(
						vdev->vpaths[i].handle);
			}
		} else {
			for (i = 0; i < vdev->no_of_vpath; i++) {
				vxge_assert(vdev->vpaths[i].is_open);
				status = vxge_hw_vpath_promisc_disable(
						vdev->vpaths[i].handle);
			}
		}
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	/* Update individual M_CAST address list */
	if ((!vdev->all_multi_flg) && dev->mc_count) {

		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
		if ((dev->mc_count +
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			if (!list_empty(list_head))
				mac_entry = (struct vxge_mac_addrs *)
					list_first_entry(list_head,
						struct vxge_mac_addrs,
						item);

			list_for_each_safe(entry, next, list_head) {

				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address,
					ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0]) {
					for (vpath_idx = 0; vpath_idx <
						vdev->no_of_vpath;
						vpath_idx++) {
						mac_info.vpath_no = vpath_idx;
						status = vxge_del_mac_addr(
							vdev, &mac_info);
					}
				}
			}
		}

		/* Add new ones */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
			i++, mclist = mclist->next) {

			memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
				status = vxge_add_mac_addr(vdev, &mac_info);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"%s:%d Setting individual "
						"multicast address failed",
						__func__, __LINE__);
					goto _set_all_mcast;
				}
			}
		}

		return;
_set_all_mcast:
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {

			list_for_each_safe(entry, next, list_head) {

				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address,
					ETH_ALEN);

				/* Is this a multicast address */
				if (0x01 & mac_info.macaddr[0])
					break;
			}

			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				status = vxge_del_mac_addr(vdev, &mac_info);
			}
		}

		/* Enable all multicast */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_assert(vdev->vpaths[i].is_open);
			status = vxge_hw_vpath_mcast_enable(
						vdev->vpaths[i].handle);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling all multicasts failed",
					__func__, __LINE__);
			}
			vdev->all_multi_flg = 1;
		}
		dev->flags |= IFF_ALLMULTI;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
/**
 * vxge_set_mac_addr
 * @dev: pointer to the device structure
 *
 * Update entry "0" (default MAC addr)
 */
static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info_new, mac_info_old;
	int vpath_idx = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = vdev->devh;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memset(&mac_info_new, 0, sizeof(struct macInfo));
	memset(&mac_info_old, 0, sizeof(struct macInfo));

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
		__func__, __LINE__);

	/* Get the old address */
	memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);

	/* Copy the new address */
	memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);

	/* First delete the old mac address from all the vpaths
	as we can't specify the index while adding new mac address */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
		if (!vpath->is_open) {
			/* This can happen when this interface is added/removed
			to the bonding interface. Delete this station address
			from the linked list */
			vxge_mac_list_del(vpath, &mac_info_old);

			/* Add this new address to the linked list
			for later restoring */
			vxge_mac_list_add(vpath, &mac_info_new);

			continue;
		}
		/* Delete the station address */
		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);
	}

	if (unlikely(!is_vxge_card_up(vdev))) {
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return VXGE_HW_OK;
	}

	/* Set this mac address to all the vpaths */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;
		mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
		status = vxge_add_mac_addr(vdev, &mac_info_new);
		if (status != VXGE_HW_OK)
			return -EINVAL;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return status;
}
/*
 * vxge_vpath_intr_enable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to enable the interrupts
 *
 * Enables the interrupts for the vpath
 */
void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id, alarm_msix_id;
	int tim_msix_id[4] = {[0 ...3] = 0};

	vxge_hw_vpath_intr_enable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
	else {
		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		alarm_msix_id =
			VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;

		tim_msix_id[0] = msix_id;
		tim_msix_id[1] = msix_id + 1;
		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
			alarm_msix_id);

		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);

		/* enable the alarm vector */
		vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id);
	}
}
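
/*
 * Editor's note (illustrative): each vpath owns a block of
 * VXGE_HW_VPATH_MSIX_ACTIVE vector slots -- slot 0 for the fifo (Tx) and
 * slot 1 for the ring (Rx) -- while all vpaths share the single alarm vector
 * numbered VXGE_HW_VPATH_MSIX_ACTIVE * no_of_vpath - 2. That is why the
 * enable/disable paths touch msix_id, msix_id + 1 and the alarm vector
 * separately.
 */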
/*
 * vxge_vpath_intr_disable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to disable the interrupts
 *
 * Disables the interrupts for the vpath
 */
void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id;

	vxge_hw_vpath_intr_disable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
	else {
		msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);

		/* disable the alarm vector */
		msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
	}
}
/*
 * vxge_reset_vpath
 * @vdev: pointer to vdev
 * @vp_id: vpath to reset
 *
 * Resets the vpath
 */
static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	int ret = 0;

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* is device reset already scheduled */
	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		return 0;

	if (vdev->vpaths[vp_id].handle) {
		if (vxge_hw_vpath_reset(vdev->vpaths[vp_id].handle)
				== VXGE_HW_OK) {
			if (is_vxge_card_up(vdev) &&
				vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[vp_id].handle)
					!= VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_recover_from_reset "
					"failed for vpath:%d", vp_id);
				return status;
			}
		} else {
			vxge_debug_init(VXGE_ERR,
				"vxge_hw_vpath_reset failed for "
				"vpath:%d", vp_id);
			return status;
		}
	} else
		return VXGE_HW_FAIL;

	vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
	vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);

	/* Enable all broadcast */
	vxge_hw_vpath_bcast_enable(vdev->vpaths[vp_id].handle);

	/* Enable the interrupts */
	vxge_vpath_intr_enable(vdev, vp_id);

	smp_wmb();

	/* Enable the flow of traffic through the vpath */
	vxge_hw_vpath_enable(vdev->vpaths[vp_id].handle);

	smp_wmb();
	vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[vp_id].handle);
	vdev->vpaths[vp_id].ring.last_status = VXGE_HW_OK;

	/* Vpath reset done */
	clear_bit(vp_id, &vdev->vp_reset);

	/* Start the vpath queue */
	vxge_wake_tx_queue(&vdev->vpaths[vp_id].fifo, NULL);

	return ret;
}
static int do_vxge_reset(struct vxgedev *vdev, int event)
{
	enum vxge_hw_status status;
	int ret = 0, vp_id, i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
		/* check if device is down already */
		if (unlikely(!is_vxge_card_up(vdev)))
			return 0;

		/* is reset already scheduled */
		if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
			return 0;
	}

	if (event == VXGE_LL_FULL_RESET) {
		/* wait for all the vpath reset to complete */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			while (test_bit(vp_id, &vdev->vp_reset))
				msleep(50);
		}

		/* if execution mode is set to debug, don't reset the adapter */
		if (unlikely(vdev->exec_mode)) {
			vxge_debug_init(VXGE_ERR,
				"%s: execution mode is debug, returning..",
				vdev->ndev->name);
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
			vxge_stop_all_tx_queue(vdev);
			return 0;
		}
	}

	if (event == VXGE_LL_FULL_RESET) {
		vxge_hw_device_intr_disable(vdev->devh);

		switch (vdev->cric_err_event) {
		case VXGE_HW_EVENT_UNKNOWN:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"unknown error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_RESET_START:
			break;
		case VXGE_HW_EVENT_RESET_COMPLETE:
		case VXGE_HW_EVENT_LINK_DOWN:
		case VXGE_HW_EVENT_LINK_UP:
		case VXGE_HW_EVENT_ALARM_CLEARED:
		case VXGE_HW_EVENT_ECCERR:
		case VXGE_HW_EVENT_MRPCIM_ECCERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_FIFO_ERR:
		case VXGE_HW_EVENT_VPATH_ERR:
			break;
		case VXGE_HW_EVENT_CRITICAL_ERR:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			/* SOP or device reset required */
			/* This event is not currently used */
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SERR:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"serious error",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SRPCIM_SERR:
		case VXGE_HW_EVENT_MRPCIM_SERR:
			ret = -EPERM;
			goto out;
		case VXGE_HW_EVENT_SLOT_FREEZE:
			vxge_stop_all_tx_queue(vdev);
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: Disabling device due to "
				"slot freeze",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		default:
			break;
		}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
		vxge_stop_all_tx_queue(vdev);

	if (event == VXGE_LL_FULL_RESET) {
		status = vxge_reset_all_vpaths(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"fatal: %s: can not reset vpaths",
				vdev->ndev->name);
			ret = -EPERM;
			goto out;
		}
	}

	if (event == VXGE_LL_COMPL_RESET) {
		for (i = 0; i < vdev->no_of_vpath; i++)
			if (vdev->vpaths[i].handle) {
				if (vxge_hw_vpath_recover_from_reset(
					vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					ret = -EPERM;
					goto out;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath: %d not open", i);
				ret = -EPERM;
				goto out;
			}
	}

	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
		/* Reprogram the DA table with populated mac addresses */
		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
			vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
			vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
		}

		/* enable vpath interrupts */
		for (i = 0; i < vdev->no_of_vpath; i++)
			vxge_vpath_intr_enable(vdev, i);

		vxge_hw_device_intr_enable(vdev->devh);

		smp_wmb();

		/* Indicate card up */
		set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

		/* Get the traffic to flow through the vpaths */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_hw_vpath_enable(vdev->vpaths[i].handle);
			smp_wmb();
			vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
		}

		vxge_wake_all_tx_queue(vdev);
	}

out:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);

	/* Indicate reset done */
	if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
		clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return ret;
}
/*
 * vxge_reset
 * @vdev: pointer to ll device
 *
 * driver may reset the chip on events of serr, eccerr, etc
 */
int vxge_reset(struct vxgedev *vdev)
{
	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);

	return 0;
}
/**
 * vxge_poll - Receive handler when Receive Polling is used.
 * @dev: pointer to the device structure.
 * @budget: Number of packets budgeted to be processed in this iteration.
 *
 * This function comes into picture only if Receive side is being handled
 * through polling (called NAPI in linux). It mostly does what the normal
 * Rx interrupt handler does in terms of descriptor and packet processing
 * but not in an interrupt context. Also it will process a specified number
 * of packets at most in one iteration. This value is passed down by the
 * kernel as the function argument 'budget'.
 */
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
	struct vxge_ring *ring =
		container_of(napi, struct vxge_ring, napi);
	int budget_org = budget;
	ring->budget = budget;

	vxge_hw_vpath_poll_rx(ring->handle);

	if (ring->pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the vpath */
		vxge_hw_channel_msix_unmask(
				(struct __vxge_hw_channel *)ring->handle,
				ring->rx_vector_no);
	}

	return ring->pkts_processed;
}
static int vxge_poll_inta(struct napi_struct *napi, int budget)
{
	struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
	int pkts_processed = 0;
	int i;
	int budget_org = budget;
	struct vxge_ring *ring;

	struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
		pci_get_drvdata(vdev->pdev);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		ring->budget = budget;
		vxge_hw_vpath_poll_rx(ring->handle);
		pkts_processed += ring->pkts_processed;
		budget -= ring->pkts_processed;
		if (budget <= 0)
			break;
	}

	VXGE_COMPLETE_ALL_TX(vdev);

	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/* Re enable the Rx interrupts for the ring */
		vxge_hw_device_unmask_all(hldev);
		vxge_hw_device_flush_io(hldev);
	}

	return pkts_processed;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * vxge_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 *      This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void vxge_netpoll(struct net_device *dev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev;

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (pci_channel_offline(vdev->pdev))
		return;

	disable_irq(dev->irq);
	vxge_hw_device_clear_tx_rx(hldev);

	VXGE_COMPLETE_ALL_RX(vdev);
	VXGE_COMPLETE_ALL_TX(vdev);

	enable_irq(dev->irq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}
#endif
/* RTH configuration */
static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_rth_hash_types hash_types;
	u8 itable[256] = {0}; /* indirection table */
	u8 mtable[256] = {0}; /* CPU to vpath mapping */
	int index;

	/*
	 * Filling
	 *	- itable with bucket numbers
	 *	- mtable with bucket-to-vpath mapping
	 */
	for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
		itable[index] = index;
		mtable[index] = index % vdev->no_of_vpath;
	}

	/* Fill RTH hash types */
	hash_types.hash_type_tcpipv4_en   = vdev->config.rth_hash_type_tcpipv4;
	hash_types.hash_type_ipv4_en      = vdev->config.rth_hash_type_ipv4;
	hash_types.hash_type_tcpipv6_en   = vdev->config.rth_hash_type_tcpipv6;
	hash_types.hash_type_ipv6_en      = vdev->config.rth_hash_type_ipv6;
	hash_types.hash_type_tcpipv6ex_en =
					vdev->config.rth_hash_type_tcpipv6ex;
	hash_types.hash_type_ipv6ex_en    = vdev->config.rth_hash_type_ipv6ex;

	/* set indirection table, bucket-to-vpath mapping */
	status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
						vdev->no_of_vpath,
						mtable, itable,
						vdev->config.rth_bkt_sz);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"RTH indirection table configuration failed "
			"for vpath:%d", vdev->vpaths[0].device_id);
		return status;
	}

	/*
	 * Because the itable_set() method uses the active_table field
	 * for the target virtual path the RTH config should be updated
	 * for all VPATHs. The h/w only uses the lowest numbered VPATH
	 * when steering frames.
	 */
	for (index = 0; index < vdev->no_of_vpath; index++) {
		status = vxge_hw_vpath_rts_rth_set(
				vdev->vpaths[index].handle,
				vdev->config.rth_algorithm,
				&hash_types,
				vdev->config.rth_bkt_sz);

		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"RTH configuration failed for vpath:%d",
				vdev->vpaths[index].device_id);
			return status;
		}
	}

	return status;
}
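
/*
 * Editor's note (illustrative): with rth_bkt_sz = 2 and no_of_vpath = 3 the
 * tables built above are itable[] = {0, 1, 2, 3} and mtable[] = {0, 1, 2, 0},
 * i.e. four hash buckets distributed round-robin over the three vpaths.
 */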
int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;

	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	/* Is this a multicast address */
	if (0x01 & mac->macaddr[0])
		vpath->mcast_addr_cnt++;

	return TRUE;
}
/* Add a mac address to DA table */
enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
	struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;

	if (0x01 & mac->macaddr[0]) /* multicast address */
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}
int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *) (&del_mac);

	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			/* Is this a multicast address */
			if (0x01 & mac->macaddr[0])
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}
1911 enum vxge_hw_status
vxge_del_mac_addr(struct vxgedev
*vdev
, struct macInfo
*mac
)
1913 enum vxge_hw_status status
= VXGE_HW_OK
;
1914 struct vxge_vpath
*vpath
;
1916 vpath
= &vdev
->vpaths
[mac
->vpath_no
];
1917 status
= vxge_hw_vpath_mac_addr_delete(vpath
->handle
, mac
->macaddr
,
1919 if (status
!= VXGE_HW_OK
) {
1920 vxge_debug_init(VXGE_ERR
,
1921 "DA config delete entry failed for vpath:%d",
1924 vxge_mac_list_del(vpath
, mac
);
/* list all mac addresses from DA table */
static enum vxge_hw_status
vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
					struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	unsigned char macmask[ETH_ALEN];
	unsigned char macaddr[ETH_ALEN];

	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
				macaddr, macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config list entry failed for vpath:%d",
			vpath->device_id);
		return status;
	}

	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {

		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
				macaddr, macmask);
		if (status != VXGE_HW_OK)
			break;
	}

	return status;
}
/* Store all vlan ids from the list to the vid table */
enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev = vpath->vdev;
	u16 vid;

	if (vdev->vlgrp && vpath->is_open) {

		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(vdev->vlgrp, vid))
				continue;
			/* Add these vlan to the vid table */
			status = vxge_hw_vpath_vid_add(vpath->handle, vid);
		}
	}

	return status;
}
/* Store all mac addresses from the list to the DA table */
enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	struct list_head *entry, *next;

	memset(&mac_info, 0, sizeof(struct macInfo));

	if (vpath->is_open) {

		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
			mac_address =
				(u8 *)&
				((struct vxge_mac_addrs *)entry)->macaddr;
			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
			((struct vxge_mac_addrs *)entry)->state =
				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			/* does this mac address already exist in da table? */
			status = vxge_search_mac_addr_in_da_table(vpath,
				&mac_info);
			if (status != VXGE_HW_OK) {
				/* Add this mac address to the DA table */
				status = vxge_hw_vpath_mac_addr_add(
					vpath->handle, mac_info.macaddr,
					mac_info.macmask,
				    VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
					    "DA add entry failed for vpath:%d",
					    vpath->device_id);
					((struct vxge_mac_addrs *)entry)->state
						= VXGE_LL_MAC_ADDR_IN_LIST;
				}
			}
		}
	}

	return status;
}
/* reset vpaths */
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
	int i;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < vdev->no_of_vpath; i++)
		if (vdev->vpaths[i].handle) {
			if (vxge_hw_vpath_reset(vdev->vpaths[i].handle)
					== VXGE_HW_OK) {
				if (is_vxge_card_up(vdev) &&
					vxge_hw_vpath_recover_from_reset(
						vdev->vpaths[i].handle)
						!= VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"vxge_hw_vpath_recover_"
						"from_reset failed for vpath: "
						"%d", i);
					return VXGE_HW_FAIL;
				}
			} else {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_reset failed for "
					"vpath:%d", i);
				return VXGE_HW_FAIL;
			}
		}

	return status;
}
/* close vpaths */
void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
	int i;
	for (i = index; i < vdev->no_of_vpath; i++) {
		if (vdev->vpaths[i].handle && vdev->vpaths[i].is_open) {
			vxge_hw_vpath_close(vdev->vpaths[i].handle);
			vdev->stats.vpaths_open--;
		}
		vdev->vpaths[i].is_open = 0;
		vdev->vpaths[i].handle = NULL;
	}
}
/* open vpaths */
int vxge_open_vpaths(struct vxgedev *vdev)
{
	enum vxge_hw_status status;
	int i;
	u32 vp_id = 0;
	struct vxge_hw_vpath_attr attr;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_assert(vdev->vpaths[i].is_configured);
		attr.vp_id = vdev->vpaths[i].device_id;
		attr.fifo_attr.callback = vxge_xmit_compl;
		attr.fifo_attr.txdl_term = vxge_tx_term;
		attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
		attr.fifo_attr.userdata = (void *)&vdev->vpaths[i].fifo;

		attr.ring_attr.callback = vxge_rx_1b_compl;
		attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
		attr.ring_attr.rxd_term = vxge_rx_term;
		attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
		attr.ring_attr.userdata = (void *)&vdev->vpaths[i].ring;

		vdev->vpaths[i].ring.ndev = vdev->ndev;
		vdev->vpaths[i].ring.pdev = vdev->pdev;
		status = vxge_hw_vpath_open(vdev->devh, &attr,
				&(vdev->vpaths[i].handle));
		if (status == VXGE_HW_OK) {
			vdev->vpaths[i].fifo.handle =
			    (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
			vdev->vpaths[i].ring.handle =
			    (struct __vxge_hw_ring *)attr.ring_attr.userdata;
			vdev->vpaths[i].fifo.tx_steering_type =
				vdev->config.tx_steering_type;
			vdev->vpaths[i].fifo.ndev = vdev->ndev;
			vdev->vpaths[i].fifo.pdev = vdev->pdev;
			vdev->vpaths[i].fifo.indicate_max_pkts =
				vdev->config.fifo_indicate_max_pkts;
			vdev->vpaths[i].ring.rx_vector_no = 0;
			vdev->vpaths[i].ring.rx_csum = vdev->rx_csum;
			vdev->vpaths[i].is_open = 1;
			vdev->vp_handles[i] = vdev->vpaths[i].handle;
			vdev->vpaths[i].ring.gro_enable =
						vdev->config.gro_enable;
			vdev->vpaths[i].ring.vlan_tag_strip =
						vdev->vlan_tag_strip;
			vdev->stats.vpaths_open++;
		} else {
			vdev->stats.vpath_open_fail++;
			vxge_debug_init(VXGE_ERR,
				"%s: vpath: %d failed to open "
				"with status: %d",
				vdev->ndev->name, vdev->vpaths[i].device_id,
				status);
			vxge_close_vpaths(vdev, 0);
			return -EPERM;
		}

		vp_id =
		  ((struct __vxge_hw_vpath_handle *)vdev->vpaths[i].handle)->
		  vpath->vp_id;
		vdev->vpaths_deployed |= vxge_mBIT(vp_id);
	}
	return VXGE_HW_OK;
}
/*
 * vxge_isr_napi
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the hldev structure of the Titan device
 * @ptregs: pointer to the registers pushed on the stack.
 *
 * This function is the ISR handler of the device when napi is enabled. It
 * identifies the reason for the interrupt and calls the relevant service
 * routines.
 */
static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
{
	struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)dev_id;
	struct vxgedev *vdev;
	struct net_device *dev;
	u64 reason;
	enum vxge_hw_status status;

	vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	dev = hldev->ndev;
	vdev = netdev_priv(dev);

	if (pci_channel_offline(vdev->pdev))
		return IRQ_NONE;

	if (unlikely(!is_vxge_card_up(vdev)))
		return IRQ_NONE;

	status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
			&reason);
	if (status == VXGE_HW_OK) {
		vxge_hw_device_mask_all(hldev);

		if (reason &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
			vdev->vpaths_deployed >>
			(64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {

			vxge_hw_device_clear_tx_rx(hldev);
			napi_schedule(&vdev->napi);
			vxge_debug_intr(VXGE_TRACE,
				"%s:%d Exiting...", __func__, __LINE__);
			return IRQ_HANDLED;
		} else
			vxge_hw_device_unmask_all(hldev);
	} else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
		(status == VXGE_HW_ERR_CRITICAL) ||
		(status == VXGE_HW_ERR_FIFO))) {
		vxge_hw_device_mask_all(hldev);
		vxge_hw_device_flush_io(hldev);
		return IRQ_HANDLED;
	} else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
		return IRQ_HANDLED;

	vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
	return IRQ_NONE;
}
#ifdef CONFIG_PCI_MSI

static irqreturn_t
vxge_tx_msix_handle(int irq, void *dev_id)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;

	VXGE_COMPLETE_VPATH_TX(fifo);

	return IRQ_HANDLED;
}

static irqreturn_t
vxge_rx_msix_napi_handle(int irq, void *dev_id)
{
	struct vxge_ring *ring = (struct vxge_ring *)dev_id;

	/* MSIX_IDX for Rx is 1 */
	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
					ring->rx_vector_no);

	napi_schedule(&ring->napi);
	return IRQ_HANDLED;
}

static irqreturn_t
vxge_alarm_msix_handle(int irq, void *dev_id)
{
	int i;
	enum vxge_hw_status status;
	struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
	struct vxgedev *vdev = vpath->vdev;
	int alarm_msix_id =
		VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle,
			alarm_msix_id);

		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
			vdev->exec_mode);
		if (status == VXGE_HW_OK) {
			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
				alarm_msix_id);
			continue;
		}
		vxge_debug_intr(VXGE_ERR,
			"%s: vxge_hw_vpath_alarm_process failed %x ",
			VXGE_DRIVER_NAME, status);
	}
	return IRQ_HANDLED;
}

static int vxge_alloc_msix(struct vxgedev *vdev)
{
	int j, i, ret = 0;
	int intr_cnt = 0;
	int alarm_msix_id = 0, msix_intr_vect = 0;
	vdev->intr_cnt = 0;

	/* Tx/Rx MSIX Vectors count */
	vdev->intr_cnt = vdev->no_of_vpath * 2;

	/* Alarm MSIX Vectors count */
	vdev->intr_cnt++;

	intr_cnt = (vdev->max_vpath_supported * 2) + 1;
	vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry),
						GFP_KERNEL);
	if (!vdev->entries) {
		vxge_debug_init(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return -ENOMEM;
	}

	vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry),
						GFP_KERNEL);
	if (!vdev->vxge_entries) {
		vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		kfree(vdev->entries);
		return -ENOMEM;
	}

	/* Last vector in the list is used for alarm */
	alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
	for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {

		msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;

		/* Initialize the fifo vector */
		vdev->entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].entry = msix_intr_vect;
		vdev->vxge_entries[j].in_use = 0;
		j++;

		/* Initialize the ring vector */
		vdev->entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].entry = msix_intr_vect + 1;
		vdev->vxge_entries[j].in_use = 0;
		j++;
	}

	/* Initialize the alarm vector */
	vdev->entries[j].entry = alarm_msix_id;
	vdev->vxge_entries[j].entry = alarm_msix_id;
	vdev->vxge_entries[j].in_use = 0;

	ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
	/* if the driver request exceeds available IRQs, retry with a smaller
	 * number.
	 */
	if (ret > 0) {
		vxge_debug_init(VXGE_ERR,
			"%s: MSI-X enable failed for %d vectors, available: %d",
			VXGE_DRIVER_NAME, intr_cnt, ret);
		vdev->max_vpath_supported = vdev->no_of_vpath;
		intr_cnt = (vdev->max_vpath_supported * 2) + 1;

		/* Reset the alarm vector setting */
		vdev->entries[j].entry = 0;
		vdev->vxge_entries[j].entry = 0;

		/* Initialize the alarm vector with new setting */
		vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
		vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
		vdev->vxge_entries[intr_cnt - 1].in_use = 0;

		ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
		if (!ret)
			vxge_debug_init(VXGE_ERR,
				"%s: MSI-X enabled for %d vectors",
				VXGE_DRIVER_NAME, intr_cnt);
	}

	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s: MSI-X enable failed for %d vectors, ret: %d",
			VXGE_DRIVER_NAME, intr_cnt, ret);
		kfree(vdev->entries);
		kfree(vdev->vxge_entries);
		vdev->entries = NULL;
		vdev->vxge_entries = NULL;
		return -ENODEV;
	}
	return 0;
}
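
/*
 * Illustrative vector table produced by vxge_alloc_msix() above, assuming
 * two vpaths and VXGE_HW_VPATH_MSIX_ACTIVE == 4 (values here are only an
 * example, not taken from the hardware spec):
 *
 *   entries[0].entry = 0   vpath 0 Tx
 *   entries[1].entry = 1   vpath 0 Rx
 *   entries[2].entry = 4   vpath 1 Tx
 *   entries[3].entry = 5   vpath 1 Rx
 *   entries[4].entry = 6   alarm (VXGE_HW_VPATH_MSIX_ACTIVE * 2 - 2)
 */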

static int vxge_enable_msix(struct vxgedev *vdev)
{
	int i, ret = 0;
	enum vxge_hw_status status;
	/* 0 - Tx, 1 - Rx */
	int tim_msix_id[4];
	int alarm_msix_id = 0, msix_intr_vect = 0;
	vdev->intr_cnt = 0;

	/* allocate msix vectors */
	ret = vxge_alloc_msix(vdev);
	if (!ret) {
		/* Last vector in the list is used for alarm */
		alarm_msix_id =
			VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
		for (i = 0; i < vdev->no_of_vpath; i++) {

			/* If fifo or ring are not enabled
			   the MSIX vector for that should be set to 0.
			   Hence initializing this array to all 0s.
			*/
			memset(tim_msix_id, 0, sizeof(tim_msix_id));
			msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
			tim_msix_id[0] = msix_intr_vect;

			tim_msix_id[1] = msix_intr_vect + 1;
			vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1];

			status = vxge_hw_vpath_msix_set(
						vdev->vpaths[i].handle,
						tim_msix_id, alarm_msix_id);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_msix_set "
					"failed with status : %x", status);
				kfree(vdev->entries);
				kfree(vdev->vxge_entries);
				pci_disable_msix(vdev->pdev);
				return -ENODEV;
			}
		}
	}

	return ret;
}
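
/*
 * tim_msix_id[] above maps the per-vpath timer interrupts to vectors:
 * index 0 (Tx) and index 1 (Rx) get the vpath's two vector slots, while
 * the remaining indices stay 0 because those units are unused here. The
 * Rx vector number is also cached in ring.rx_vector_no so that the Rx
 * handler can mask its own vector before scheduling NAPI.
 */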

static void vxge_rem_msix_isr(struct vxgedev *vdev)
{
	int intr_cnt;

	for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1);
		intr_cnt++) {
		if (vdev->vxge_entries[intr_cnt].in_use) {
			synchronize_irq(vdev->entries[intr_cnt].vector);
			free_irq(vdev->entries[intr_cnt].vector,
				vdev->vxge_entries[intr_cnt].arg);
			vdev->vxge_entries[intr_cnt].in_use = 0;
		}
	}

	kfree(vdev->entries);
	kfree(vdev->vxge_entries);
	vdev->entries = NULL;
	vdev->vxge_entries = NULL;

	if (vdev->config.intr_type == MSI_X)
		pci_disable_msix(vdev->pdev);
}
#endif

static void vxge_rem_isr(struct vxgedev *vdev)
{
	struct __vxge_hw_device *hldev;
	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);

#ifdef CONFIG_PCI_MSI
	if (vdev->config.intr_type == MSI_X) {
		vxge_rem_msix_isr(vdev);
	} else
#endif
	if (vdev->config.intr_type == INTA) {
		synchronize_irq(vdev->pdev->irq);
		free_irq(vdev->pdev->irq, hldev);
	}
}

static int vxge_add_isr(struct vxgedev *vdev)
{
	int ret = 0;
	struct __vxge_hw_device *hldev =
		(struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
#ifdef CONFIG_PCI_MSI
	int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
	u64 function_mode = vdev->config.device_hw_info.function_mode;
	int pci_fun = PCI_FUNC(vdev->pdev->devfn);

	if (vdev->config.intr_type == MSI_X)
		ret = vxge_enable_msix(vdev);

	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
		if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
			test_and_set_bit(__VXGE_STATE_CARD_UP,
				&driver_config->inta_dev_open))
			return VXGE_HW_FAIL;
		else {
			vxge_debug_init(VXGE_ERR,
				"%s: Defaulting to INTA", VXGE_DRIVER_NAME);
			vdev->config.intr_type = INTA;
			vxge_hw_device_set_intr_type(vdev->devh,
				VXGE_HW_INTR_MODE_IRQLINE);
			vxge_close_vpaths(vdev, 1);
			vdev->no_of_vpath = 1;
			vdev->stats.vpaths_open = 1;
		}
	}

	if (vdev->config.intr_type == MSI_X) {
		for (intr_idx = 0;
		     intr_idx < (vdev->no_of_vpath *
			VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {

			msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
			irq_req = 0;

			switch (msix_idx) {
			case 0:
				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
					"%s:vxge fn: %d vpath: %d Tx MSI-X: %d",
					vdev->ndev->name, pci_fun, vp_idx,
					vdev->entries[intr_cnt].entry);
				/* Handle Tx interrupts */
				ret = request_irq(
					vdev->entries[intr_cnt].vector,
					vxge_tx_msix_handle, 0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].fifo);
				vdev->vxge_entries[intr_cnt].arg =
						&vdev->vpaths[vp_idx].fifo;
				irq_req = 1;
				break;
			case 1:
				snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
					"%s:vxge fn: %d vpath: %d Rx MSI-X: %d",
					vdev->ndev->name, pci_fun, vp_idx,
					vdev->entries[intr_cnt].entry);
				/* Handle Rx interrupts */
				ret = request_irq(
					vdev->entries[intr_cnt].vector,
					vxge_rx_msix_napi_handle,
					0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx].ring);
				vdev->vxge_entries[intr_cnt].arg =
						&vdev->vpaths[vp_idx].ring;
				irq_req = 1;
				break;
			}

			if (ret) {
				vxge_debug_init(VXGE_ERR,
					"%s: MSIX - %d Registration failed",
					vdev->ndev->name, intr_cnt);
				vxge_rem_msix_isr(vdev);
				if ((function_mode ==
					VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
					test_and_set_bit(__VXGE_STATE_CARD_UP,
						&driver_config->inta_dev_open))
					return VXGE_HW_FAIL;
				else {
					vxge_hw_device_set_intr_type(
						vdev->devh,
						VXGE_HW_INTR_MODE_IRQLINE);
					vdev->config.intr_type = INTA;
					vxge_debug_init(VXGE_ERR,
						"%s: Defaulting to INTA",
						vdev->ndev->name);
					vxge_close_vpaths(vdev, 1);
					vdev->no_of_vpath = 1;
					vdev->stats.vpaths_open = 1;
					goto INTA_MODE;
				}
			}

			if (irq_req) {
				/* We requested this msix interrupt */
				vdev->vxge_entries[intr_cnt].in_use = 1;
				vxge_hw_vpath_msix_unmask(
					vdev->vpaths[vp_idx].handle,
					intr_idx);
				intr_cnt++;
			}

			/* Point to next vpath handler */
			if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0)
				&& (vp_idx < (vdev->no_of_vpath - 1)))
					vp_idx++;
		}

		intr_cnt = vdev->max_vpath_supported * 2;
		snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
			"%s:vxge Alarm fn: %d MSI-X: %d",
			vdev->ndev->name, pci_fun,
			vdev->entries[intr_cnt].entry);
		/* For Alarm interrupts */
		ret = request_irq(vdev->entries[intr_cnt].vector,
					vxge_alarm_msix_handle, 0,
					vdev->desc[intr_cnt],
					&vdev->vpaths[vp_idx]);
		if (ret) {
			vxge_debug_init(VXGE_ERR,
				"%s: MSIX - %d Registration failed",
				vdev->ndev->name, intr_cnt);
			vxge_rem_msix_isr(vdev);
			if ((function_mode ==
				VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
				test_and_set_bit(__VXGE_STATE_CARD_UP,
						&driver_config->inta_dev_open))
				return VXGE_HW_FAIL;
			else {
				vxge_hw_device_set_intr_type(vdev->devh,
						VXGE_HW_INTR_MODE_IRQLINE);
				vdev->config.intr_type = INTA;
				vxge_debug_init(VXGE_ERR,
					"%s: Defaulting to INTA",
					vdev->ndev->name);
				vxge_close_vpaths(vdev, 1);
				vdev->no_of_vpath = 1;
				vdev->stats.vpaths_open = 1;
				goto INTA_MODE;
			}
		}

		vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
					intr_idx - 2);
		vdev->vxge_entries[intr_cnt].in_use = 1;
		vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx];
	}
INTA_MODE:
#endif
	snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);

	if (vdev->config.intr_type == INTA) {
		ret = request_irq((int) vdev->pdev->irq,
			vxge_isr_napi,
			IRQF_SHARED, vdev->desc[0], hldev);
		if (ret) {
			vxge_debug_init(VXGE_ERR,
				"%s %s-%d: ISR registration failed",
				VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
			return -ENODEV;
		}
		vxge_debug_init(VXGE_TRACE,
			"new %s-%d line allocated",
			"IRQ", vdev->pdev->irq);
	}

	return VXGE_HW_OK;
}
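
/*
 * Fallback policy in vxge_add_isr(): if MSI-X cannot be enabled or any
 * vector registration fails, the driver tears down its MSI-X state,
 * reopens a single vpath and retries in INTA mode; in multi-function mode
 * it instead fails outright when another function already holds the INTA
 * line (the inta_dev_open bit).
 */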

static void vxge_poll_vp_reset(unsigned long data)
{
	struct vxgedev *vdev = (struct vxgedev *)data;
	int i, j = 0;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		if (test_bit(i, &vdev->vp_reset)) {
			vxge_reset_vpath(vdev, i);
			j++;
		}
	}
	if (j && (vdev->config.intr_type != MSI_X)) {
		vxge_hw_device_unmask_all(vdev->devh);
		vxge_hw_device_flush_io(vdev->devh);
	}

	mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
}

static void vxge_poll_vp_lockup(unsigned long data)
{
	struct vxgedev *vdev = (struct vxgedev *)data;
	int i;
	struct vxge_ring *ring;
	enum vxge_hw_status status = VXGE_HW_OK;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		/* Did this vpath receive any packets? */
		if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
			status = vxge_hw_vpath_check_leak(ring->handle);

			/* Did it receive any packets last time? */
			if ((VXGE_HW_FAIL == status) &&
				(VXGE_HW_FAIL == ring->last_status)) {

				/* schedule vpath reset */
				if (!test_and_set_bit(i, &vdev->vp_reset)) {

					/* disable interrupts for this vpath */
					vxge_vpath_intr_disable(vdev, i);

					/* stop the queue for this vpath */
					vxge_stop_tx_queue(&vdev->vpaths[i].
								fifo);
					continue;
				}
			}
		}
		ring->stats.prev_rx_frms = ring->stats.rx_frms;
		ring->last_status = status;
	}

	/* Check every 1 millisecond */
	mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
}
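
/*
 * Lockup detection above is two-strike: a vpath is scheduled for reset
 * only when vxge_hw_vpath_check_leak() reports VXGE_HW_FAIL on two
 * consecutive polls (ring->last_status remembers the previous verdict)
 * while the Rx frame counter has not moved.
 */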

/**
 * vxge_open
 * @dev: pointer to the device structure.
 *
 * This function is the open entry point of the driver. It mainly calls a
 * function to allocate Rx buffers and inserts them into the buffer
 * descriptors and then enables the Rx part of the NIC.
 * Return value: '0' on success and an appropriate (-)ve integer as
 * defined in errno.h file on failure.
 */
int
vxge_open(struct net_device *dev)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	int ret = 0;
	int i;
	u64 val64, function_mode;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", dev->name, __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
	function_mode = vdev->config.device_hw_info.function_mode;

	/* make sure you have link off by default every time the NIC is
	 * initialized */
	netif_carrier_off(dev);

	/* Check for another device already open with INTA */
	if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
		test_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open)) {
		ret = -EPERM;
		goto out0;
	}

	/* Open VPATHs */
	status = vxge_open_vpaths(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: Vpath open failed", vdev->ndev->name);
		ret = -EPERM;
		goto out0;
	}

	vdev->mtu = dev->mtu;

	status = vxge_add_isr(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: ISR add failed", dev->name);
		ret = -EPERM;
		goto out1;
	}

	if (vdev->config.intr_type != MSI_X) {
		netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
			vdev->config.napi_weight);
		napi_enable(&vdev->napi);
	} else {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			netif_napi_add(dev, &vdev->vpaths[i].ring.napi,
				vxge_poll_msix, vdev->config.napi_weight);
			napi_enable(&vdev->vpaths[i].ring.napi);
		}
	}

	/* configure RTH */
	if (vdev->config.rth_steering) {
		status = vxge_rth_configure(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: RTH configuration failed",
				dev->name);
			ret = -EPERM;
			goto out2;
		}
	}

	for (i = 0; i < vdev->no_of_vpath; i++) {
		/* set initial mtu before enabling the device */
		status = vxge_hw_vpath_mtu_set(vdev->vpaths[i].handle,
						vdev->mtu);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: can not set new MTU", dev->name);
			ret = -EPERM;
			goto out2;
		}
	}

	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
	vxge_debug_init(vdev->level_trace,
		"%s: MTU is %d", vdev->ndev->name, vdev->mtu);
	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);

	/* Reprogram the DA table with populated mac addresses */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_restore_vpath_mac_addr(&vdev->vpaths[i]);
		vxge_restore_vpath_vid_table(&vdev->vpaths[i]);
	}

	/* Enable vpath to sniff all unicast/multicast traffic that is not
	 * addressed to them. We allow promiscuous mode for PF only
	 */
	val64 = 0;
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_addr),
		val64);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_vid),
		val64);

	vxge_set_multicast(dev);

	/* Enabling Bcast and mcast for all vpath */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		status = vxge_hw_vpath_bcast_enable(vdev->vpaths[i].handle);
		if (status != VXGE_HW_OK)
			vxge_debug_init(VXGE_ERR,
				"%s : Can not enable bcast for vpath "
				"id %d", dev->name, i);
		if (vdev->config.addr_learn_en) {
			status =
			    vxge_hw_vpath_mcast_enable(vdev->vpaths[i].handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR,
					"%s : Can not enable mcast for vpath "
					"id %d", dev->name, i);
		}
	}

	vxge_hw_device_setpause_data(vdev->devh, 0,
		vdev->config.tx_pause_enable,
		vdev->config.rx_pause_enable);

	if (vdev->vp_reset_timer.function == NULL)
		vxge_os_timer(vdev->vp_reset_timer,
			vxge_poll_vp_reset, vdev, (HZ/2));

	if (vdev->vp_lockup_timer.function == NULL)
		vxge_os_timer(vdev->vp_lockup_timer,
			vxge_poll_vp_lockup, vdev, (HZ/2));

	set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

	smp_wmb();

	if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
		netif_carrier_on(vdev->ndev);
		printk(KERN_NOTICE "%s: Link Up\n", vdev->ndev->name);
		vdev->stats.link_up++;
	}

	vxge_hw_device_intr_enable(vdev->devh);

	smp_wmb();

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_hw_vpath_enable(vdev->vpaths[i].handle);
		smp_wmb();
		vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
	}

	vxge_start_all_tx_queue(vdev);
	goto out0;

out2:
	vxge_rem_isr(vdev);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

out1:
	vxge_close_vpaths(vdev, 0);
out0:
	vxge_debug_entryexit(VXGE_TRACE,
				"%s: %s:%d Exiting...",
				dev->name, __func__, __LINE__);
	return ret;
}

/* Loop through the mac address list and delete all the entries */
void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
	struct list_head *entry, *next;
	if (list_empty(&vpath->mac_addr_list))
		return;

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		list_del(entry);
		kfree((struct vxge_mac_addrs *)entry);
	}
}

static void vxge_napi_del_all(struct vxgedev *vdev)
{
	int i;
	if (vdev->config.intr_type != MSI_X)
		netif_napi_del(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			netif_napi_del(&vdev->vpaths[i].ring.napi);
	}
}

int do_vxge_close(struct net_device *dev, int do_io)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	int i;
	u64 val64, vpath_vector;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);
	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);

	/* If vxge_handle_crit_err task is executing,
	 * wait till it completes. */
	while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		msleep(50);

	clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	if (do_io) {
		/* Put the vpath back in normal mode */
		vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
		status = vxge_hw_mgmt_reg_read(vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				&val64);

		if (status == VXGE_HW_OK) {
			val64 &= ~vpath_vector;
			status = vxge_hw_mgmt_reg_write(vdev->devh,
					vxge_hw_mgmt_reg_type_mrpcim,
					0,
					(ulong)offsetof(
						struct vxge_hw_mrpcim_reg,
						rts_mgr_cbasin_cfg),
					val64);
		}

		/* Remove the function 0 from promiscuous mode */
		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_addr),
			0);

		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_vid),
			0);

		smp_wmb();
	}
	del_timer_sync(&vdev->vp_lockup_timer);

	del_timer_sync(&vdev->vp_reset_timer);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

	netif_carrier_off(vdev->ndev);
	printk(KERN_NOTICE "%s: Link Down\n", vdev->ndev->name);
	vxge_stop_all_tx_queue(vdev);

	/* Note that at this point xmit() is stopped by upper layer */
	if (do_io)
		vxge_hw_device_intr_disable(vdev->devh);

	vxge_rem_isr(vdev);

	vxge_napi_del_all(vdev);

	if (do_io)
		vxge_reset_all_vpaths(vdev);

	vxge_close_vpaths(vdev, 0);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", dev->name, __func__, __LINE__);

	clear_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open);
	clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return 0;
}
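
/*
 * do_io distinguishes a normal close (1) from a close on a dead PCI
 * channel (0, used by the error-recovery path below): with do_io clear the
 * function skips every register access and only tears down software state.
 */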

/**
 * vxge_close
 * @dev: device pointer.
 *
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point, thus it's usually referred to
 * as the close function. Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value: '0' on success and an appropriate (-)ve integer as
 * defined in errno.h file on failure.
 */
int
vxge_close(struct net_device *dev)
{
	do_vxge_close(dev, 1);
	return 0;
}

/**
 * vxge_change_mtu
 * @dev: net device pointer.
 * @new_mtu: the new MTU size for the device.
 *
 * A driver entry point to change MTU size for the device. Before changing
 * the MTU the device must be stopped.
 */
static int vxge_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d", __func__, __LINE__);
	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
		vxge_debug_init(vdev->level_err,
			"%s: mtu size is invalid", dev->name);
		return -EPERM;
	}

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev))) {
		/* just store new value, will use later on open() */
		dev->mtu = new_mtu;
		vxge_debug_init(vdev->level_err,
			"%s", "device is down on MTU change");
		return 0;
	}

	vxge_debug_init(vdev->level_trace,
		"trying to apply new MTU %d", new_mtu);

	if (vxge_close(dev))
		return -EIO;

	dev->mtu = new_mtu;
	vdev->mtu = new_mtu;

	if (vxge_open(dev))
		return -EIO;

	vxge_debug_init(vdev->level_trace,
		"%s: MTU changed to %d", vdev->ndev->name, new_mtu);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d Exiting...", __func__, __LINE__);

	return 0;
}

/**
 * vxge_get_stats
 * @dev: pointer to the device structure
 *
 * Updates the device statistics structure. This function updates the device
 * statistics structure in the net_device structure and returns a pointer
 * to it.
 */
static struct net_device_stats *
vxge_get_stats(struct net_device *dev)
{
	struct vxgedev *vdev;
	struct net_device_stats *net_stats;
	int k;

	vdev = netdev_priv(dev);

	net_stats = &vdev->stats.net_stats;

	memset(net_stats, 0, sizeof(struct net_device_stats));

	for (k = 0; k < vdev->no_of_vpath; k++) {
		net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
		net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
		net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
		net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
		net_stats->rx_dropped +=
			vdev->vpaths[k].ring.stats.rx_dropped;

		net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
		net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
		net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
	}

	return net_stats;
}

/**
 * vxge_ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL specific structure, that can contain a pointer to
 *       a proprietary structure used to pass information to the driver.
 * @cmd: This is used to distinguish between the different commands that
 *       can be passed to the IOCTL functions.
 *
 * Entry point for the Ioctl.
 */
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}

/**
 * vxge_tx_watchdog
 * @dev: pointer to net device structure
 *
 * Watchdog for transmit side.
 * This function is triggered if the Tx Queue is stopped
 * for a pre-defined amount of time when the Interface is still up.
 */
static void
vxge_tx_watchdog(struct net_device *dev)
{
	struct vxgedev *vdev;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);

	vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;

	vxge_reset(vdev);
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

/**
 * vxge_vlan_rx_register
 * @dev: net device pointer.
 * @grp: vlan group
 *
 * Vlan group registration
 */
static void
vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp;
	u64 vid;
	enum vxge_hw_status status;
	int i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);

	vpath = &vdev->vpaths[0];
	if ((NULL == grp) && (vpath->is_open)) {
		/* Get the first vlan */
		status = vxge_hw_vpath_vid_get(vpath->handle, &vid);

		while (status == VXGE_HW_OK) {

			/* Delete this vlan from the vid table */
			for (vp = 0; vp < vdev->no_of_vpath; vp++) {
				vpath = &vdev->vpaths[vp];
				if (!vpath->is_open)
					continue;

				vxge_hw_vpath_vid_delete(vpath->handle, vid);
			}

			/* Get the next vlan to be deleted */
			vpath = &vdev->vpaths[0];
			status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
		}
	}

	vdev->vlgrp = grp;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		if (vdev->vpaths[i].is_configured)
			vdev->vpaths[i].ring.vlgrp = grp;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

/**
 * vxge_vlan_rx_add_vid
 * @dev: net device pointer.
 * @vid: vid
 *
 * Add the vlan id to the device's vlan id table
 */
static void
vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp_id;

	vdev = (struct vxgedev *)netdev_priv(dev);

	/* Add this vlan to the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_add(vpath->handle, vid);
	}
}

/**
 * vxge_vlan_rx_kill_vid
 * @dev: net device pointer.
 * @vid: vid
 *
 * Remove the vlan id from the device's vlan id table
 */
static void
vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp_id;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = (struct vxgedev *)netdev_priv(dev);

	vlan_group_set_device(vdev->vlgrp, vid, NULL);

	/* Delete this vlan from the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_delete(vpath->handle, vid);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

static const struct net_device_ops vxge_netdev_ops = {
	.ndo_open               = vxge_open,
	.ndo_stop               = vxge_close,
	.ndo_get_stats          = vxge_get_stats,
	.ndo_start_xmit         = vxge_xmit,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_multicast_list = vxge_set_multicast,

	.ndo_do_ioctl           = vxge_ioctl,

	.ndo_set_mac_address    = vxge_set_mac_addr,
	.ndo_change_mtu         = vxge_change_mtu,
	.ndo_vlan_rx_register   = vxge_vlan_rx_register,
	.ndo_vlan_rx_kill_vid   = vxge_vlan_rx_kill_vid,
	.ndo_vlan_rx_add_vid    = vxge_vlan_rx_add_vid,

	.ndo_tx_timeout         = vxge_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = vxge_netpoll,
#endif
};

int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
				   struct vxge_config *config,
				   int high_dma, int no_of_vpath,
				   struct vxgedev **vdev_out)
{
	struct net_device *ndev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev;
	int i, ret = 0, no_of_queue = 1;
	u64 stat;

	*vdev_out = NULL;
	if (config->tx_steering_type == TX_MULTIQ_STEERING)
		no_of_queue = no_of_vpath;

	ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
			no_of_queue);
	if (ndev == NULL) {
		vxge_debug_init(
			vxge_hw_device_trace_level_get(hldev),
			"%s : device allocation failed", __func__);
		ret = -ENODEV;
		goto _out0;
	}

	vxge_debug_entryexit(
		vxge_hw_device_trace_level_get(hldev),
		"%s: %s:%d Entering...",
		ndev->name, __func__, __LINE__);

	vdev = netdev_priv(ndev);
	memset(vdev, 0, sizeof(struct vxgedev));

	vdev->ndev = ndev;
	vdev->devh = hldev;
	vdev->pdev = hldev->pdev;
	memcpy(&vdev->config, config, sizeof(struct vxge_config));
	vdev->rx_csum = 1;	/* Enable Rx CSUM by default. */

	SET_NETDEV_DEV(ndev, &vdev->pdev->dev);

	ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
				NETIF_F_HW_VLAN_FILTER;
	/* Driver entry points */
	ndev->irq = vdev->pdev->irq;
	ndev->base_addr = (unsigned long) hldev->bar0;

	ndev->netdev_ops = &vxge_netdev_ops;

	ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;

	initialize_ethtool_ops(ndev);

	/* Allocate memory for vpath */
	vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
				no_of_vpath, GFP_KERNEL);
	if (!vdev->vpaths) {
		vxge_debug_init(VXGE_ERR,
			"%s: vpath memory allocation failed",
			vdev->ndev->name);
		ret = -ENODEV;
		goto _out1;
	}

	ndev->features |= NETIF_F_SG;

	ndev->features |= NETIF_F_HW_CSUM;
	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		"%s : checksumming enabled", __func__);

	if (high_dma) {
		ndev->features |= NETIF_F_HIGHDMA;
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
			"%s : using High DMA", __func__);
	}

	ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;

	if (vdev->config.gro_enable)
		ndev->features |= NETIF_F_GRO;

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		ndev->real_num_tx_queues = no_of_vpath;

	ndev->features |= NETIF_F_LLTX;

	for (i = 0; i < no_of_vpath; i++)
		spin_lock_init(&vdev->vpaths[i].fifo.tx_lock);

	if (register_netdev(ndev)) {
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
			"%s: %s : device registration failed!",
			ndev->name, __func__);
		ret = -ENODEV;
		goto _out2;
	}

	/* Set the factory defined MAC address initially */
	ndev->addr_len = ETH_ALEN;

	/* Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(ndev);

	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		"%s: Ethernet device registered",
		ndev->name);

	*vdev_out = vdev;

	/* Resetting the Device stats */
	status = vxge_hw_mrpcim_stats_access(
			hldev,
			VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
			0,
			0,
			&stat);

	if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
		vxge_debug_init(
			vxge_hw_device_trace_level_get(hldev),
			"%s: device stats clear returns "
			"VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);

	vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
		"%s: %s:%d Exiting...",
		ndev->name, __func__, __LINE__);

	return ret;
_out2:
	kfree(vdev->vpaths);
_out1:
	free_netdev(ndev);
_out0:
	return ret;
}

/*
 * vxge_device_unregister
 *
 * This function will unregister and free network device
 */
void
vxge_device_unregister(struct __vxge_hw_device *hldev)
{
	struct vxgedev *vdev;
	struct net_device *dev;
	char buf[IFNAMSIZ];
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	u32 level_trace;
#endif

	dev = hldev->ndev;
	vdev = netdev_priv(dev);
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	level_trace = vdev->level_trace;
#endif
	vxge_debug_entryexit(level_trace,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);

	memcpy(buf, vdev->ndev->name, IFNAMSIZ);

	/* in 2.6 will call stop() if device is up */
	unregister_netdev(dev);

	flush_scheduled_work();

	vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
	vxge_debug_entryexit(level_trace,
		"%s: %s:%d Exiting...", buf, __func__, __LINE__);
}

/*
 * vxge_callback_crit_err
 *
 * This function is called by the alarm handler in interrupt context.
 * Driver must analyze it based on the event type.
 */
static void
vxge_callback_crit_err(struct __vxge_hw_device *hldev,
			enum vxge_hw_event type, u64 vp_id)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
	int vpath_idx;

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);

	/* Note: This event type should be used for device wide
	 * indications only - Serious errors, Slot freeze and critical errors
	 */
	vdev->cric_err_event = type;

	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++)
		if (vdev->vpaths[vpath_idx].device_id == vp_id)
			break;

	if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
		if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
			vxge_debug_init(VXGE_ERR,
				"%s: Slot is frozen", vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_SERR) {
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Serious Error",
				vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Critical Error",
				vdev->ndev->name);
	}

	if ((type == VXGE_HW_EVENT_SERR) ||
		(type == VXGE_HW_EVENT_SLOT_FREEZE)) {
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
		vxge_hw_device_mask_all(hldev);
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
		  (type == VXGE_HW_EVENT_VPATH_ERR)) {

		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
		else {
			/* check if this vpath is already set for reset */
			if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {

				/* disable interrupts for this vpath */
				vxge_vpath_intr_disable(vdev, vpath_idx);

				/* stop the queue for this vpath */
				vxge_stop_tx_queue(&vdev->vpaths[vpath_idx].
							fifo);
			}
		}
	}

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d Exiting...",
		vdev->ndev->name, __func__, __LINE__);
}

static void verify_bandwidth(void)
{
	int i, band_width, total = 0, equal_priority = 0;

	/* 1. If user enters 0 for some fifo, give equal priority to all */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (bw_percentage[i] == 0) {
			equal_priority = 1;
			break;
		}
	}

	if (!equal_priority) {
		/* 2. If sum exceeds 100, give equal priority to all */
		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
			if (bw_percentage[i] == 0xFF)
				break;

			total += bw_percentage[i];
			if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
				equal_priority = 1;
				break;
			}
		}
	}

	if (!equal_priority) {
		/* Is all the bandwidth consumed? */
		if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
			if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
				/* Split rest of bw equally among next VPs */
				band_width =
				  (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
					(VXGE_HW_MAX_VIRTUAL_PATHS - i);
				if (band_width < 2) /* min of 2% */
					equal_priority = 1;
				else {
					for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
						i++)
						bw_percentage[i] =
							band_width;
				}
			}
		} else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
			equal_priority = 1;
	}

	if (equal_priority) {
		vxge_debug_init(VXGE_ERR,
			"%s: Assigning equal bandwidth to all the vpaths",
			VXGE_DRIVER_NAME);
		bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
					VXGE_HW_MAX_VIRTUAL_PATHS;
		for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			bw_percentage[i] = bw_percentage[0];
	}
}
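
/*
 * Worked example for verify_bandwidth() (illustrative values): with
 * bw_percentage = {30, 30, 0xFF, ...} the scan stops at i == 2 with
 * total == 60, so the remaining 40% is split over the other 15 vpaths
 * (40 / 15 == 2, just at the 2% minimum); had the split fallen below 2%,
 * all vpaths would have been reset to equal shares instead.
 */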

/*
 * Vpath configuration
 */
static int __devinit vxge_config_vpaths(
			struct vxge_hw_device_config *device_config,
			u64 vpath_mask, struct vxge_config *config_param)
{
	int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
	u32 txdl_size, txdl_per_memblock;

	temp = driver_config->vpath_per_dev;
	if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
		(max_config_dev == VXGE_MAX_CONFIG_DEV)) {
		/* No more CPU. Return vpath number as zero. */
		if (driver_config->g_no_cpus == -1)
			return 0;

		if (!driver_config->g_no_cpus)
			driver_config->g_no_cpus = num_online_cpus();

		driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
		if (!driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = 1;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			if (vxge_bVALn(vpath_mask, i, 1))
				default_no_vpath++;
		if (default_no_vpath < driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = default_no_vpath;

		driver_config->g_no_cpus = driver_config->g_no_cpus -
				(driver_config->vpath_per_dev * 2);
		if (driver_config->g_no_cpus <= 0)
			driver_config->g_no_cpus = -1;
	}

	if (driver_config->vpath_per_dev == 1) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Disable tx and rx steering, "
			"as single vpath is configured", VXGE_DRIVER_NAME);
		config_param->rth_steering = NO_STEERING;
		config_param->tx_steering_type = NO_STEERING;
		device_config->rth_en = 0;
	}

	/* configure bandwidth */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		device_config->vp_config[i].min_bandwidth = bw_percentage[i];

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		device_config->vp_config[i].vp_id = i;
		device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
		if (no_of_vpaths < driver_config->vpath_per_dev) {
			if (!vxge_bVALn(vpath_mask, i, 1)) {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d is not available",
					VXGE_DRIVER_NAME, i);
				continue;
			} else {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d available",
					VXGE_DRIVER_NAME, i);
				no_of_vpaths++;
			}
		} else {
			vxge_debug_ll_config(VXGE_TRACE,
				"%s: vpath: %d is not configured, "
				"max_config_vpath exceeded",
				VXGE_DRIVER_NAME, i);
			break;
		}

		/* Configure Tx fifo's */
		device_config->vp_config[i].fifo.enable =
						VXGE_HW_FIFO_ENABLE;
		device_config->vp_config[i].fifo.max_frags =
						MAX_SKB_FRAGS;
		device_config->vp_config[i].fifo.memblock_size =
			VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;

		txdl_size = MAX_SKB_FRAGS * sizeof(struct vxge_hw_fifo_txd);
		txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;

		device_config->vp_config[i].fifo.fifo_blocks =
			((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;

		device_config->vp_config[i].fifo.intr =
				VXGE_HW_FIFO_QUEUE_INTR_DISABLE;

		/* Configure tti properties */
		device_config->vp_config[i].tti.intr_enable =
					VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].tti.btimer_val =
			(VXGE_TTI_BTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.timer_ac_en =
				VXGE_HW_TIM_TIMER_AC_ENABLE;

		/* For msi-x with napi (each vector
		   has a handler of its own) -
		   Set CI to OFF for all vpaths */
		device_config->vp_config[i].tti.timer_ci_en =
			VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].tti.timer_ri_en =
				VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].tti.util_sel =
			VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;

		device_config->vp_config[i].tti.ltimer_val =
			(VXGE_TTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.rtimer_val =
			(VXGE_TTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
		device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
		device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
		device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
		device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
		device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
		device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;

		/* Configure Rx rings */
		device_config->vp_config[i].ring.enable =
						VXGE_HW_RING_ENABLE;

		device_config->vp_config[i].ring.ring_blocks =
						VXGE_HW_DEF_RING_BLOCKS;
		device_config->vp_config[i].ring.buffer_mode =
			VXGE_HW_RING_RXD_BUFFER_MODE_1;
		device_config->vp_config[i].ring.rxds_limit =
				VXGE_HW_DEF_RING_RXDS_LIMIT;
		device_config->vp_config[i].ring.scatter_mode =
					VXGE_HW_RING_SCATTER_MODE_A;

		/* Configure rti properties */
		device_config->vp_config[i].rti.intr_enable =
					VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].rti.btimer_val =
			(VXGE_RTI_BTIMER_VAL * 1000)/272;

		device_config->vp_config[i].rti.timer_ac_en =
						VXGE_HW_TIM_TIMER_AC_ENABLE;

		device_config->vp_config[i].rti.timer_ci_en =
						VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].rti.timer_ri_en =
						VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].rti.util_sel =
				VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;

		device_config->vp_config[i].rti.urange_a =
						RTI_RX_URANGE_A;
		device_config->vp_config[i].rti.urange_b =
						RTI_RX_URANGE_B;
		device_config->vp_config[i].rti.urange_c =
						RTI_RX_URANGE_C;

		device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
		device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
		device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
		device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;

		device_config->vp_config[i].rti.rtimer_val =
			(VXGE_RTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rti.ltimer_val =
			(VXGE_RTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rpa_strip_vlan_tag =
			vlan_tag_strip;
	}

	driver_config->vpath_per_dev = temp;
	return no_of_vpaths;
}
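
/*
 * The fifo_blocks arithmetic above is a ceiling division: with
 * txdl_per_memblock descriptors fitting in one memblock, covering
 * VXGE_DEF_FIFO_LENGTH descriptors needs
 * ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1 blocks.
 */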

/* initialize device configurations */
static void __devinit vxge_device_config_init(
				struct vxge_hw_device_config *device_config,
				int *intr_type)
{
	/* Used for CQRQ/SRQ. */
	device_config->dma_blockpool_initial =
			VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;

	device_config->dma_blockpool_max =
			VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;

	if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
		max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;

#ifndef CONFIG_PCI_MSI
	vxge_debug_init(VXGE_ERR,
		"%s: This Kernel does not support "
		"MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
	*intr_type = INTA;
#endif

	/* Configure whether MSI-X or IRQL. */
	switch (*intr_type) {
	case INTA:
		device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
		break;

	case MSI_X:
		device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
		break;
	}
	/* Timer period between device poll */
	device_config->device_poll_millis = VXGE_TIMER_DELAY;

	/* Configure mac based steering. */
	device_config->rts_mac_en = addr_learn_en;

	/* Configure Vpaths */
	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;

	vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
			__func__);
	vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
			device_config->dma_blockpool_initial);
	vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
			device_config->dma_blockpool_max);
	vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
			device_config->intr_mode);
	vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
			device_config->device_poll_millis);
	vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
			device_config->rts_mac_en);
	vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
			device_config->rth_en);
	vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
			device_config->rth_it_type);
}

static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
{
	int i;

	vxge_debug_init(VXGE_TRACE,
		"%s: %d Vpath(s) opened",
		vdev->ndev->name, vdev->no_of_vpath);

	switch (vdev->config.intr_type) {
	case INTA:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type INTA", vdev->ndev->name);
		break;

	case MSI_X:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type MSI-X", vdev->ndev->name);
		break;
	}

	if (vdev->config.rth_steering) {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering enabled for TCP_IPV4",
			vdev->ndev->name);
	} else {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering disabled", vdev->ndev->name);
	}

	switch (vdev->config.tx_steering_type) {
	case NO_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		break;
	case TX_PRIORITY_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_VLAN_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_MULTIQ_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx multiqueue steering enabled",
			vdev->ndev->name);
		break;
	case TX_PORT_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx port steering enabled",
			vdev->ndev->name);
		break;
	default:
		vxge_debug_init(VXGE_ERR,
			"%s: Unsupported tx steering type",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
	}

	if (vdev->config.gro_enable) {
		vxge_debug_init(VXGE_ERR,
			"%s: Generic receive offload enabled",
			vdev->ndev->name);
	} else
		vxge_debug_init(VXGE_TRACE,
			"%s: Generic receive offload disabled",
			vdev->ndev->name);

	if (vdev->config.addr_learn_en)
		vxge_debug_init(VXGE_TRACE,
			"%s: MAC Address learning enabled", vdev->ndev->name);

	vxge_debug_init(VXGE_TRACE,
		"%s: Rx doorbell mode enabled", vdev->ndev->name);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!vxge_bVALn(vpath_mask, i, 1))
			continue;
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: MTU size - %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].mtu);
		vxge_debug_init(VXGE_TRACE,
			"%s: VLAN tag stripping %s", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].rpa_strip_vlan_tag
			? "Enabled" : "Disabled");
		vxge_debug_init(VXGE_TRACE,
			"%s: Ring blocks : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].ring.ring_blocks);
		vxge_debug_init(VXGE_TRACE,
			"%s: Fifo blocks : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].fifo.fifo_blocks);
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Max frags : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].fifo.max_frags);
		break;
	}
}

/**
 * vxge_pm_suspend - vxge power management suspend entry point
 *
 */
static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
{
	return -ENOSYS;
}

/**
 * vxge_pm_resume - vxge power management resume entry point
 *
 */
static int vxge_pm_resume(struct pci_dev *pdev)
{
	return -ENOSYS;
}

/**
 * vxge_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct __vxge_hw_device *hldev =
		(struct __vxge_hw_device *) pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_vxge_close(netdev, 0);
	}

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * vxge_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev =
		(struct __vxge_hw_device *) pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	struct vxgedev *vdev = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "%s: "
			"Cannot re-enable device after reset\n",
			VXGE_DRIVER_NAME);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	vxge_reset(vdev);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * vxge_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void vxge_io_resume(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev =
		(struct __vxge_hw_device *) pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	if (netif_running(netdev)) {
		if (vxge_open(netdev)) {
			printk(KERN_ERR "%s: "
				"Can't bring device back up after reset\n",
				VXGE_DRIVER_NAME);
			return;
		}
	}

	netif_device_attach(netdev);
}
4024 * @pdev : structure containing the PCI related information of the device.
4025 * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
4027 * This function is called when a new PCI device gets detected and initializes
4030 * returns 0 on success and negative on failure.
4033 static int __devinit
4034 vxge_probe(struct pci_dev
*pdev
, const struct pci_device_id
*pre
)
4036 struct __vxge_hw_device
*hldev
;
4037 enum vxge_hw_status status
;
4041 struct vxgedev
*vdev
;
4042 struct vxge_config ll_config
;
4043 struct vxge_hw_device_config
*device_config
= NULL
;
4044 struct vxge_hw_device_attr attr
;
4045 int i
, j
, no_of_vpath
= 0, max_vpath_supported
= 0;
4047 struct vxge_mac_addrs
*entry
;
4048 static int bus
= -1, device
= -1;
4051 vxge_debug_entryexit(VXGE_TRACE
, "%s:%d", __func__
, __LINE__
);
4054 if (bus
!= pdev
->bus
->number
)
4056 if (device
!= PCI_SLOT(pdev
->devfn
))
4059 bus
= pdev
->bus
->number
;
4060 device
= PCI_SLOT(pdev
->devfn
);
4063 if (driver_config
->config_dev_cnt
&&
4064 (driver_config
->config_dev_cnt
!=
4065 driver_config
->total_dev_cnt
))
4066 vxge_debug_init(VXGE_ERR
,
4067 "%s: Configured %d of %d devices",
4069 driver_config
->config_dev_cnt
,
4070 driver_config
->total_dev_cnt
);
4071 driver_config
->config_dev_cnt
= 0;
4072 driver_config
->total_dev_cnt
= 0;
4073 driver_config
->g_no_cpus
= 0;
4074 driver_config
->vpath_per_dev
= max_config_vpath
;
4077 driver_config
->total_dev_cnt
++;
4078 if (++driver_config
->config_dev_cnt
> max_config_dev
) {
4083 device_config
= kzalloc(sizeof(struct vxge_hw_device_config
),
4085 if (!device_config
) {
4087 vxge_debug_init(VXGE_ERR
,
4088 "device_config : malloc failed %s %d",
4089 __FILE__
, __LINE__
);
4093 memset(&ll_config
, 0, sizeof(struct vxge_config
));
4094 ll_config
.tx_steering_type
= TX_MULTIQ_STEERING
;
4095 ll_config
.intr_type
= MSI_X
;
4096 ll_config
.napi_weight
= NEW_NAPI_WEIGHT
;
4097 ll_config
.rth_steering
= RTH_STEERING
;
4099 /* get the default configuration parameters */
4100 vxge_hw_device_config_default_get(device_config
);
4102 /* initialize configuration parameters */
4103 vxge_device_config_init(device_config
, &ll_config
.intr_type
);
4105 ret
= pci_enable_device(pdev
);
4107 vxge_debug_init(VXGE_ERR
,
4108 "%s : can not enable PCI device", __func__
);
4112 if (!pci_set_dma_mask(pdev
, 0xffffffffffffffffULL
)) {
4113 vxge_debug_ll_config(VXGE_TRACE
,
4114 "%s : using 64bit DMA", __func__
);
4118 if (pci_set_consistent_dma_mask(pdev
,
4119 0xffffffffffffffffULL
)) {
4120 vxge_debug_init(VXGE_ERR
,
4121 "%s : unable to obtain 64bit DMA for "
4122 "consistent allocations", __func__
);
4126 } else if (!pci_set_dma_mask(pdev
, 0xffffffffUL
)) {
4127 vxge_debug_ll_config(VXGE_TRACE
,
4128 "%s : using 32bit DMA", __func__
);
4134 if (pci_request_regions(pdev
, VXGE_DRIVER_NAME
)) {
4135 vxge_debug_init(VXGE_ERR
,
4136 "%s : request regions failed", __func__
);
4141 pci_set_master(pdev
);
4143 attr
.bar0
= pci_ioremap_bar(pdev
, 0);
4145 vxge_debug_init(VXGE_ERR
,
4146 "%s : cannot remap io memory bar0", __func__
);
4150 vxge_debug_ll_config(VXGE_TRACE
,
4151 "pci ioremap bar0: %p:0x%llx",
4153 (unsigned long long)pci_resource_start(pdev
, 0));
4155 attr
.bar1
= pci_ioremap_bar(pdev
, 2);
4157 vxge_debug_init(VXGE_ERR
,
4158 "%s : cannot remap io memory bar2", __func__
);
4162 vxge_debug_ll_config(VXGE_TRACE
,
4163 "pci ioremap bar1: %p:0x%llx",
4165 (unsigned long long)pci_resource_start(pdev
, 2));
4167 status
= vxge_hw_device_hw_info_get(attr
.bar0
,
4168 &ll_config
.device_hw_info
);
4169 if (status
!= VXGE_HW_OK
) {
4170 vxge_debug_init(VXGE_ERR
,
4171 "%s: Reading of hardware info failed."
4172 "Please try upgrading the firmware.", VXGE_DRIVER_NAME
);
4177 if (ll_config
.device_hw_info
.fw_version
.major
!=
4178 VXGE_DRIVER_VERSION_MAJOR
) {
4179 vxge_debug_init(VXGE_ERR
,
4180 "FW Ver.(maj): %d not driver's expected version: %d",
4181 ll_config
.device_hw_info
.fw_version
.major
,
4182 VXGE_DRIVER_VERSION_MAJOR
);
4187 vpath_mask
= ll_config
.device_hw_info
.vpath_mask
;
4188 if (vpath_mask
== 0) {
4189 vxge_debug_ll_config(VXGE_TRACE
,
4190 "%s: No vpaths available in device", VXGE_DRIVER_NAME
);
4195 vxge_debug_ll_config(VXGE_TRACE
,
4196 "%s:%d Vpath mask = %llx", __func__
, __LINE__
,
4197 (unsigned long long)vpath_mask
);
4199 /* Check how many vpaths are available */
4200 for (i
= 0; i
< VXGE_HW_MAX_VIRTUAL_PATHS
; i
++) {
4201 if (!((vpath_mask
) & vxge_mBIT(i
)))
4203 max_vpath_supported
++;
4207 * Configure vpaths and get driver configured number of vpaths
4208 * which is less than or equal to the maximum vpaths per function.
4210 no_of_vpath
= vxge_config_vpaths(device_config
, vpath_mask
, &ll_config
);
4212 vxge_debug_ll_config(VXGE_ERR
,
4213 "%s: No more vpaths to configure", VXGE_DRIVER_NAME
);
4218 /* Setting driver callbacks */
4219 attr
.uld_callbacks
.link_up
= vxge_callback_link_up
;
4220 attr
.uld_callbacks
.link_down
= vxge_callback_link_down
;
4221 attr
.uld_callbacks
.crit_err
= vxge_callback_crit_err
;
4223 status
= vxge_hw_device_initialize(&hldev
, &attr
, device_config
);
4224 if (status
!= VXGE_HW_OK
) {
4225 vxge_debug_init(VXGE_ERR
,
4226 "Failed to initialize device (%d)", status
);
4231 vxge_hw_device_debug_set(hldev
, VXGE_ERR
, VXGE_COMPONENT_LL
);
4233 /* set private device info */
4234 pci_set_drvdata(pdev
, hldev
);
4236 ll_config
.gro_enable
= VXGE_GRO_ALWAYS_AGGREGATE
;
4237 ll_config
.fifo_indicate_max_pkts
= VXGE_FIFO_INDICATE_MAX_PKTS
;
4238 ll_config
.addr_learn_en
= addr_learn_en
;
4239 ll_config
.rth_algorithm
= RTH_ALG_JENKINS
;
4240 ll_config
.rth_hash_type_tcpipv4
= VXGE_HW_RING_HASH_TYPE_TCP_IPV4
;
4241 ll_config
.rth_hash_type_ipv4
= VXGE_HW_RING_HASH_TYPE_NONE
;
4242 ll_config
.rth_hash_type_tcpipv6
= VXGE_HW_RING_HASH_TYPE_NONE
;
4243 ll_config
.rth_hash_type_ipv6
= VXGE_HW_RING_HASH_TYPE_NONE
;
4244 ll_config
.rth_hash_type_tcpipv6ex
= VXGE_HW_RING_HASH_TYPE_NONE
;
4245 ll_config
.rth_hash_type_ipv6ex
= VXGE_HW_RING_HASH_TYPE_NONE
;
4246 ll_config
.rth_bkt_sz
= RTH_BUCKET_SIZE
;
4247 ll_config
.tx_pause_enable
= VXGE_PAUSE_CTRL_ENABLE
;
4248 ll_config
.rx_pause_enable
= VXGE_PAUSE_CTRL_ENABLE
;
4250 if (vxge_device_register(hldev
, &ll_config
, high_dma
, no_of_vpath
,
4256 vxge_hw_device_debug_set(hldev
, VXGE_TRACE
, VXGE_COMPONENT_LL
);
4257 VXGE_COPY_DEBUG_INFO_TO_LL(vdev
, vxge_hw_device_error_level_get(hldev
),
4258 vxge_hw_device_trace_level_get(hldev
));
4260 /* set private HW device info */
4261 hldev
->ndev
= vdev
->ndev
;
4262 vdev
->mtu
= VXGE_HW_DEFAULT_MTU
;
4263 vdev
->bar0
= attr
.bar0
;
4264 vdev
->bar1
= attr
.bar1
;
4265 vdev
->max_vpath_supported
= max_vpath_supported
;
4266 vdev
->no_of_vpath
= no_of_vpath
;
4268 /* Virtual Path count */
4269 for (i
= 0, j
= 0; i
< VXGE_HW_MAX_VIRTUAL_PATHS
; i
++) {
4270 if (!vxge_bVALn(vpath_mask
, i
, 1))
4272 if (j
>= vdev
->no_of_vpath
)
4275 vdev
->vpaths
[j
].is_configured
= 1;
4276 vdev
->vpaths
[j
].device_id
= i
;
4277 vdev
->vpaths
[j
].fifo
.driver_id
= j
;
4278 vdev
->vpaths
[j
].ring
.driver_id
= j
;
4279 vdev
->vpaths
[j
].vdev
= vdev
;
4280 vdev
->vpaths
[j
].max_mac_addr_cnt
= max_mac_vpath
;
4281 memcpy((u8
*)vdev
->vpaths
[j
].macaddr
,
4282 (u8
*)ll_config
.device_hw_info
.mac_addrs
[i
],
4285 /* Initialize the mac address list header */
4286 INIT_LIST_HEAD(&vdev
->vpaths
[j
].mac_addr_list
);
4288 vdev
->vpaths
[j
].mac_addr_cnt
= 0;
4289 vdev
->vpaths
[j
].mcast_addr_cnt
= 0;
4292 vdev
->exec_mode
= VXGE_EXEC_MODE_DISABLE
;
4293 vdev
->max_config_port
= max_config_port
;
4295 vdev
->vlan_tag_strip
= vlan_tag_strip
;
4297 /* map the hashing selector table to the configured vpaths */
4298 for (i
= 0; i
< vdev
->no_of_vpath
; i
++)
4299 vdev
->vpath_selector
[i
] = vpath_selector
[i
];
4301 macaddr
= (u8
*)vdev
->vpaths
[0].macaddr
;
4303 ll_config
.device_hw_info
.serial_number
[VXGE_HW_INFO_LEN
- 1] = '\0';
4304 ll_config
.device_hw_info
.product_desc
[VXGE_HW_INFO_LEN
- 1] = '\0';
4305 ll_config
.device_hw_info
.part_number
[VXGE_HW_INFO_LEN
- 1] = '\0';
4307 vxge_debug_init(VXGE_TRACE
, "%s: SERIAL NUMBER: %s",
4308 vdev
->ndev
->name
, ll_config
.device_hw_info
.serial_number
);
4310 vxge_debug_init(VXGE_TRACE
, "%s: PART NUMBER: %s",
4311 vdev
->ndev
->name
, ll_config
.device_hw_info
.part_number
);
4313 vxge_debug_init(VXGE_TRACE
, "%s: Neterion %s Server Adapter",
4314 vdev
->ndev
->name
, ll_config
.device_hw_info
.product_desc
);
4316 vxge_debug_init(VXGE_TRACE
,
4317 "%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X",
4318 vdev
->ndev
->name
, macaddr
[0], macaddr
[1], macaddr
[2],
4319 macaddr
[3], macaddr
[4], macaddr
[5]);
4321 vxge_debug_init(VXGE_TRACE
, "%s: Link Width x%d",
4322 vdev
->ndev
->name
, vxge_hw_device_link_width_get(hldev
));
4324 vxge_debug_init(VXGE_TRACE
,
4325 "%s: Firmware version : %s Date : %s", vdev
->ndev
->name
,
4326 ll_config
.device_hw_info
.fw_version
.version
,
4327 ll_config
.device_hw_info
.fw_date
.date
);
4329 vxge_print_parm(vdev
, vpath_mask
);

	/* Store the fw version for the ethtool option */
	strcpy(vdev->fw_version, ll_config.device_hw_info.fw_version.version);
	memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
	memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
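
	/*
	 * dev_addr is the currently active station address; perm_addr
	 * preserves the factory-assigned address that ethtool reports even
	 * if an administrator later overrides dev_addr.
	 */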

	/* Copy the station mac address to the list */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		entry = (struct vxge_mac_addrs *)
				kzalloc(sizeof(struct vxge_mac_addrs),
					GFP_KERNEL);
		if (NULL == entry) {
			vxge_debug_init(VXGE_ERR,
				"%s: mac_addr_list : memory allocation failed",
				vdev->ndev->name);
			ret = -EPERM;
			goto _exit5;
		}
		macaddr = (u8 *)&entry->macaddr;
		memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
		list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
		vdev->vpaths[i].mac_addr_cnt = 1;
	}

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		vdev->ndev->name, __func__, __LINE__);

	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
		vxge_hw_device_trace_level_get(hldev));

	return 0;
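
	/*
	 * Error unwinding: each _exit label below releases only what had
	 * been acquired by the time the corresponding goto was taken, in
	 * reverse order of acquisition.
	 */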
_exit5:
	for (i = 0; i < vdev->no_of_vpath; i++)
		vxge_free_mac_add_list(&vdev->vpaths[i]);

	vxge_device_unregister(hldev);
_exit4:
	vxge_hw_device_terminate(hldev);
_exit3:
	iounmap(attr.bar1);
_exit2:
	iounmap(attr.bar0);
_exit1:
	pci_release_regions(pdev);
_exit0:
	pci_disable_device(pdev);

	kfree(device_config);
	driver_config->config_dev_cnt--;
	pci_set_drvdata(pdev, NULL);

	return ret;
}

/**
 * vxge_remove - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device.
 */
static void __devexit
vxge_remove(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev;
	struct vxgedev *vdev = NULL;
	struct net_device *dev;
	int i = 0;
#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	u32 level_trace;
#endif

	hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);

	if (hldev == NULL)
		return;
	dev = hldev->ndev;
	vdev = netdev_priv(dev);

#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
	level_trace = vdev->level_trace;
#endif
	vxge_debug_entryexit(level_trace,
		"%s:%d", __func__, __LINE__);

	vxge_debug_init(level_trace,
		"%s : removing PCI device...", __func__);
	vxge_device_unregister(hldev);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vxge_free_mac_add_list(&vdev->vpaths[i]);
		vdev->vpaths[i].mcast_addr_cnt = 0;
		vdev->vpaths[i].mac_addr_cnt = 0;
	}

	kfree(vdev->vpaths);

	iounmap(vdev->bar0);
	iounmap(vdev->bar1);

	/* we are safe to free it now */
	free_netdev(dev);

	vxge_debug_init(level_trace,
		"%s:%d Device unregistered", __func__, __LINE__);

	vxge_hw_device_terminate(hldev);

	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	vxge_debug_entryexit(level_trace,
		"%s:%d Exiting...", __func__, __LINE__);
}
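
/*
 * Teardown in vxge_remove() mirrors probe in reverse: the netdev is
 * unregistered first so no new I/O can be issued, per-vpath state is
 * released, the BARs are unmapped and the netdev freed, and only then
 * are the HW device terminated and the PCI resources returned.
 */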

static struct pci_error_handlers vxge_err_handler = {
	.error_detected = vxge_io_error_detected,
	.slot_reset = vxge_io_slot_reset,
	.resume = vxge_io_resume,
};
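
/*
 * PCI error recovery: the PCI core invokes .error_detected when an I/O
 * error is reported for this device, .slot_reset after the slot has been
 * reset, and .resume once the device may resume normal operation.
 */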

static struct pci_driver vxge_driver = {
	.name = VXGE_DRIVER_NAME,
	.id_table = vxge_id_table,
	.probe = vxge_probe,
	.remove = __devexit_p(vxge_remove),
#ifdef CONFIG_PM
	.suspend = vxge_pm_suspend,
	.resume = vxge_pm_resume,
#endif
	.err_handler = &vxge_err_handler,
};
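
/*
 * pci_register_driver() matches vxge_id_table against devices on the PCI
 * bus; on a vendor/device match the core calls vxge_probe() with the
 * matching pci_dev, and vxge_remove() at hot-unplug or driver unload.
 */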

static int __init
vxge_starter(void)
{
	int ret = 0;
	char version[32];
	snprintf(version, 32, "%s", DRV_VERSION);

	printk(KERN_CRIT "%s: Copyright(c) 2002-2009 Neterion Inc\n",
		VXGE_DRIVER_NAME);
	printk(KERN_CRIT "%s: Driver version: %s\n",
		VXGE_DRIVER_NAME, version);

	driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
	if (!driver_config)
		return -ENOMEM;

	ret = pci_register_driver(&vxge_driver);

	if (driver_config->config_dev_cnt &&
	   (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
		vxge_debug_init(VXGE_ERR,
			"%s: Configured %d of %d devices",
			VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
			driver_config->total_dev_cnt);

	if (ret)
		kfree(driver_config);

	return ret;
}

static void __exit
vxge_closer(void)
{
	pci_unregister_driver(&vxge_driver);
	kfree(driver_config);
}
module_init(vxge_starter);
module_exit(vxge_closer);
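
/*
 * Usage sketch (hypothetical values): the module parameters declared at
 * the top of this file can be set at load time, e.g.
 *
 *	modprobe vxge vlan_tag_strip=1 max_config_vpath=4
 *
 * When the driver is built into the kernel, module_init() arranges for
 * vxge_starter() to run at boot and the module_exit() hook is dropped.
 */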