/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"
#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
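/* Worked example: with a 16 KB maximum descriptor length (the usual
 * WQ_ENET_LEN_BITS == 14 case), a full 64 KB TSO send can need up to
 * MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1 = 5 descriptors just for splits.
 */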
#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF		0x0071	/* enet SRIOV VF */
/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
#define ENIC_LARGE_PKT_THRESHOLD		1000
#define ENIC_MAX_COALESCE_TIMERS		10
/* Interrupt moderation table, which will be used to decide the
 * coalescing timer values
 * {rx_rate in Mbps, mapping percentage of the range}
 */
struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
	{4000,  0},
	{4400, 10},
	{5060, 20},
	{5230, 30},
	{5540, 40},
	{5870, 50},
	{6110, 60},
	{6340, 70},
	{6610, 80},
	{6880, 90},
	{0, 100},
};
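/* Example lookup (entries as in the upstream enic driver): a measured
 * rate of 5000 Mbps falls below the 5060 entry, so the coalescing timer
 * is set at 20% of the configured range; rates above every entry fall
 * through to the final {0, 100} catch-all.
 */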
/* This table helps the driver to pick different ranges for rx coalescing
 * timer depending on the link speed.
 */
struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
	{0,  0}, /* 0  - 4  Gbps */
	{0,  3}, /* 4  - 10 Gbps */
	{3,  6}, /* 10 - 40 Gbps */
};
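/* Example: on a 10-40 Gbps link the {3, 6} entry is used, i.e. the
 * adaptive-coalescing small-packet range starts at 3 and the
 * large-packet range at 6 (timer units, see enic_set_rx_coal_setting).
 */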
int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}
int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}
static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}
int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}
static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}
static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_subqueue(enic->netdev, q_number);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
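/* Note: the wake threshold above (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS
 * free descriptors) matches the stop threshold in enic_hard_start_xmit,
 * so a queue is only woken once a worst-case skb is guaranteed to fit.
 */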
static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}
static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}
static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}
static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}
static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}
#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr)) {
		if (napi_schedule_prep(&enic->napi[0]))
			__napi_schedule(&enic->napi[0]);
	} else {
		vnic_intr_unmask(&enic->intr[io_intr]);
	}

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it. The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated. Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi[0]);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct napi_struct *napi = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(napi);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int cq;
	unsigned int intr;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int wq_work_done;
	unsigned int wq_irq;

	wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
	cq = enic_cq_wq(enic, wq_irq);
	intr = enic_msix_wq_intr(enic, wq_irq);

	wq_work_done = vnic_cq_service(&enic->cq[cq],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}
static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}
static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	const skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		enic_queue_wq_desc_cont(wq, skb,
			skb_frag_dma_map(&enic->pdev->dev,
					 frag, 0, skb_frag_size(frag),
					 DMA_TO_DEVICE),
			skb_frag_size(frag),
			(len_left == 0),	/* EOP? */
			loopback);
	}
}
static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}
static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}
static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left),	/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}
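/* Dispatch summary: TSO (mss != 0) takes priority, then L4 checksum
 * offload (CHECKSUM_PARTIAL), with plain/VLAN-only sends as the
 * fallback; all three paths share enic_queue_wq_skb_cont for any
 * trailing fragments.
 */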
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq;
	unsigned long flags;
	unsigned int txq_map;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
	wq = &enic->wq[txq_map];

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[txq_map], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));

	spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);

	return NETDEV_TX_OK;
}
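/* Stop/wake protocol: the queue is stopped while fewer than
 * MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS descriptors remain, so the next
 * worst-case skb (max frags, each possibly split) can never overrun the
 * ring; enic_wq_service re-wakes the queue at the same watermark.
 */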
/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
	struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}
static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
		unsigned int mc_count = netdev_mc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
			    ENIC_MULTICAST_PERFECT_FILTERS, mc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, mc_addr);
	enic->mc_count++;

	return 0;
}
static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, mc_addr);
	enic->mc_count--;

	return 0;
}
static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
		unsigned int uc_count = netdev_uc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
			    ENIC_UNICAST_PERFECT_FILTERS, uc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, uc_addr);
	enic->uc_count++;

	return 0;
}
static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, uc_addr);
	enic->uc_count--;

	return 0;
}
void enic_reset_addr_lists(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);

	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}
static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return 0;
}
static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}
/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		__dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
		if (!allmulti)
			__dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
	}
}
/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);

	schedule_work(&enic->reset);
}
static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For sriov vf's set the mac in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}
static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			memset(pp->mac_addr, 0, ETH_ALEN);
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	}

	if (vf == PORT_SELF_VF)
		memset(pp->vf_mac, 0, ETH_ALEN);

	return err;
}
static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}
static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
				      u32 pkt_len)
{
	if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
		pkt_size->large_pkt_bytes_cnt += pkt_len;
	else
		pkt_size->small_pkt_bytes_cnt += pkt_len;
}
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;

	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {
		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, q_number);
		if (netdev->features & NETIF_F_RXHASH) {
			skb_set_hash(skb, rss_hash,
				     (rss_type &
				      (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
				     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
		}

		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		if (netdev->features & NETIF_F_GRO)
			napi_gro_receive(&enic->napi[q_number], skb);
		else
			netif_receive_skb(skb);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_intr_update_pkt_size(&cq->pkt_size_counter,
						  bytes_written);
	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}
static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done = 0, wq_work_done;
	int err;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}
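/* In INTx/MSI mode a single vector covers both queues, so this poll
 * routine services the WQ completion queue opportunistically (no
 * budget) alongside the budgeted RQ work; only RQ work counts toward
 * the NAPI budget returned to the core.
 */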
static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	u32 timer = cq->tobe_rx_coal_timeval;

	if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
		cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
	}
}
static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
	int index;
	u32 timer;
	u32 range_start;
	u32 traffic;
	u64 delta;
	ktime_t now = ktime_get();

	delta = ktime_us_delta(now, cq->prev_ts);
	if (delta < ENIC_AIC_TS_BREAK)
		return;
	cq->prev_ts = now;

	traffic = pkt_size_counter->large_pkt_bytes_cnt +
		  pkt_size_counter->small_pkt_bytes_cnt;
	/* The table takes Mbps
	 * traffic *= 8    => bits
	 * traffic *= (10^6 / delta)    => bps
	 * traffic /= 10^6     => Mbps
	 *
	 * Combining, traffic *= (8 / delta)
	 */

	traffic <<= 3;
	traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;

	for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
		if (traffic < mod_table[index].rx_rate)
			break;
	range_start = (pkt_size_counter->small_pkt_bytes_cnt >
		       pkt_size_counter->large_pkt_bytes_cnt << 1) ?
		      rx_coal->small_pkt_range_start :
		      rx_coal->large_pkt_range_start;
	timer = range_start + ((rx_coal->range_end - range_start) *
			       mod_table[index].range_percent / 100);

	/* Damping */
	cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;

	pkt_size_counter->large_pkt_bytes_cnt = 0;
	pkt_size_counter->small_pkt_bytes_cnt = 0;
}
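/* Worked example: 625000 bytes seen in a 1000 usec window gives
 * traffic = (625000 << 3) / 1000 = 5000 Mbps; the first mod_table entry
 * above that rate is {5060, 20}, so with a 0..125 range the raw target
 * is 0 + 125 * 20 / 100 = 25, which is then damped by averaging with
 * the previous target.
 */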
#ifdef CONFIG_RFS_ACCEL
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
	free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
	enic->netdev->rx_cpu_rmap = NULL;
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
	int i, res;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
		if (unlikely(!enic->netdev->rx_cpu_rmap))
			return;
		for (i = 0; i < enic->rq_count; i++) {
			res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
					       enic->msix_entry[i].vector);
			if (unlikely(res)) {
				enic_free_rx_cpu_rmap(enic);
				return;
			}
		}
	}
}

#else

static void enic_free_rx_cpu_rmap(struct enic *enic)
{
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
}

#endif /* CONFIG_RFS_ACCEL */
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done = 0;
	int err;

	work_done = vnic_cq_service(&enic->cq[cq],
		work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes
		 * the intr coalescing timer value based on
		 * the traffic. This is supported only in
		 * the case of MSI-x mode
		 */
		enic_calc_int_moderation(enic, &enic->rq[rq]);

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[rq]);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return work_done;
}
static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}
static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	enic_free_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	enic_set_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-rx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_rq;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-tx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_wq;
			enic->msix[intr].devid = enic;
		}

		intr = enic_msix_err_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}
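/* Resulting MSI-X vector layout (mirrored by enic_msix_rq_intr() and
 * friends, and sized in enic_set_intr_mode): one "-rx-N" vector per RQ,
 * then one "-tx-N" vector per WQ, then the "-err" and "-notify" vectors
 * last.
 */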
static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}
static void enic_set_rx_coal_setting(struct enic *enic)
{
	unsigned int speed;
	int index = -1;
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;

	/* If intr mode is not MSIX, do not do adaptive coalescing */
	if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
		netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing");
		return;
	}

	/* 1. Read the link speed from fw
	 * 2. Pick the default range for the speed
	 * 3. Update it in enic->rx_coalesce_setting
	 */
	speed = vnic_dev_port_speed(enic->vdev);
	if (ENIC_LINK_SPEED_10G < speed)
		index = ENIC_LINK_40G_INDEX;
	else if (ENIC_LINK_SPEED_4G < speed)
		index = ENIC_LINK_10G_INDEX;
	else
		index = ENIC_LINK_4G_INDEX;

	rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
	rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
	rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;

	/* Start with the value provided by UCSM */
	for (index = 0; index < enic->rq_count; index++)
		enic->cq[index].cur_rx_coal_timeval =
				enic->config.intr_timer_usec;

	rx_coal->use_adaptive_rx_coalesce = 1;
}
static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_legacy_notify_intr());
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_add_station_addr(enic);

	enic_set_rx_mode(netdev);

	netif_tx_wake_all_queues(netdev);

	for (i = 0; i < enic->rq_count; i++)
		napi_enable(&enic->napi[i]);

	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}
/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}
static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		return -EOPNOTSUPP;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}
static void enic_change_mtu_work(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, change_mtu_work);
	struct net_device *netdev = enic->netdev;
	int new_mtu = vnic_dev_mtu(enic->vdev);
	int err;
	unsigned int i;

	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));

	rtnl_lock();

	/* Stop RQ */
	del_timer_sync(&enic->notify_timer);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	vnic_intr_mask(&enic->intr[0]);
	enic_synchronize_irqs(enic);
	err = vnic_rq_disable(&enic->rq[0]);
	if (err) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to disable RQ.\n");
		return;
	}
	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
	vnic_cq_clean(&enic->cq[0]);
	vnic_intr_clean(&enic->intr[0]);

	/* Fill RQ with new_mtu-sized buffers */
	netdev->mtu = new_mtu;
	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	/* Need at least one buffer on ring to get going */
	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to alloc receive buffers.\n");
		return;
	}

	/* Start RQ */
	vnic_rq_enable(&enic->rq[0]);
	napi_enable(&enic->napi[0]);
	vnic_intr_unmask(&enic->intr[0]);
	enic_notify_timer_start(enic);

	rtnl_unlock();

	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix_rq(enic->msix_entry[intr].vector,
					 &enic->napi[i]);
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
		}

		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}
static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}
static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}
static int enic_set_rsskey(struct enic *enic)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	union vnic_rss_key rss_key = {
		.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
		.key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
		.key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
		.key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
	};
	int err;

	rss_key_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_key), &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}
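/* The fixed 40-byte key above is ASCII in disguise: the four 10-byte
 * rows spell "UCSawesome", "PALOunique", "LINUXrocks" and "ENICiscool".
 */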
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}
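/* With rss_hash_bits == 7 (see enic_set_rss_nic_cfg) the indirection
 * table has 128 entries, filled round-robin via i % rq_count, so hash
 * buckets are spread evenly across the enabled receive queues.
 */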
static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock(&enic->devcmd_lock);

	return err;
}
static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_IPV6 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, "
					"Failed to set RSS cpu indirection table.");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	spin_lock(&enic->enic_api_lock);
	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);
	spin_unlock(&enic->enic_api_lock);
	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

	rtnl_unlock();
}
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  n + m + 2, n + m + 2) > 0) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {
		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  1 + m + 2, 1 + m + 2) > 0) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}
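/* Fallback order recap: multi-queue MSI-X (RSS), single-RQ MSI-X, MSI,
 * then INTx; whichever mode succeeds also fixes the rq/wq/cq/intr
 * counts that the rest of the driver sizes itself against.
 */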
static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_set_mac_address	= enic_set_mac_address_dynamic,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
};
static const struct net_device_ops enic_netdev_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= enic_set_mac_address,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
};
static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++)
		netif_napi_del(&enic->napi[i]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}
static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get interrupt coalesce timer info */
	err = enic_dev_intr_coal_timer_info(enic);
	if (err) {
		dev_warn(dev, "Using default conversion factor for "
			"interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(enic->vdev);
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++)
			netif_napi_add(netdev, &enic->napi[i],
				enic_poll_msix, 64);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}
static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}
static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;
#ifdef CONFIG_PCI_IOV
	int pos = 0;
#endif
	int num_pps = 1;

	/* Allocate net device structure and initialize.  Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev_mqs(sizeof(struct enic),
				    ENIC_RQ_MAX, ENIC_WQ_MAX);
	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 64-bit first, and
	 * fail to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 64);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

#ifdef CONFIG_PCI_IOV
	/* Get number of subvnics */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
			&enic->num_vfs);
		if (enic->num_vfs) {
			err = pci_enable_sriov(pdev, enic->num_vfs);
			if (err) {
				dev_err(dev, "SRIOV enable failed, aborting."
					" pci_enable_sriov() returned %d\n",
					err);
				goto err_out_vnic_unregister;
			}
			enic->priv_flags |= ENIC_SRIOV_ENABLED;
			num_pps = enic->num_vfs;
		}
	}
#endif

	/* Allocate structure for port profiles */
	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
	if (!enic->pp) {
		err = -ENOMEM;
		goto err_out_disable_sriov_pp;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_disable_sriov;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);
	spin_lock_init(&enic->enic_api_lock);

	/*
	 * Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	netif_set_real_num_tx_queues(netdev, enic->wq_count);
	netif_set_real_num_rx_queues(netdev, enic->rq_count);

	/* Setup notification timer, HW reset task, and wq locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	enic_set_rx_coal_setting(enic);
	INIT_WORK(&enic->reset, enic_reset);
	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	/* rx coalesce time already got initialized. This gets used
	 * if adaptive coal is turned off
	 */
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	enic_set_ethtool_ops(netdev);

	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RSS))
		netdev->hw_features |= NETIF_F_RXHASH;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_disable_sriov:
	kfree(enic->pp);
err_out_disable_sriov_pp:
#ifdef CONFIG_PCI_IOV
	if (enic_sriov_enabled(enic)) {
		pci_disable_sriov(pdev);
		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
	}
err_out_vnic_unregister:
#endif
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	free_netdev(netdev);

	return err;
}
static void enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
#ifdef CONFIG_PCI_IOV
		if (enic_sriov_enabled(enic)) {
			pci_disable_sriov(pdev);
			enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
		}
#endif
		kfree(enic->pp);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		free_netdev(netdev);
	}
}
static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = enic_remove,
};
static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);