/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/crash_dump.h>

50 #include "cq_enet_desc.h"
52 #include "vnic_intr.h"
53 #include "vnic_stats.h"
59 #include "enic_clsf.h"
#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
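
/* Worked example, assuming WQ_ENET_LEN_BITS is 14 (16 KB descriptors) as
 * defined in wq_enet_desc.h: a maximal 64 KB TSO payload needs at most
 * 64K/16K + 1 = 5 descriptors, the "+ 1" covering a tail that is not a
 * whole descriptor length.
 */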

#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF		0x0071	/* enet SRIOV VF */

#define RX_COPYBREAK_DEFAULT		256
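
/* Receives no longer than rx_copybreak (256 bytes by default) are copied
 * into a freshly allocated skb in enic_rxcopybreak() below, so the large
 * DMA-mapped ring buffer can be reposted instead of reallocated.
 */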

/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

#define ENIC_LARGE_PKT_THRESHOLD	1000
#define ENIC_MAX_COALESCE_TIMERS	10
/* Interrupt moderation table, which will be used to decide the
 * coalescing timer values
 * {rx_rate in Mbps, mapping percentage of the range}
 */
static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {

/* This table helps the driver to pick different ranges for rx coalescing
 * timer depending on the link speed.
 */
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
	{0,  0}, /* 0  - 4  Gbps */
	{0,  3}, /* 4  - 10 Gbps */
	{3,  6}, /* 10 - 40 Gbps */
};
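
/* Each mod_range entry is {small_pkt_range_start, large_pkt_range_start};
 * enic_set_rx_coal_setting() picks one entry based on link speed, and
 * enic_calc_int_moderation() then interpolates a coalescing timer between
 * that start value and ENIC_RX_COALESCE_RANGE_END.
 */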

int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_subqueue(enic->netdev, q_number);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
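
/* The legacy PBA (pending bits array) has one bit per interrupt source;
 * e.g. ENIC_TEST_INTR(pba, enic_legacy_io_intr()) is non-zero when the
 * I/O interrupt bit is pending.
 */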

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		enic_notify_check(enic);
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr))
		napi_schedule_irqoff(&enic->napi[0]);
	else
		vnic_intr_unmask(&enic->intr[io_intr]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it. The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated. Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule_irqoff(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	enic_notify_check(enic);
	vnic_intr_return_all_credits(&enic->intr[intr]);

	return IRQ_HANDLED;
}

static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, unsigned int len_left, int loopback)
{
	const skb_frag_t *frag;
	dma_addr_t dma_addr;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
					(len_left == 0),	/* EOP? */
					loopback);
	}

	return 0;
}

static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, int vlan_tag_insert,
	unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
				  PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
		vlan_tag, eop, loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, int vlan_tag_insert,
	unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
				  PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
		hdr_len, vlan_tag_insert, vlan_tag, eop,
		loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag,
	int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
					  PCI_DMA_TODEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
				       vlan_tag_insert, vlan_tag,
				       eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return 0;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				  (unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			if (unlikely(enic_dma_map_check(enic, dma_addr)))
				return -ENOMEM;
			enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
						(len_left == 0) &&
						(len == frag_len_left),/*EOP*/
						loopback);
			frag_len_left -= len;
			offset += len;
		}
	}

	return 0;
}

static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;
	int err;

	if (skb_vlan_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = skb_vlan_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
					    vlan_tag_insert, vlan_tag,
					    loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
						vlan_tag, loopback);
	else
		err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
					     vlan_tag, loopback);
	if (unlikely(err)) {
		struct vnic_wq_buf *buf;

		buf = wq->to_use->prev;
		/* while not EOP of previous pkt && queue not empty.
		 * For all non EOP bufs, os_buf is NULL.
		 */
		while (!buf->os_buf && (buf->next != wq->to_clean)) {
			enic_free_wq_buf(wq, buf);
			wq->ring.desc_avail++;
			buf = buf->prev;
		}
		wq->to_use = buf->next;
		dev_kfree_skb(skb);
	}
}
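
/* Descriptor budgeting example, assuming 4 KB pages (MAX_SKB_FRAGS == 17)
 * and ENIC_DESC_MAX_SPLITS == 5: enic_hard_start_xmit() below stops the
 * queue once fewer than 17 + 5 = 22 descriptors remain, the worst case
 * needed by one more fully-fragmented TSO skb.
 */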

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq;
	unsigned int txq_map;
	struct netdev_queue *txq;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
	wq = &enic->wq[txq_map];
	txq = netdev_get_tx_queue(netdev, txq_map);

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock(&enic->wq_lock[txq_map]);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(txq);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock(&enic->wq_lock[txq_map]);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(txq);
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		vnic_wq_doorbell(wq);

	spin_unlock(&enic->wq_lock[txq_map]);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
						struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
		unsigned int mc_count = netdev_mc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
			    ENIC_MULTICAST_PERFECT_FILTERS, mc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, mc_addr);
	enic->mc_count++;

	return 0;
}

static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, mc_addr);
	enic->mc_count--;

	return 0;
}

static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
		unsigned int uc_count = netdev_uc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
			    ENIC_UNICAST_PERFECT_FILTERS, uc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, uc_addr);
	enic->uc_count++;

	return 0;
}

static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, uc_addr);
	enic->uc_count--;

	return 0;
}

void enic_reset_addr_lists(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);

	enic->mc_count = 0;
	enic->uc_count = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		__dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
		if (!allmulti)
			__dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);

	schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For sriov vf's set the mac in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				eth_zero_addr(netdev->dev_addr);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			eth_zero_addr(pp->mac_addr);
			if (vf == PORT_SELF_VF)
				eth_zero_addr(netdev->dev_addr);
		}
	}

	if (vf == PORT_SELF_VF)
		eth_zero_addr(pp->vf_mac);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
	buf->os_buf = NULL;
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;
	struct vnic_rq_buf *buf = rq->to_use;

	if (buf->os_buf) {
		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
				   buf->len);

		return 0;
	}
	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data, len,
				  PCI_DMA_FROMDEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	enic_queue_rq_desc(rq, skb, os_buf_index,
			   dma_addr, len);

	return 0;
}

static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
				      u32 pkt_len)
{
	if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
		pkt_size->large_pkt_bytes_cnt += pkt_len;
	else
		pkt_size->small_pkt_bytes_cnt += pkt_len;
}

static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
			     struct vnic_rq_buf *buf, u16 len)
{
	struct enic *enic = netdev_priv(netdev);
	struct sk_buff *new_skb;

	if (len > enic->rx_copybreak)
		return false;
	new_skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!new_skb)
		return false;
	pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
				    DMA_FROM_DEVICE);
	memcpy(new_skb->data, (*skb)->data, len);
	*skb = new_skb;

	return true;
}

static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
			buf->os_buf = NULL;
			pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data - NET_IP_ALIGN);

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, q_number);
		if (netdev->features & NETIF_F_RXHASH) {
			skb_set_hash(skb, rss_hash,
				     (rss_type &
				      (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
				       NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ?
				     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
		}

		/* Hardware does not provide the whole packet checksum, only a
		 * pseudo checksum. The hw validates the packet checksum but
		 * does not provide us the checksum value, so use
		 * CHECKSUM_UNNECESSARY.
		 */
		if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
		    ipv4_csum_ok)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		skb_mark_napi_id(skb, &enic->napi[rq->index]);
		if (enic_poll_busy_polling(rq) ||
		    !(netdev->features & NETIF_F_GRO))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&enic->napi[q_number], skb);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_intr_update_pkt_size(&cq->pkt_size_counter,
						  bytes_written);
	} else {

		/* Buffer overflow
		 */

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int work_done, rq_work_done = 0, wq_work_done;
	int err;

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
				       enic_wq_service, NULL);

	if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
		if (wq_work_done > 0)
			vnic_intr_return_credits(&enic->intr[intr],
						 wq_work_done,
						 0 /* don't unmask intr */,
						 0 /* don't reset intr timer */);
		return rq_work_done;
	}

	if (budget > 0)
		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
			rq_work_to_do, enic_rq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}
	enic_poll_unlock_napi(&enic->rq[cq_rq]);

	return rq_work_done;
}
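
/* Per the NAPI contract, returning the full budget keeps the poller
 * scheduled, while returning less than budget (after napi_complete())
 * re-enables the device interrupt; the vnic_rq_fill() failure path above
 * exploits this to retry buffer allocation on the next poll.
 */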

static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	u32 timer = cq->tobe_rx_coal_timeval;

	if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
		cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
	}
}

static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
	int index;
	u32 timer;
	u32 range_start;
	u32 traffic;
	u64 delta;
	ktime_t now = ktime_get();

	delta = ktime_us_delta(now, cq->prev_ts);
	if (delta < ENIC_AIC_TS_BREAK)
		return;
	cq->prev_ts = now;

	traffic = pkt_size_counter->large_pkt_bytes_cnt +
		  pkt_size_counter->small_pkt_bytes_cnt;
	/* The table takes Mbps
	 * traffic *= 8    => bits
	 * traffic *= (10^6 / delta)    => bps
	 * traffic /= 10^6     => Mbps
	 *
	 * Combining, traffic *= (8 / delta)
	 */

	traffic <<= 3;
	traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;

	for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
		if (traffic < mod_table[index].rx_rate)
			break;
	range_start = (pkt_size_counter->small_pkt_bytes_cnt >
		       pkt_size_counter->large_pkt_bytes_cnt << 1) ?
		      rx_coal->small_pkt_range_start :
		      rx_coal->large_pkt_range_start;
	timer = range_start + ((rx_coal->range_end - range_start) *
			       mod_table[index].range_percent / 100);

	/* Damping: average the new timer with the previous target */
	cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;

	pkt_size_counter->large_pkt_bytes_cnt = 0;
	pkt_size_counter->small_pkt_bytes_cnt = 0;
}
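
/* Worked example for the rate math above: 2,500,000 bytes observed over a
 * 10,000 us window gives (2500000 << 3) / 10000 = 2000 Mbps, which is then
 * looked up in mod_table[] to pick a percentage of the coalescing range.
 */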

#ifdef CONFIG_RFS_ACCEL
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
	free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
	enic->netdev->rx_cpu_rmap = NULL;
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
	int i, res;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
		if (unlikely(!enic->netdev->rx_cpu_rmap))
			return;
		for (i = 0; i < enic->rq_count; i++) {
			res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
					       enic->msix_entry[i].vector);
			if (unlikely(res)) {
				enic_free_rx_cpu_rmap(enic);
				return;
			}
		}
	}
}

#else

static void enic_free_rx_cpu_rmap(struct enic *enic)
{
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
}

#endif /* CONFIG_RFS_ACCEL */
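
/* The CPU reverse map built above lets accelerated RFS (aRFS) steer a flow
 * to the RQ whose MSI-X vector is affine to the CPU consuming that flow;
 * see enic_rx_flow_steer() in enic_clsf.c.
 */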

#ifdef CONFIG_NET_RX_BUSY_POLL
static int enic_busy_poll(struct napi_struct *napi)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = -1; /* clean all pkts possible */
	unsigned int work_done;

	if (!enic_poll_lock_poll(&enic->rq[rq]))
		return LL_FLUSH_BUSY;
	work_done = vnic_cq_service(&enic->cq[cq], work_to_do,
				    enic_rq_service, NULL);

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
					 work_done, 0, 0);
	vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		enic_calc_int_moderation(enic, &enic->rq[rq]);
	enic_poll_unlock_poll(&enic->rq[rq]);

	return work_done;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
	struct vnic_wq *wq = &enic->wq[wq_index];
	unsigned int cq;
	unsigned int intr;
	unsigned int wq_work_to_do = -1; /* clean all desc possible */
	unsigned int wq_work_done;
	unsigned int wq_irq;

	wq_irq = wq->index;
	cq = enic_cq_wq(enic, wq_irq);
	intr = enic_msix_wq_intr(enic, wq_irq);
	wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
				       enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
				 0 /* don't unmask intr */,
				 1 /* reset intr timer */);
	if (!wq_work_done) {
		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
		return 0;
	}

	return budget;
}

static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done = 0;
	int err;

	if (!enic_poll_lock_napi(&enic->rq[rq]))
		return budget;

	if (budget > 0)
		work_done = vnic_cq_service(&enic->cq[cq],
			work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle. An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes
		 * the intr coalescing timer value based on
		 * the traffic. This is supported only in
		 * the case of MSI-x mode
		 */
		enic_calc_int_moderation(enic, &enic->rq[rq]);

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[rq]);
		vnic_intr_unmask(&enic->intr[intr]);
	}
	enic_poll_unlock_napi(&enic->rq[rq]);

	return work_done;
}

static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	enic_free_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	enic_set_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-rx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			int wq = enic_cq_wq(enic, i);

			intr = enic_msix_wq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%.11s-tx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[wq];
		}

		intr = enic_msix_err_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%.11s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

static void enic_set_rx_coal_setting(struct enic *enic)
{
	unsigned int speed;
	int index = -1;
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;

	/* If intr mode is not MSIX, do not do adaptive coalescing */
	if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
		netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing");
		return;
	}

	/* 1. Read the link speed from fw
	 * 2. Pick the default range for the speed
	 * 3. Update it in enic->rx_coalesce_setting
	 */
	speed = vnic_dev_port_speed(enic->vdev);
	if (ENIC_LINK_SPEED_10G < speed)
		index = ENIC_LINK_40G_INDEX;
	else if (ENIC_LINK_SPEED_4G < speed)
		index = ENIC_LINK_10G_INDEX;
	else
		index = ENIC_LINK_4G_INDEX;

	rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
	rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
	rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;

	/* Start with the value provided by UCSM */
	for (index = 0; index < enic->rq_count; index++)
		enic->cq[index].cur_rx_coal_timeval =
				enic->config.intr_timer_usec;

	rx_coal->use_adaptive_rx_coalesce = 1;
}

static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock_bh(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_legacy_notify_intr());
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}

/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_free_rq;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_add_station_addr(enic);

	enic_set_rx_mode(netdev);

	netif_tx_wake_all_queues(netdev);

	for (i = 0; i < enic->rq_count; i++) {
		enic_busy_poll_init_lock(&enic->rq[i]);
		napi_enable(&enic->napi[i]);
	}
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);
	enic_rfs_flw_tbl_init(enic);

	return 0;

err_out_free_rq:
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}

/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]);	/* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);
	enic_rfs_flw_tbl_free(enic);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++) {
		napi_disable(&enic->napi[i]);
		local_bh_disable();
		while (!enic_poll_lock_napi(&enic->rq[i]))
			mdelay(1);
		local_bh_enable();
	}

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_disable(&enic->napi[enic_cq_wq(enic, i)]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		return -EOPNOTSUPP;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}

static void enic_change_mtu_work(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, change_mtu_work);
	struct net_device *netdev = enic->netdev;
	int new_mtu = vnic_dev_mtu(enic->vdev);
	int err;
	unsigned int i;

	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));

	rtnl_lock();

	/* Stop RQ */
	del_timer_sync(&enic->notify_timer);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	vnic_intr_mask(&enic->intr[0]);
	enic_synchronize_irqs(enic);
	err = vnic_rq_disable(&enic->rq[0]);
	if (err) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to disable RQ.\n");
		return;
	}
	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
	vnic_cq_clean(&enic->cq[0]);
	vnic_intr_clean(&enic->intr[0]);

	/* Fill RQ with new_mtu-sized buffers */
	netdev->mtu = new_mtu;
	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	/* Need at least one buffer on ring to get going */
	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to alloc receive buffers.\n");
		return;
	}

	/* Start RQ */
	vnic_rq_enable(&enic->rq[0]);
	napi_enable(&enic->napi[0]);
	vnic_intr_unmask(&enic->intr[0]);
	enic_notify_timer_start(enic);

	rtnl_unlock();

	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[i]);
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[enic_cq_wq(enic, i)]);
		}

		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif

static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}

int __enic_set_rsskey(struct enic *enic)
{
	union vnic_rss_key *rss_key_buf_va;
	dma_addr_t rss_key_buf_pa;
	int i, kidx, bidx, err;

	rss_key_buf_va = pci_zalloc_consistent(enic->pdev,
					       sizeof(union vnic_rss_key),
					       &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	for (i = 0; i < ENIC_RSS_LEN; i++) {
		kidx = i / ENIC_RSS_BYTES_PER_KEY;
		bidx = i % ENIC_RSS_BYTES_PER_KEY;
		rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i];
	}
	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}

static int enic_set_rsskey(struct enic *enic)
{
	netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);

	return __enic_set_rsskey(enic);
}

static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
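
	/* With rss_hash_bits == 7 the indirection table has 1 << 7 = 128
	 * entries; filling it with i % rq_count spreads hash buckets
	 * round-robin over the RQs, e.g. 0,1,2,3,0,1,... for four RQs.
	 */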

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_IPV6 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, "
					"Failed to set RSS cpu indirection table.");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}

static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	spin_lock(&enic->enic_api_lock);
	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);
	spin_unlock(&enic->enic_api_lock);
	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

	rtnl_unlock();
}

static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  n + m + 2, n + m + 2) > 0) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {
		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  1 + m + 2, 1 + m + 2) > 0) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}
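
/* MSI-X vector layout implied by the sizing above: vectors [0, n) service
 * the RQs, [n, n+m) the WQs, vector n+m the WQ/RQ error interrupt and
 * vector n+m+1 the notify interrupt (see the enic_msix_*_intr() helpers).
 */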

static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}

static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_set_mac_address	= enic_set_mac_address_dynamic,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= enic_busy_poll,
#endif
};

static const struct net_device_ops enic_netdev_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= enic_set_mac_address,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= enic_busy_poll,
#endif
};

static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++) {
		napi_hash_del(&enic->napi[i]);
		netif_napi_del(&enic->napi[i]);
	}
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}

static void enic_kdump_kernel_config(struct enic *enic)
{
	if (is_kdump_kernel()) {
		dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
		enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
		enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
	}
}

static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get interrupt coalesce timer info */
	err = enic_dev_intr_coal_timer_info(enic);
	if (err) {
		dev_warn(dev, "Using default conversion factor for "
			"interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(enic->vdev);
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* modify resource count if we are in kdump_kernel
	 */
	enic_kdump_kernel_config(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		napi_hash_add(&enic->napi[0]);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			netif_napi_add(netdev, &enic->napi[i],
				enic_poll_msix_rq, NAPI_POLL_WEIGHT);
			napi_hash_add(&enic->napi[i]);
		}
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
				enic_poll_msix_wq, NAPI_POLL_WEIGHT);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}
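
/* Unmap any BARs that were mapped during probe */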
static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}
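
/* PCI probe: set up PCI/DMA resources, map the BARs, register the vNIC
 * device, optionally enable SR-IOV, open and initialize the device, and
 * finally register the net device.  Each failure path unwinds exactly
 * the steps that preceded it via the err_out_* labels at the bottom.
 */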
static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;
#ifdef CONFIG_PCI_IOV
	int pos = 0;
#endif
	int num_pps = 1;

	/* Allocate net device structure and initialize. Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev_mqs(sizeof(struct enic),
				    ENIC_RQ_MAX, ENIC_WQ_MAX);
	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 64-bit first, and
	 * fail to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 64);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

#ifdef CONFIG_PCI_IOV
	/* Get number of subvnics */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &enic->num_vfs);
		if (enic->num_vfs) {
			err = pci_enable_sriov(pdev, enic->num_vfs);
			if (err) {
				dev_err(dev, "SRIOV enable failed, aborting."
					" pci_enable_sriov() returned %d\n",
					err);
				goto err_out_vnic_unregister;
			}
			enic->priv_flags |= ENIC_SRIOV_ENABLED;
			num_pps = enic->num_vfs;
		}
	}
#endif

	/* Allocate structure for port profiles */
	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
	if (!enic->pp) {
		err = -ENOMEM;
		goto err_out_disable_sriov_pp;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_disable_sriov;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);
	spin_lock_init(&enic->enic_api_lock);

	/*
	 * Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	netif_set_real_num_tx_queues(netdev, enic->wq_count);
	netif_set_real_num_rx_queues(netdev, enic->rq_count);

	/* Setup notification timer, HW reset task, and wq locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	enic_set_rx_coal_setting(enic);
	INIT_WORK(&enic->reset, enic_reset);
	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	/* rx coalesce time already got initialized. This gets used
	 * if adaptive coal is turned off
	 */
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	enic_set_ethtool_ops(netdev);

	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RSS))
		netdev->hw_features |= NETIF_F_RXHASH;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;

#ifdef CONFIG_RFS_ACCEL
	netdev->hw_features |= NETIF_F_NTUPLE;
#endif

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}
	enic->rx_copybreak = RX_COPYBREAK_DEFAULT;

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_disable_sriov:
	kfree(enic->pp);
err_out_disable_sriov_pp:
#ifdef CONFIG_PCI_IOV
	if (enic_sriov_enabled(enic)) {
		pci_disable_sriov(pdev);
		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
	}
err_out_vnic_unregister:
#endif
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	free_netdev(netdev);

	return err;
}
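
/* PCI remove: tear down in roughly the reverse order of enic_probe() */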
static void enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
#ifdef CONFIG_PCI_IOV
		if (enic_sriov_enabled(enic)) {
			pci_disable_sriov(pdev);
			enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
		}
#endif
		kfree(enic->pp);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		free_netdev(netdev);
	}
}
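
/* PCI driver glue: match the device IDs in enic_id_table and hook up
 * the probe/remove callbacks above.
 */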
static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = enic_remove,
};
static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);