1 /*******************************************************************************
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 - 2016 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 ******************************************************************************/
28 #include "i40e_prototype.h"
29 #include "i40evf_client.h"
/* All i40evf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
34 #define CREATE_TRACE_POINTS
35 #include "i40e_trace.h"
37 static int i40evf_setup_all_tx_resources(struct i40evf_adapter
*adapter
);
38 static int i40evf_setup_all_rx_resources(struct i40evf_adapter
*adapter
);
39 static int i40evf_close(struct net_device
*netdev
);
41 char i40evf_driver_name
[] = "i40evf";
42 static const char i40evf_driver_string
[] =
43 "Intel(R) 40-10 Gigabit Virtual Function Network Driver";
47 #define DRV_VERSION_MAJOR 3
48 #define DRV_VERSION_MINOR 0
49 #define DRV_VERSION_BUILD 1
50 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
51 __stringify(DRV_VERSION_MINOR) "." \
52 __stringify(DRV_VERSION_BUILD) \
54 const char i40evf_driver_version
[] = DRV_VERSION
;
55 static const char i40evf_copyright
[] =
56 "Copyright (c) 2013 - 2015 Intel Corporation.";
58 /* i40evf_pci_tbl - PCI Device ID Table
60 * Wildcard entries (PCI_ANY_ID) should come last
61 * Last entry must be all 0s
63 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
64 * Class, Class Mask, private data (not used) }
66 static const struct pci_device_id i40evf_pci_tbl
[] = {
67 {PCI_VDEVICE(INTEL
, I40E_DEV_ID_VF
), 0},
68 {PCI_VDEVICE(INTEL
, I40E_DEV_ID_VF_HV
), 0},
69 {PCI_VDEVICE(INTEL
, I40E_DEV_ID_X722_VF
), 0},
70 {PCI_VDEVICE(INTEL
, I40E_DEV_ID_ADAPTIVE_VF
), 0},
71 /* required last entry */
75 MODULE_DEVICE_TABLE(pci
, i40evf_pci_tbl
);
77 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
78 MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_VERSION
);
82 static struct workqueue_struct
*i40evf_wq
;
85 * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
86 * @hw: pointer to the HW structure
87 * @mem: ptr to mem struct to fill out
88 * @size: size of memory requested
89 * @alignment: what to align the allocation to
91 i40e_status
i40evf_allocate_dma_mem_d(struct i40e_hw
*hw
,
92 struct i40e_dma_mem
*mem
,
93 u64 size
, u32 alignment
)
95 struct i40evf_adapter
*adapter
= (struct i40evf_adapter
*)hw
->back
;
98 return I40E_ERR_PARAM
;
100 mem
->size
= ALIGN(size
, alignment
);
101 mem
->va
= dma_alloc_coherent(&adapter
->pdev
->dev
, mem
->size
,
102 (dma_addr_t
*)&mem
->pa
, GFP_KERNEL
);
106 return I40E_ERR_NO_MEMORY
;
110 * i40evf_free_dma_mem_d - OS specific memory free for shared code
111 * @hw: pointer to the HW structure
112 * @mem: ptr to mem struct to free
114 i40e_status
i40evf_free_dma_mem_d(struct i40e_hw
*hw
, struct i40e_dma_mem
*mem
)
116 struct i40evf_adapter
*adapter
= (struct i40evf_adapter
*)hw
->back
;
118 if (!mem
|| !mem
->va
)
119 return I40E_ERR_PARAM
;
120 dma_free_coherent(&adapter
->pdev
->dev
, mem
->size
,
121 mem
->va
, (dma_addr_t
)mem
->pa
);
126 * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
127 * @hw: pointer to the HW structure
128 * @mem: ptr to mem struct to fill out
129 * @size: size of memory requested
131 i40e_status
i40evf_allocate_virt_mem_d(struct i40e_hw
*hw
,
132 struct i40e_virt_mem
*mem
, u32 size
)
135 return I40E_ERR_PARAM
;
138 mem
->va
= kzalloc(size
, GFP_KERNEL
);
143 return I40E_ERR_NO_MEMORY
;
147 * i40evf_free_virt_mem_d - OS specific memory free for shared code
148 * @hw: pointer to the HW structure
149 * @mem: ptr to mem struct to free
151 i40e_status
i40evf_free_virt_mem_d(struct i40e_hw
*hw
,
152 struct i40e_virt_mem
*mem
)
155 return I40E_ERR_PARAM
;
157 /* it's ok to kfree a NULL pointer */
164 * i40evf_debug_d - OS dependent version of debug printing
165 * @hw: pointer to the HW structure
166 * @mask: debug level mask
167 * @fmt_str: printf-type format description
169 void i40evf_debug_d(void *hw
, u32 mask
, char *fmt_str
, ...)
174 if (!(mask
& ((struct i40e_hw
*)hw
)->debug_mask
))
177 va_start(argptr
, fmt_str
);
178 vsnprintf(buf
, sizeof(buf
), fmt_str
, argptr
);
181 /* the debug string is already formatted with a newline */
186 * i40evf_schedule_reset - Set the flags and schedule a reset event
187 * @adapter: board private structure
189 void i40evf_schedule_reset(struct i40evf_adapter
*adapter
)
191 if (!(adapter
->flags
&
192 (I40EVF_FLAG_RESET_PENDING
| I40EVF_FLAG_RESET_NEEDED
))) {
193 adapter
->flags
|= I40EVF_FLAG_RESET_NEEDED
;
194 schedule_work(&adapter
->reset_task
);
199 * i40evf_tx_timeout - Respond to a Tx Hang
200 * @netdev: network interface device structure
202 static void i40evf_tx_timeout(struct net_device
*netdev
)
204 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
206 adapter
->tx_timeout_count
++;
207 i40evf_schedule_reset(adapter
);
211 * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
212 * @adapter: board private structure
214 static void i40evf_misc_irq_disable(struct i40evf_adapter
*adapter
)
216 struct i40e_hw
*hw
= &adapter
->hw
;
218 if (!adapter
->msix_entries
)
221 wr32(hw
, I40E_VFINT_DYN_CTL01
, 0);
224 rd32(hw
, I40E_VFGEN_RSTAT
);
226 synchronize_irq(adapter
->msix_entries
[0].vector
);
230 * i40evf_misc_irq_enable - Enable default interrupt generation settings
231 * @adapter: board private structure
233 static void i40evf_misc_irq_enable(struct i40evf_adapter
*adapter
)
235 struct i40e_hw
*hw
= &adapter
->hw
;
237 wr32(hw
, I40E_VFINT_DYN_CTL01
, I40E_VFINT_DYN_CTL01_INTENA_MASK
|
238 I40E_VFINT_DYN_CTL01_ITR_INDX_MASK
);
239 wr32(hw
, I40E_VFINT_ICR0_ENA1
, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK
);
242 rd32(hw
, I40E_VFGEN_RSTAT
);
246 * i40evf_irq_disable - Mask off interrupt generation on the NIC
247 * @adapter: board private structure
249 static void i40evf_irq_disable(struct i40evf_adapter
*adapter
)
252 struct i40e_hw
*hw
= &adapter
->hw
;
254 if (!adapter
->msix_entries
)
257 for (i
= 1; i
< adapter
->num_msix_vectors
; i
++) {
258 wr32(hw
, I40E_VFINT_DYN_CTLN1(i
- 1), 0);
259 synchronize_irq(adapter
->msix_entries
[i
].vector
);
262 rd32(hw
, I40E_VFGEN_RSTAT
);
266 * i40evf_irq_enable_queues - Enable interrupt for specified queues
267 * @adapter: board private structure
268 * @mask: bitmap of queues to enable
270 void i40evf_irq_enable_queues(struct i40evf_adapter
*adapter
, u32 mask
)
272 struct i40e_hw
*hw
= &adapter
->hw
;
275 for (i
= 1; i
< adapter
->num_msix_vectors
; i
++) {
276 if (mask
& BIT(i
- 1)) {
277 wr32(hw
, I40E_VFINT_DYN_CTLN1(i
- 1),
278 I40E_VFINT_DYN_CTLN1_INTENA_MASK
|
279 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK
|
280 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK
);
286 * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
287 * @adapter: board private structure
288 * @mask: bitmap of vectors to trigger
290 static void i40evf_fire_sw_int(struct i40evf_adapter
*adapter
, u32 mask
)
292 struct i40e_hw
*hw
= &adapter
->hw
;
297 dyn_ctl
= rd32(hw
, I40E_VFINT_DYN_CTL01
);
298 dyn_ctl
|= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK
|
299 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK
|
300 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK
;
301 wr32(hw
, I40E_VFINT_DYN_CTL01
, dyn_ctl
);
303 for (i
= 1; i
< adapter
->num_msix_vectors
; i
++) {
305 dyn_ctl
= rd32(hw
, I40E_VFINT_DYN_CTLN1(i
- 1));
306 dyn_ctl
|= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK
|
307 I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK
|
308 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK
;
309 wr32(hw
, I40E_VFINT_DYN_CTLN1(i
- 1), dyn_ctl
);
315 * i40evf_irq_enable - Enable default interrupt generation settings
316 * @adapter: board private structure
317 * @flush: boolean value whether to run rd32()
319 void i40evf_irq_enable(struct i40evf_adapter
*adapter
, bool flush
)
321 struct i40e_hw
*hw
= &adapter
->hw
;
323 i40evf_misc_irq_enable(adapter
);
324 i40evf_irq_enable_queues(adapter
, ~0);
327 rd32(hw
, I40E_VFGEN_RSTAT
);
331 * i40evf_msix_aq - Interrupt handler for vector 0
332 * @irq: interrupt number
333 * @data: pointer to netdev
335 static irqreturn_t
i40evf_msix_aq(int irq
, void *data
)
337 struct net_device
*netdev
= data
;
338 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
339 struct i40e_hw
*hw
= &adapter
->hw
;
342 /* handle non-queue interrupts, these reads clear the registers */
343 val
= rd32(hw
, I40E_VFINT_ICR01
);
344 val
= rd32(hw
, I40E_VFINT_ICR0_ENA1
);
346 val
= rd32(hw
, I40E_VFINT_DYN_CTL01
) |
347 I40E_VFINT_DYN_CTL01_CLEARPBA_MASK
;
348 wr32(hw
, I40E_VFINT_DYN_CTL01
, val
);
350 /* schedule work on the private workqueue */
351 schedule_work(&adapter
->adminq_task
);
357 * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
358 * @irq: interrupt number
359 * @data: pointer to a q_vector
361 static irqreturn_t
i40evf_msix_clean_rings(int irq
, void *data
)
363 struct i40e_q_vector
*q_vector
= data
;
365 if (!q_vector
->tx
.ring
&& !q_vector
->rx
.ring
)
368 napi_schedule_irqoff(&q_vector
->napi
);
374 * i40evf_map_vector_to_rxq - associate irqs with rx queues
375 * @adapter: board private structure
376 * @v_idx: interrupt number
377 * @r_idx: queue number
380 i40evf_map_vector_to_rxq(struct i40evf_adapter
*adapter
, int v_idx
, int r_idx
)
382 struct i40e_q_vector
*q_vector
= &adapter
->q_vectors
[v_idx
];
383 struct i40e_ring
*rx_ring
= &adapter
->rx_rings
[r_idx
];
384 struct i40e_hw
*hw
= &adapter
->hw
;
386 rx_ring
->q_vector
= q_vector
;
387 rx_ring
->next
= q_vector
->rx
.ring
;
388 rx_ring
->vsi
= &adapter
->vsi
;
389 q_vector
->rx
.ring
= rx_ring
;
390 q_vector
->rx
.count
++;
391 q_vector
->rx
.latency_range
= I40E_LOW_LATENCY
;
392 q_vector
->rx
.itr
= ITR_TO_REG(rx_ring
->rx_itr_setting
);
393 q_vector
->ring_mask
|= BIT(r_idx
);
394 q_vector
->itr_countdown
= ITR_COUNTDOWN_START
;
395 wr32(hw
, I40E_VFINT_ITRN1(I40E_RX_ITR
, v_idx
- 1), q_vector
->rx
.itr
);
399 * i40evf_map_vector_to_txq - associate irqs with tx queues
400 * @adapter: board private structure
401 * @v_idx: interrupt number
402 * @t_idx: queue number
405 i40evf_map_vector_to_txq(struct i40evf_adapter
*adapter
, int v_idx
, int t_idx
)
407 struct i40e_q_vector
*q_vector
= &adapter
->q_vectors
[v_idx
];
408 struct i40e_ring
*tx_ring
= &adapter
->tx_rings
[t_idx
];
409 struct i40e_hw
*hw
= &adapter
->hw
;
411 tx_ring
->q_vector
= q_vector
;
412 tx_ring
->next
= q_vector
->tx
.ring
;
413 tx_ring
->vsi
= &adapter
->vsi
;
414 q_vector
->tx
.ring
= tx_ring
;
415 q_vector
->tx
.count
++;
416 q_vector
->tx
.latency_range
= I40E_LOW_LATENCY
;
417 q_vector
->tx
.itr
= ITR_TO_REG(tx_ring
->tx_itr_setting
);
418 q_vector
->itr_countdown
= ITR_COUNTDOWN_START
;
419 q_vector
->num_ringpairs
++;
420 wr32(hw
, I40E_VFINT_ITRN1(I40E_TX_ITR
, v_idx
- 1), q_vector
->tx
.itr
);
424 * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
425 * @adapter: board private structure to initialize
427 * This function maps descriptor rings to the queue-specific vectors
428 * we were allotted through the MSI-X enabling code. Ideally, we'd have
429 * one vector per ring/queue, but on a constrained vector budget, we
430 * group the rings as "efficiently" as possible. You would add new
431 * mapping configurations in here.
433 static void i40evf_map_rings_to_vectors(struct i40evf_adapter
*adapter
)
435 int rings_remaining
= adapter
->num_active_queues
;
436 int ridx
= 0, vidx
= 0;
439 q_vectors
= adapter
->num_msix_vectors
- NONQ_VECS
;
441 for (; ridx
< rings_remaining
; ridx
++) {
442 i40evf_map_vector_to_rxq(adapter
, vidx
, ridx
);
443 i40evf_map_vector_to_txq(adapter
, vidx
, ridx
);
445 /* In the case where we have more queues than vectors, continue
446 * round-robin on vectors until all queues are mapped.
448 if (++vidx
>= q_vectors
)
452 adapter
->aq_required
|= I40EVF_FLAG_AQ_MAP_VECTORS
;
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40evf_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/
static void i40evf_netpoll(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
		return;

	for (i = 0; i < q_vectors; i++)
		i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
}
#endif
479 * i40evf_irq_affinity_notify - Callback for affinity changes
480 * @notify: context as to what irq was changed
481 * @mask: the new affinity mask
483 * This is a callback function used by the irq_set_affinity_notifier function
484 * so that we may register to receive changes to the irq affinity masks.
486 static void i40evf_irq_affinity_notify(struct irq_affinity_notify
*notify
,
487 const cpumask_t
*mask
)
489 struct i40e_q_vector
*q_vector
=
490 container_of(notify
, struct i40e_q_vector
, affinity_notify
);
492 cpumask_copy(&q_vector
->affinity_mask
, mask
);
/**
 * i40evf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void i40evf_irq_affinity_release(struct kref *ref) {}
506 * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
507 * @adapter: board private structure
509 * Allocates MSI-X vectors for tx and rx handling, and requests
510 * interrupts from the kernel.
513 i40evf_request_traffic_irqs(struct i40evf_adapter
*adapter
, char *basename
)
515 unsigned int vector
, q_vectors
;
516 unsigned int rx_int_idx
= 0, tx_int_idx
= 0;
520 i40evf_irq_disable(adapter
);
521 /* Decrement for Other and TCP Timer vectors */
522 q_vectors
= adapter
->num_msix_vectors
- NONQ_VECS
;
524 for (vector
= 0; vector
< q_vectors
; vector
++) {
525 struct i40e_q_vector
*q_vector
= &adapter
->q_vectors
[vector
];
526 irq_num
= adapter
->msix_entries
[vector
+ NONQ_VECS
].vector
;
528 if (q_vector
->tx
.ring
&& q_vector
->rx
.ring
) {
529 snprintf(q_vector
->name
, sizeof(q_vector
->name
),
530 "i40evf-%s-TxRx-%d", basename
, rx_int_idx
++);
532 } else if (q_vector
->rx
.ring
) {
533 snprintf(q_vector
->name
, sizeof(q_vector
->name
),
534 "i40evf-%s-rx-%d", basename
, rx_int_idx
++);
535 } else if (q_vector
->tx
.ring
) {
536 snprintf(q_vector
->name
, sizeof(q_vector
->name
),
537 "i40evf-%s-tx-%d", basename
, tx_int_idx
++);
539 /* skip this unused q_vector */
542 err
= request_irq(irq_num
,
543 i40evf_msix_clean_rings
,
548 dev_info(&adapter
->pdev
->dev
,
549 "Request_irq failed, error: %d\n", err
);
550 goto free_queue_irqs
;
552 /* register for affinity change notifications */
553 q_vector
->affinity_notify
.notify
= i40evf_irq_affinity_notify
;
554 q_vector
->affinity_notify
.release
=
555 i40evf_irq_affinity_release
;
556 irq_set_affinity_notifier(irq_num
, &q_vector
->affinity_notify
);
557 /* Spread the IRQ affinity hints across online CPUs. Note that
558 * get_cpu_mask returns a mask with a permanent lifetime so
559 * it's safe to use as a hint for irq_set_affinity_hint.
561 cpu
= cpumask_local_spread(q_vector
->v_idx
, -1);
562 irq_set_affinity_hint(irq_num
, get_cpu_mask(cpu
));
570 irq_num
= adapter
->msix_entries
[vector
+ NONQ_VECS
].vector
;
571 irq_set_affinity_notifier(irq_num
, NULL
);
572 irq_set_affinity_hint(irq_num
, NULL
);
573 free_irq(irq_num
, &adapter
->q_vectors
[vector
]);
579 * i40evf_request_misc_irq - Initialize MSI-X interrupts
580 * @adapter: board private structure
582 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
583 * vector is only for the admin queue, and stays active even when the netdev
586 static int i40evf_request_misc_irq(struct i40evf_adapter
*adapter
)
588 struct net_device
*netdev
= adapter
->netdev
;
591 snprintf(adapter
->misc_vector_name
,
592 sizeof(adapter
->misc_vector_name
) - 1, "i40evf-%s:mbx",
593 dev_name(&adapter
->pdev
->dev
));
594 err
= request_irq(adapter
->msix_entries
[0].vector
,
596 adapter
->misc_vector_name
, netdev
);
598 dev_err(&adapter
->pdev
->dev
,
599 "request_irq for %s failed: %d\n",
600 adapter
->misc_vector_name
, err
);
601 free_irq(adapter
->msix_entries
[0].vector
, netdev
);
607 * i40evf_free_traffic_irqs - Free MSI-X interrupts
608 * @adapter: board private structure
610 * Frees all MSI-X vectors other than 0.
612 static void i40evf_free_traffic_irqs(struct i40evf_adapter
*adapter
)
614 int vector
, irq_num
, q_vectors
;
616 if (!adapter
->msix_entries
)
619 q_vectors
= adapter
->num_msix_vectors
- NONQ_VECS
;
621 for (vector
= 0; vector
< q_vectors
; vector
++) {
622 irq_num
= adapter
->msix_entries
[vector
+ NONQ_VECS
].vector
;
623 irq_set_affinity_notifier(irq_num
, NULL
);
624 irq_set_affinity_hint(irq_num
, NULL
);
625 free_irq(irq_num
, &adapter
->q_vectors
[vector
]);
630 * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
631 * @adapter: board private structure
633 * Frees MSI-X vector 0.
635 static void i40evf_free_misc_irq(struct i40evf_adapter
*adapter
)
637 struct net_device
*netdev
= adapter
->netdev
;
639 if (!adapter
->msix_entries
)
642 free_irq(adapter
->msix_entries
[0].vector
, netdev
);
646 * i40evf_configure_tx - Configure Transmit Unit after Reset
647 * @adapter: board private structure
649 * Configure the Tx unit of the MAC after a reset.
651 static void i40evf_configure_tx(struct i40evf_adapter
*adapter
)
653 struct i40e_hw
*hw
= &adapter
->hw
;
656 for (i
= 0; i
< adapter
->num_active_queues
; i
++)
657 adapter
->tx_rings
[i
].tail
= hw
->hw_addr
+ I40E_QTX_TAIL1(i
);
661 * i40evf_configure_rx - Configure Receive Unit after Reset
662 * @adapter: board private structure
664 * Configure the Rx unit of the MAC after a reset.
666 static void i40evf_configure_rx(struct i40evf_adapter
*adapter
)
668 unsigned int rx_buf_len
= I40E_RXBUFFER_2048
;
669 struct i40e_hw
*hw
= &adapter
->hw
;
672 /* Legacy Rx will always default to a 2048 buffer size. */
673 #if (PAGE_SIZE < 8192)
674 if (!(adapter
->flags
& I40EVF_FLAG_LEGACY_RX
)) {
675 struct net_device
*netdev
= adapter
->netdev
;
677 /* For jumbo frames on systems with 4K pages we have to use
678 * an order 1 page, so we might as well increase the size
679 * of our Rx buffer to make better use of the available space
681 rx_buf_len
= I40E_RXBUFFER_3072
;
683 /* We use a 1536 buffer size for configurations with
684 * standard Ethernet mtu. On x86 this gives us enough room
685 * for shared info and 192 bytes of padding.
687 if (!I40E_2K_TOO_SMALL_WITH_PADDING
&&
688 (netdev
->mtu
<= ETH_DATA_LEN
))
689 rx_buf_len
= I40E_RXBUFFER_1536
- NET_IP_ALIGN
;
693 for (i
= 0; i
< adapter
->num_active_queues
; i
++) {
694 adapter
->rx_rings
[i
].tail
= hw
->hw_addr
+ I40E_QRX_TAIL1(i
);
695 adapter
->rx_rings
[i
].rx_buf_len
= rx_buf_len
;
697 if (adapter
->flags
& I40EVF_FLAG_LEGACY_RX
)
698 clear_ring_build_skb_enabled(&adapter
->rx_rings
[i
]);
700 set_ring_build_skb_enabled(&adapter
->rx_rings
[i
]);
705 * i40evf_find_vlan - Search filter list for specific vlan filter
706 * @adapter: board private structure
709 * Returns ptr to the filter object or NULL
712 i40evf_vlan_filter
*i40evf_find_vlan(struct i40evf_adapter
*adapter
, u16 vlan
)
714 struct i40evf_vlan_filter
*f
;
716 list_for_each_entry(f
, &adapter
->vlan_filter_list
, list
) {
724 * i40evf_add_vlan - Add a vlan filter to the list
725 * @adapter: board private structure
728 * Returns ptr to the filter object or NULL when no memory available.
731 i40evf_vlan_filter
*i40evf_add_vlan(struct i40evf_adapter
*adapter
, u16 vlan
)
733 struct i40evf_vlan_filter
*f
= NULL
;
736 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK
,
737 &adapter
->crit_section
)) {
743 f
= i40evf_find_vlan(adapter
, vlan
);
745 f
= kzalloc(sizeof(*f
), GFP_ATOMIC
);
751 INIT_LIST_HEAD(&f
->list
);
752 list_add(&f
->list
, &adapter
->vlan_filter_list
);
754 adapter
->aq_required
|= I40EVF_FLAG_AQ_ADD_VLAN_FILTER
;
758 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
764 * i40evf_del_vlan - Remove a vlan filter from the list
765 * @adapter: board private structure
768 static void i40evf_del_vlan(struct i40evf_adapter
*adapter
, u16 vlan
)
770 struct i40evf_vlan_filter
*f
;
773 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK
,
774 &adapter
->crit_section
)) {
780 f
= i40evf_find_vlan(adapter
, vlan
);
783 adapter
->aq_required
|= I40EVF_FLAG_AQ_DEL_VLAN_FILTER
;
785 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
789 * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
790 * @netdev: network device struct
793 static int i40evf_vlan_rx_add_vid(struct net_device
*netdev
,
794 __always_unused __be16 proto
, u16 vid
)
796 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
798 if (!VLAN_ALLOWED(adapter
))
800 if (i40evf_add_vlan(adapter
, vid
) == NULL
)
806 * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
807 * @netdev: network device struct
810 static int i40evf_vlan_rx_kill_vid(struct net_device
*netdev
,
811 __always_unused __be16 proto
, u16 vid
)
813 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
815 if (VLAN_ALLOWED(adapter
)) {
816 i40evf_del_vlan(adapter
, vid
);
823 * i40evf_find_filter - Search filter list for specific mac filter
824 * @adapter: board private structure
825 * @macaddr: the MAC address
827 * Returns ptr to the filter object or NULL
830 i40evf_mac_filter
*i40evf_find_filter(struct i40evf_adapter
*adapter
,
833 struct i40evf_mac_filter
*f
;
838 list_for_each_entry(f
, &adapter
->mac_filter_list
, list
) {
839 if (ether_addr_equal(macaddr
, f
->macaddr
))
846 * i40e_add_filter - Add a mac filter to the filter list
847 * @adapter: board private structure
848 * @macaddr: the MAC address
850 * Returns ptr to the filter object or NULL when no memory available.
853 i40evf_mac_filter
*i40evf_add_filter(struct i40evf_adapter
*adapter
,
856 struct i40evf_mac_filter
*f
;
862 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK
,
863 &adapter
->crit_section
)) {
869 f
= i40evf_find_filter(adapter
, macaddr
);
871 f
= kzalloc(sizeof(*f
), GFP_ATOMIC
);
873 clear_bit(__I40EVF_IN_CRITICAL_TASK
,
874 &adapter
->crit_section
);
878 ether_addr_copy(f
->macaddr
, macaddr
);
880 list_add_tail(&f
->list
, &adapter
->mac_filter_list
);
882 adapter
->aq_required
|= I40EVF_FLAG_AQ_ADD_MAC_FILTER
;
887 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
892 * i40evf_set_mac - NDO callback to set port mac address
893 * @netdev: network interface device structure
894 * @p: pointer to an address structure
896 * Returns 0 on success, negative on failure
898 static int i40evf_set_mac(struct net_device
*netdev
, void *p
)
900 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
901 struct i40e_hw
*hw
= &adapter
->hw
;
902 struct i40evf_mac_filter
*f
;
903 struct sockaddr
*addr
= p
;
905 if (!is_valid_ether_addr(addr
->sa_data
))
906 return -EADDRNOTAVAIL
;
908 if (ether_addr_equal(netdev
->dev_addr
, addr
->sa_data
))
911 if (adapter
->flags
& I40EVF_FLAG_ADDR_SET_BY_PF
)
914 f
= i40evf_find_filter(adapter
, hw
->mac
.addr
);
917 adapter
->aq_required
|= I40EVF_FLAG_AQ_DEL_MAC_FILTER
;
920 f
= i40evf_add_filter(adapter
, addr
->sa_data
);
922 ether_addr_copy(hw
->mac
.addr
, addr
->sa_data
);
923 ether_addr_copy(netdev
->dev_addr
, adapter
->hw
.mac
.addr
);
926 return (f
== NULL
) ? -ENOMEM
: 0;
930 * i40evf_set_rx_mode - NDO callback to set the netdev filters
931 * @netdev: network interface device structure
933 static void i40evf_set_rx_mode(struct net_device
*netdev
)
935 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
936 struct i40evf_mac_filter
*f
, *ftmp
;
937 struct netdev_hw_addr
*uca
;
938 struct netdev_hw_addr
*mca
;
939 struct netdev_hw_addr
*ha
;
942 /* add addr if not already in the filter list */
943 netdev_for_each_uc_addr(uca
, netdev
) {
944 i40evf_add_filter(adapter
, uca
->addr
);
946 netdev_for_each_mc_addr(mca
, netdev
) {
947 i40evf_add_filter(adapter
, mca
->addr
);
950 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK
,
951 &adapter
->crit_section
)) {
954 dev_err(&adapter
->pdev
->dev
,
955 "Failed to get lock in %s\n", __func__
);
959 /* remove filter if not in netdev list */
960 list_for_each_entry_safe(f
, ftmp
, &adapter
->mac_filter_list
, list
) {
961 netdev_for_each_mc_addr(mca
, netdev
)
962 if (ether_addr_equal(mca
->addr
, f
->macaddr
))
963 goto bottom_of_search_loop
;
965 netdev_for_each_uc_addr(uca
, netdev
)
966 if (ether_addr_equal(uca
->addr
, f
->macaddr
))
967 goto bottom_of_search_loop
;
969 for_each_dev_addr(netdev
, ha
)
970 if (ether_addr_equal(ha
->addr
, f
->macaddr
))
971 goto bottom_of_search_loop
;
973 if (ether_addr_equal(f
->macaddr
, adapter
->hw
.mac
.addr
))
974 goto bottom_of_search_loop
;
976 /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
978 adapter
->aq_required
|= I40EVF_FLAG_AQ_DEL_MAC_FILTER
;
980 bottom_of_search_loop
:
984 if (netdev
->flags
& IFF_PROMISC
&&
985 !(adapter
->flags
& I40EVF_FLAG_PROMISC_ON
))
986 adapter
->aq_required
|= I40EVF_FLAG_AQ_REQUEST_PROMISC
;
987 else if (!(netdev
->flags
& IFF_PROMISC
) &&
988 adapter
->flags
& I40EVF_FLAG_PROMISC_ON
)
989 adapter
->aq_required
|= I40EVF_FLAG_AQ_RELEASE_PROMISC
;
991 if (netdev
->flags
& IFF_ALLMULTI
&&
992 !(adapter
->flags
& I40EVF_FLAG_ALLMULTI_ON
))
993 adapter
->aq_required
|= I40EVF_FLAG_AQ_REQUEST_ALLMULTI
;
994 else if (!(netdev
->flags
& IFF_ALLMULTI
) &&
995 adapter
->flags
& I40EVF_FLAG_ALLMULTI_ON
)
996 adapter
->aq_required
|= I40EVF_FLAG_AQ_RELEASE_ALLMULTI
;
998 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
1002 * i40evf_napi_enable_all - enable NAPI on all queue vectors
1003 * @adapter: board private structure
1005 static void i40evf_napi_enable_all(struct i40evf_adapter
*adapter
)
1008 struct i40e_q_vector
*q_vector
;
1009 int q_vectors
= adapter
->num_msix_vectors
- NONQ_VECS
;
1011 for (q_idx
= 0; q_idx
< q_vectors
; q_idx
++) {
1012 struct napi_struct
*napi
;
1014 q_vector
= &adapter
->q_vectors
[q_idx
];
1015 napi
= &q_vector
->napi
;
1021 * i40evf_napi_disable_all - disable NAPI on all queue vectors
1022 * @adapter: board private structure
1024 static void i40evf_napi_disable_all(struct i40evf_adapter
*adapter
)
1027 struct i40e_q_vector
*q_vector
;
1028 int q_vectors
= adapter
->num_msix_vectors
- NONQ_VECS
;
1030 for (q_idx
= 0; q_idx
< q_vectors
; q_idx
++) {
1031 q_vector
= &adapter
->q_vectors
[q_idx
];
1032 napi_disable(&q_vector
->napi
);
1037 * i40evf_configure - set up transmit and receive data structures
1038 * @adapter: board private structure
1040 static void i40evf_configure(struct i40evf_adapter
*adapter
)
1042 struct net_device
*netdev
= adapter
->netdev
;
1045 i40evf_set_rx_mode(netdev
);
1047 i40evf_configure_tx(adapter
);
1048 i40evf_configure_rx(adapter
);
1049 adapter
->aq_required
|= I40EVF_FLAG_AQ_CONFIGURE_QUEUES
;
1051 for (i
= 0; i
< adapter
->num_active_queues
; i
++) {
1052 struct i40e_ring
*ring
= &adapter
->rx_rings
[i
];
1054 i40evf_alloc_rx_buffers(ring
, I40E_DESC_UNUSED(ring
));
1059 * i40evf_up_complete - Finish the last steps of bringing up a connection
1060 * @adapter: board private structure
1062 static void i40evf_up_complete(struct i40evf_adapter
*adapter
)
1064 adapter
->state
= __I40EVF_RUNNING
;
1065 clear_bit(__I40E_VSI_DOWN
, adapter
->vsi
.state
);
1067 i40evf_napi_enable_all(adapter
);
1069 adapter
->aq_required
|= I40EVF_FLAG_AQ_ENABLE_QUEUES
;
1070 if (CLIENT_ENABLED(adapter
))
1071 adapter
->flags
|= I40EVF_FLAG_CLIENT_NEEDS_OPEN
;
1072 mod_timer_pending(&adapter
->watchdog_timer
, jiffies
+ 1);
1076 * i40e_down - Shutdown the connection processing
1077 * @adapter: board private structure
1079 void i40evf_down(struct i40evf_adapter
*adapter
)
1081 struct net_device
*netdev
= adapter
->netdev
;
1082 struct i40evf_mac_filter
*f
;
1084 if (adapter
->state
<= __I40EVF_DOWN_PENDING
)
1087 while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK
,
1088 &adapter
->crit_section
))
1089 usleep_range(500, 1000);
1091 netif_carrier_off(netdev
);
1092 netif_tx_disable(netdev
);
1093 adapter
->link_up
= false;
1094 i40evf_napi_disable_all(adapter
);
1095 i40evf_irq_disable(adapter
);
1097 /* remove all MAC filters */
1098 list_for_each_entry(f
, &adapter
->mac_filter_list
, list
) {
1101 /* remove all VLAN filters */
1102 list_for_each_entry(f
, &adapter
->vlan_filter_list
, list
) {
1105 if (!(adapter
->flags
& I40EVF_FLAG_PF_COMMS_FAILED
) &&
1106 adapter
->state
!= __I40EVF_RESETTING
) {
1107 /* cancel any current operation */
1108 adapter
->current_op
= VIRTCHNL_OP_UNKNOWN
;
1109 /* Schedule operations to close down the HW. Don't wait
1110 * here for this to complete. The watchdog is still running
1111 * and it will take care of this.
1113 adapter
->aq_required
= I40EVF_FLAG_AQ_DEL_MAC_FILTER
;
1114 adapter
->aq_required
|= I40EVF_FLAG_AQ_DEL_VLAN_FILTER
;
1115 adapter
->aq_required
|= I40EVF_FLAG_AQ_DISABLE_QUEUES
;
1118 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
1119 mod_timer_pending(&adapter
->watchdog_timer
, jiffies
+ 1);
1123 * i40evf_acquire_msix_vectors - Setup the MSIX capability
1124 * @adapter: board private structure
1125 * @vectors: number of vectors to request
1127 * Work with the OS to set up the MSIX vectors needed.
1129 * Returns 0 on success, negative on failure
1132 i40evf_acquire_msix_vectors(struct i40evf_adapter
*adapter
, int vectors
)
1134 int err
, vector_threshold
;
1136 /* We'll want at least 3 (vector_threshold):
1137 * 0) Other (Admin Queue and link, mostly)
1141 vector_threshold
= MIN_MSIX_COUNT
;
1143 /* The more we get, the more we will assign to Tx/Rx Cleanup
1144 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1145 * Right now, we simply care about how many we'll get; we'll
1146 * set them up later while requesting irq's.
1148 err
= pci_enable_msix_range(adapter
->pdev
, adapter
->msix_entries
,
1149 vector_threshold
, vectors
);
1151 dev_err(&adapter
->pdev
->dev
, "Unable to allocate MSI-X interrupts\n");
1152 kfree(adapter
->msix_entries
);
1153 adapter
->msix_entries
= NULL
;
1157 /* Adjust for only the vectors we'll use, which is minimum
1158 * of max_msix_q_vectors + NONQ_VECS, or the number of
1159 * vectors we were allocated.
1161 adapter
->num_msix_vectors
= err
;
1166 * i40evf_free_queues - Free memory for all rings
1167 * @adapter: board private structure to initialize
1169 * Free all of the memory associated with queue pairs.
1171 static void i40evf_free_queues(struct i40evf_adapter
*adapter
)
1173 if (!adapter
->vsi_res
)
1175 adapter
->num_active_queues
= 0;
1176 kfree(adapter
->tx_rings
);
1177 adapter
->tx_rings
= NULL
;
1178 kfree(adapter
->rx_rings
);
1179 adapter
->rx_rings
= NULL
;
1183 * i40evf_alloc_queues - Allocate memory for all rings
1184 * @adapter: board private structure to initialize
1186 * We allocate one ring per queue at run-time since we don't know the
1187 * number of queues at compile-time. The polling_netdev array is
1188 * intended for Multiqueue, but should work fine with a single queue.
1190 static int i40evf_alloc_queues(struct i40evf_adapter
*adapter
)
1192 int i
, num_active_queues
;
1194 /* If we're in reset reallocating queues we don't actually know yet for
1195 * certain the PF gave us the number of queues we asked for but we'll
1196 * assume it did. Once basic reset is finished we'll confirm once we
1197 * start negotiating config with PF.
1199 if (adapter
->num_req_queues
)
1200 num_active_queues
= adapter
->num_req_queues
;
1202 num_active_queues
= min_t(int,
1203 adapter
->vsi_res
->num_queue_pairs
,
1204 (int)(num_online_cpus()));
1207 adapter
->tx_rings
= kcalloc(num_active_queues
,
1208 sizeof(struct i40e_ring
), GFP_KERNEL
);
1209 if (!adapter
->tx_rings
)
1211 adapter
->rx_rings
= kcalloc(num_active_queues
,
1212 sizeof(struct i40e_ring
), GFP_KERNEL
);
1213 if (!adapter
->rx_rings
)
1216 for (i
= 0; i
< num_active_queues
; i
++) {
1217 struct i40e_ring
*tx_ring
;
1218 struct i40e_ring
*rx_ring
;
1220 tx_ring
= &adapter
->tx_rings
[i
];
1222 tx_ring
->queue_index
= i
;
1223 tx_ring
->netdev
= adapter
->netdev
;
1224 tx_ring
->dev
= &adapter
->pdev
->dev
;
1225 tx_ring
->count
= adapter
->tx_desc_count
;
1226 tx_ring
->tx_itr_setting
= I40E_ITR_TX_DEF
;
1227 if (adapter
->flags
& I40EVF_FLAG_WB_ON_ITR_CAPABLE
)
1228 tx_ring
->flags
|= I40E_TXR_FLAGS_WB_ON_ITR
;
1230 rx_ring
= &adapter
->rx_rings
[i
];
1231 rx_ring
->queue_index
= i
;
1232 rx_ring
->netdev
= adapter
->netdev
;
1233 rx_ring
->dev
= &adapter
->pdev
->dev
;
1234 rx_ring
->count
= adapter
->rx_desc_count
;
1235 rx_ring
->rx_itr_setting
= I40E_ITR_RX_DEF
;
1238 adapter
->num_active_queues
= num_active_queues
;
1243 i40evf_free_queues(adapter
);
1248 * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
1249 * @adapter: board private structure to initialize
1251 * Attempt to configure the interrupts using the best available
1252 * capabilities of the hardware and the kernel.
1254 static int i40evf_set_interrupt_capability(struct i40evf_adapter
*adapter
)
1256 int vector
, v_budget
;
1260 if (!adapter
->vsi_res
) {
1264 pairs
= adapter
->num_active_queues
;
1266 /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
1267 * us much good if we have more vectors than CPUs. However, we already
1268 * limit the total number of queues by the number of CPUs so we do not
1269 * need any further limiting here.
1271 v_budget
= min_t(int, pairs
+ NONQ_VECS
,
1272 (int)adapter
->vf_res
->max_vectors
);
1274 adapter
->msix_entries
= kcalloc(v_budget
,
1275 sizeof(struct msix_entry
), GFP_KERNEL
);
1276 if (!adapter
->msix_entries
) {
1281 for (vector
= 0; vector
< v_budget
; vector
++)
1282 adapter
->msix_entries
[vector
].entry
= vector
;
1284 err
= i40evf_acquire_msix_vectors(adapter
, v_budget
);
1287 netif_set_real_num_rx_queues(adapter
->netdev
, pairs
);
1288 netif_set_real_num_tx_queues(adapter
->netdev
, pairs
);
1293 * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands
1294 * @adapter: board private structure
1296 * Return 0 on success, negative on failure
1298 static int i40evf_config_rss_aq(struct i40evf_adapter
*adapter
)
1300 struct i40e_aqc_get_set_rss_key_data
*rss_key
=
1301 (struct i40e_aqc_get_set_rss_key_data
*)adapter
->rss_key
;
1302 struct i40e_hw
*hw
= &adapter
->hw
;
1305 if (adapter
->current_op
!= VIRTCHNL_OP_UNKNOWN
) {
1306 /* bail because we already have a command pending */
1307 dev_err(&adapter
->pdev
->dev
, "Cannot configure RSS, command %d pending\n",
1308 adapter
->current_op
);
1312 ret
= i40evf_aq_set_rss_key(hw
, adapter
->vsi
.id
, rss_key
);
1314 dev_err(&adapter
->pdev
->dev
, "Cannot set RSS key, err %s aq_err %s\n",
1315 i40evf_stat_str(hw
, ret
),
1316 i40evf_aq_str(hw
, hw
->aq
.asq_last_status
));
1321 ret
= i40evf_aq_set_rss_lut(hw
, adapter
->vsi
.id
, false,
1322 adapter
->rss_lut
, adapter
->rss_lut_size
);
1324 dev_err(&adapter
->pdev
->dev
, "Cannot set RSS lut, err %s aq_err %s\n",
1325 i40evf_stat_str(hw
, ret
),
1326 i40evf_aq_str(hw
, hw
->aq
.asq_last_status
));
1334 * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
1335 * @adapter: board private structure
1337 * Returns 0 on success, negative on failure
1339 static int i40evf_config_rss_reg(struct i40evf_adapter
*adapter
)
1341 struct i40e_hw
*hw
= &adapter
->hw
;
1345 dw
= (u32
*)adapter
->rss_key
;
1346 for (i
= 0; i
<= adapter
->rss_key_size
/ 4; i
++)
1347 wr32(hw
, I40E_VFQF_HKEY(i
), dw
[i
]);
1349 dw
= (u32
*)adapter
->rss_lut
;
1350 for (i
= 0; i
<= adapter
->rss_lut_size
/ 4; i
++)
1351 wr32(hw
, I40E_VFQF_HLUT(i
), dw
[i
]);
1359 * i40evf_config_rss - Configure RSS keys and lut
1360 * @adapter: board private structure
1362 * Returns 0 on success, negative on failure
1364 int i40evf_config_rss(struct i40evf_adapter
*adapter
)
1367 if (RSS_PF(adapter
)) {
1368 adapter
->aq_required
|= I40EVF_FLAG_AQ_SET_RSS_LUT
|
1369 I40EVF_FLAG_AQ_SET_RSS_KEY
;
1371 } else if (RSS_AQ(adapter
)) {
1372 return i40evf_config_rss_aq(adapter
);
1374 return i40evf_config_rss_reg(adapter
);
1379 * i40evf_fill_rss_lut - Fill the lut with default values
1380 * @adapter: board private structure
1382 static void i40evf_fill_rss_lut(struct i40evf_adapter
*adapter
)
1386 for (i
= 0; i
< adapter
->rss_lut_size
; i
++)
1387 adapter
->rss_lut
[i
] = i
% adapter
->num_active_queues
;
1391 * i40evf_init_rss - Prepare for RSS
1392 * @adapter: board private structure
1394 * Return 0 on success, negative on failure
1396 static int i40evf_init_rss(struct i40evf_adapter
*adapter
)
1398 struct i40e_hw
*hw
= &adapter
->hw
;
1401 if (!RSS_PF(adapter
)) {
1402 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1403 if (adapter
->vf_res
->vf_cap_flags
&
1404 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2
)
1405 adapter
->hena
= I40E_DEFAULT_RSS_HENA_EXPANDED
;
1407 adapter
->hena
= I40E_DEFAULT_RSS_HENA
;
1409 wr32(hw
, I40E_VFQF_HENA(0), (u32
)adapter
->hena
);
1410 wr32(hw
, I40E_VFQF_HENA(1), (u32
)(adapter
->hena
>> 32));
1413 i40evf_fill_rss_lut(adapter
);
1415 netdev_rss_key_fill((void *)adapter
->rss_key
, adapter
->rss_key_size
);
1416 ret
= i40evf_config_rss(adapter
);
1422 * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
1423 * @adapter: board private structure to initialize
1425 * We allocate one q_vector per queue interrupt. If allocation fails we
1428 static int i40evf_alloc_q_vectors(struct i40evf_adapter
*adapter
)
1430 int q_idx
= 0, num_q_vectors
;
1431 struct i40e_q_vector
*q_vector
;
1433 num_q_vectors
= adapter
->num_msix_vectors
- NONQ_VECS
;
1434 adapter
->q_vectors
= kcalloc(num_q_vectors
, sizeof(*q_vector
),
1436 if (!adapter
->q_vectors
)
1439 for (q_idx
= 0; q_idx
< num_q_vectors
; q_idx
++) {
1440 q_vector
= &adapter
->q_vectors
[q_idx
];
1441 q_vector
->adapter
= adapter
;
1442 q_vector
->vsi
= &adapter
->vsi
;
1443 q_vector
->v_idx
= q_idx
;
1444 cpumask_copy(&q_vector
->affinity_mask
, cpu_possible_mask
);
1445 netif_napi_add(adapter
->netdev
, &q_vector
->napi
,
1446 i40evf_napi_poll
, NAPI_POLL_WEIGHT
);
1453 * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
1454 * @adapter: board private structure to initialize
1456 * This function frees the memory allocated to the q_vectors. In addition if
1457 * NAPI is enabled it will delete any references to the NAPI struct prior
1458 * to freeing the q_vector.
1460 static void i40evf_free_q_vectors(struct i40evf_adapter
*adapter
)
1462 int q_idx
, num_q_vectors
;
1465 if (!adapter
->q_vectors
)
1468 num_q_vectors
= adapter
->num_msix_vectors
- NONQ_VECS
;
1469 napi_vectors
= adapter
->num_active_queues
;
1471 for (q_idx
= 0; q_idx
< num_q_vectors
; q_idx
++) {
1472 struct i40e_q_vector
*q_vector
= &adapter
->q_vectors
[q_idx
];
1473 if (q_idx
< napi_vectors
)
1474 netif_napi_del(&q_vector
->napi
);
1476 kfree(adapter
->q_vectors
);
1477 adapter
->q_vectors
= NULL
;
1481 * i40evf_reset_interrupt_capability - Reset MSIX setup
1482 * @adapter: board private structure
1485 void i40evf_reset_interrupt_capability(struct i40evf_adapter
*adapter
)
1487 if (!adapter
->msix_entries
)
1490 pci_disable_msix(adapter
->pdev
);
1491 kfree(adapter
->msix_entries
);
1492 adapter
->msix_entries
= NULL
;
1496 * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
1497 * @adapter: board private structure to initialize
1500 int i40evf_init_interrupt_scheme(struct i40evf_adapter
*adapter
)
1504 err
= i40evf_alloc_queues(adapter
);
1506 dev_err(&adapter
->pdev
->dev
,
1507 "Unable to allocate memory for queues\n");
1508 goto err_alloc_queues
;
1512 err
= i40evf_set_interrupt_capability(adapter
);
1515 dev_err(&adapter
->pdev
->dev
,
1516 "Unable to setup interrupt capabilities\n");
1517 goto err_set_interrupt
;
1520 err
= i40evf_alloc_q_vectors(adapter
);
1522 dev_err(&adapter
->pdev
->dev
,
1523 "Unable to allocate memory for queue vectors\n");
1524 goto err_alloc_q_vectors
;
1527 dev_info(&adapter
->pdev
->dev
, "Multiqueue %s: Queue pair count = %u",
1528 (adapter
->num_active_queues
> 1) ? "Enabled" : "Disabled",
1529 adapter
->num_active_queues
);
1532 err_alloc_q_vectors
:
1533 i40evf_reset_interrupt_capability(adapter
);
1535 i40evf_free_queues(adapter
);
1541 * i40evf_free_rss - Free memory used by RSS structs
1542 * @adapter: board private structure
1544 static void i40evf_free_rss(struct i40evf_adapter
*adapter
)
1546 kfree(adapter
->rss_key
);
1547 adapter
->rss_key
= NULL
;
1549 kfree(adapter
->rss_lut
);
1550 adapter
->rss_lut
= NULL
;
1554 * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors
1555 * @adapter: board private structure
1557 * Returns 0 on success, negative on failure
1559 static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter
*adapter
)
1561 struct net_device
*netdev
= adapter
->netdev
;
1564 if (netif_running(netdev
))
1565 i40evf_free_traffic_irqs(adapter
);
1566 i40evf_free_misc_irq(adapter
);
1567 i40evf_reset_interrupt_capability(adapter
);
1568 i40evf_free_q_vectors(adapter
);
1569 i40evf_free_queues(adapter
);
1571 err
= i40evf_init_interrupt_scheme(adapter
);
1575 netif_tx_stop_all_queues(netdev
);
1577 err
= i40evf_request_misc_irq(adapter
);
1581 set_bit(__I40E_VSI_DOWN
, adapter
->vsi
.state
);
1583 i40evf_map_rings_to_vectors(adapter
);
1585 if (RSS_AQ(adapter
))
1586 adapter
->aq_required
|= I40EVF_FLAG_AQ_CONFIGURE_RSS
;
1588 err
= i40evf_init_rss(adapter
);
1594 * i40evf_watchdog_timer - Periodic call-back timer
1595 * @data: pointer to adapter disguised as unsigned long
1597 static void i40evf_watchdog_timer(struct timer_list
*t
)
1599 struct i40evf_adapter
*adapter
= from_timer(adapter
, t
,
1602 schedule_work(&adapter
->watchdog_task
);
1603 /* timer will be rescheduled in watchdog task */
1607 * i40evf_watchdog_task - Periodic call-back task
1608 * @work: pointer to work_struct
1610 static void i40evf_watchdog_task(struct work_struct
*work
)
1612 struct i40evf_adapter
*adapter
= container_of(work
,
1613 struct i40evf_adapter
,
1615 struct i40e_hw
*hw
= &adapter
->hw
;
1618 if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
))
1619 goto restart_watchdog
;
1621 if (adapter
->flags
& I40EVF_FLAG_PF_COMMS_FAILED
) {
1622 reg_val
= rd32(hw
, I40E_VFGEN_RSTAT
) &
1623 I40E_VFGEN_RSTAT_VFR_STATE_MASK
;
1624 if ((reg_val
== VIRTCHNL_VFR_VFACTIVE
) ||
1625 (reg_val
== VIRTCHNL_VFR_COMPLETED
)) {
1626 /* A chance for redemption! */
1627 dev_err(&adapter
->pdev
->dev
, "Hardware came out of reset. Attempting reinit.\n");
1628 adapter
->state
= __I40EVF_STARTUP
;
1629 adapter
->flags
&= ~I40EVF_FLAG_PF_COMMS_FAILED
;
1630 schedule_delayed_work(&adapter
->init_task
, 10);
1631 clear_bit(__I40EVF_IN_CRITICAL_TASK
,
1632 &adapter
->crit_section
);
1633 /* Don't reschedule the watchdog, since we've restarted
1634 * the init task. When init_task contacts the PF and
1635 * gets everything set up again, it'll restart the
1636 * watchdog for us. Down, boy. Sit. Stay. Woof.
1640 adapter
->aq_required
= 0;
1641 adapter
->current_op
= VIRTCHNL_OP_UNKNOWN
;
1645 if ((adapter
->state
< __I40EVF_DOWN
) ||
1646 (adapter
->flags
& I40EVF_FLAG_RESET_PENDING
))
1649 /* check for reset */
1650 reg_val
= rd32(hw
, I40E_VF_ARQLEN1
) & I40E_VF_ARQLEN1_ARQENABLE_MASK
;
1651 if (!(adapter
->flags
& I40EVF_FLAG_RESET_PENDING
) && !reg_val
) {
1652 adapter
->state
= __I40EVF_RESETTING
;
1653 adapter
->flags
|= I40EVF_FLAG_RESET_PENDING
;
1654 dev_err(&adapter
->pdev
->dev
, "Hardware reset detected\n");
1655 schedule_work(&adapter
->reset_task
);
1656 adapter
->aq_required
= 0;
1657 adapter
->current_op
= VIRTCHNL_OP_UNKNOWN
;
1661 /* Process admin queue tasks. After init, everything gets done
1662 * here so we don't race on the admin queue.
1664 if (adapter
->current_op
) {
1665 if (!i40evf_asq_done(hw
)) {
1666 dev_dbg(&adapter
->pdev
->dev
, "Admin queue timeout\n");
1667 i40evf_send_api_ver(adapter
);
1671 if (adapter
->aq_required
& I40EVF_FLAG_AQ_GET_CONFIG
) {
1672 i40evf_send_vf_config_msg(adapter
);
1676 if (adapter
->aq_required
& I40EVF_FLAG_AQ_DISABLE_QUEUES
) {
1677 i40evf_disable_queues(adapter
);
1681 if (adapter
->aq_required
& I40EVF_FLAG_AQ_MAP_VECTORS
) {
1682 i40evf_map_queues(adapter
);
1686 if (adapter
->aq_required
& I40EVF_FLAG_AQ_ADD_MAC_FILTER
) {
1687 i40evf_add_ether_addrs(adapter
);
1691 if (adapter
->aq_required
& I40EVF_FLAG_AQ_ADD_VLAN_FILTER
) {
1692 i40evf_add_vlans(adapter
);
1696 if (adapter
->aq_required
& I40EVF_FLAG_AQ_DEL_MAC_FILTER
) {
1697 i40evf_del_ether_addrs(adapter
);
1701 if (adapter
->aq_required
& I40EVF_FLAG_AQ_DEL_VLAN_FILTER
) {
1702 i40evf_del_vlans(adapter
);
1706 if (adapter
->aq_required
& I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING
) {
1707 i40evf_enable_vlan_stripping(adapter
);
1711 if (adapter
->aq_required
& I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING
) {
1712 i40evf_disable_vlan_stripping(adapter
);
1716 if (adapter
->aq_required
& I40EVF_FLAG_AQ_CONFIGURE_QUEUES
) {
1717 i40evf_configure_queues(adapter
);
1721 if (adapter
->aq_required
& I40EVF_FLAG_AQ_ENABLE_QUEUES
) {
1722 i40evf_enable_queues(adapter
);
1726 if (adapter
->aq_required
& I40EVF_FLAG_AQ_CONFIGURE_RSS
) {
1727 /* This message goes straight to the firmware, not the
1728 * PF, so we don't have to set current_op as we will
1729 * not get a response through the ARQ.
1731 i40evf_init_rss(adapter
);
1732 adapter
->aq_required
&= ~I40EVF_FLAG_AQ_CONFIGURE_RSS
;
1735 if (adapter
->aq_required
& I40EVF_FLAG_AQ_GET_HENA
) {
1736 i40evf_get_hena(adapter
);
1739 if (adapter
->aq_required
& I40EVF_FLAG_AQ_SET_HENA
) {
1740 i40evf_set_hena(adapter
);
1743 if (adapter
->aq_required
& I40EVF_FLAG_AQ_SET_RSS_KEY
) {
1744 i40evf_set_rss_key(adapter
);
1747 if (adapter
->aq_required
& I40EVF_FLAG_AQ_SET_RSS_LUT
) {
1748 i40evf_set_rss_lut(adapter
);
1752 if (adapter
->aq_required
& I40EVF_FLAG_AQ_REQUEST_PROMISC
) {
1753 i40evf_set_promiscuous(adapter
, FLAG_VF_UNICAST_PROMISC
|
1754 FLAG_VF_MULTICAST_PROMISC
);
1758 if (adapter
->aq_required
& I40EVF_FLAG_AQ_REQUEST_ALLMULTI
) {
1759 i40evf_set_promiscuous(adapter
, FLAG_VF_MULTICAST_PROMISC
);
1763 if ((adapter
->aq_required
& I40EVF_FLAG_AQ_RELEASE_PROMISC
) &&
1764 (adapter
->aq_required
& I40EVF_FLAG_AQ_RELEASE_ALLMULTI
)) {
1765 i40evf_set_promiscuous(adapter
, 0);
1768 schedule_delayed_work(&adapter
->client_task
, msecs_to_jiffies(5));
1770 if (adapter
->state
== __I40EVF_RUNNING
)
1771 i40evf_request_stats(adapter
);
1773 if (adapter
->state
== __I40EVF_RUNNING
) {
1774 i40evf_irq_enable_queues(adapter
, ~0);
1775 i40evf_fire_sw_int(adapter
, 0xFF);
1777 i40evf_fire_sw_int(adapter
, 0x1);
1780 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
1782 if (adapter
->state
== __I40EVF_REMOVE
)
1784 if (adapter
->aq_required
)
1785 mod_timer(&adapter
->watchdog_timer
,
1786 jiffies
+ msecs_to_jiffies(20));
1788 mod_timer(&adapter
->watchdog_timer
, jiffies
+ (HZ
* 2));
1789 schedule_work(&adapter
->adminq_task
);
1792 static void i40evf_disable_vf(struct i40evf_adapter
*adapter
)
1794 struct i40evf_mac_filter
*f
, *ftmp
;
1795 struct i40evf_vlan_filter
*fv
, *fvtmp
;
1797 adapter
->flags
|= I40EVF_FLAG_PF_COMMS_FAILED
;
1799 if (netif_running(adapter
->netdev
)) {
1800 set_bit(__I40E_VSI_DOWN
, adapter
->vsi
.state
);
1801 netif_carrier_off(adapter
->netdev
);
1802 netif_tx_disable(adapter
->netdev
);
1803 adapter
->link_up
= false;
1804 i40evf_napi_disable_all(adapter
);
1805 i40evf_irq_disable(adapter
);
1806 i40evf_free_traffic_irqs(adapter
);
1807 i40evf_free_all_tx_resources(adapter
);
1808 i40evf_free_all_rx_resources(adapter
);
1811 /* Delete all of the filters, both MAC and VLAN. */
1812 list_for_each_entry_safe(f
, ftmp
, &adapter
->mac_filter_list
, list
) {
1817 list_for_each_entry_safe(fv
, fvtmp
, &adapter
->vlan_filter_list
, list
) {
1818 list_del(&fv
->list
);
1822 i40evf_free_misc_irq(adapter
);
1823 i40evf_reset_interrupt_capability(adapter
);
1824 i40evf_free_queues(adapter
);
1825 i40evf_free_q_vectors(adapter
);
1826 kfree(adapter
->vf_res
);
1827 i40evf_shutdown_adminq(&adapter
->hw
);
1828 adapter
->netdev
->flags
&= ~IFF_UP
;
1829 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
1830 adapter
->flags
&= ~I40EVF_FLAG_RESET_PENDING
;
1831 adapter
->state
= __I40EVF_DOWN
;
1832 wake_up(&adapter
->down_waitqueue
);
1833 dev_info(&adapter
->pdev
->dev
, "Reset task did not complete, VF disabled\n");
1836 #define I40EVF_RESET_WAIT_MS 10
1837 #define I40EVF_RESET_WAIT_COUNT 500
1839 * i40evf_reset_task - Call-back task to handle hardware reset
1840 * @work: pointer to work_struct
1842 * During reset we need to shut down and reinitialize the admin queue
1843 * before we can use it to communicate with the PF again. We also clear
1844 * and reinit the rings because that context is lost as well.
1846 static void i40evf_reset_task(struct work_struct
*work
)
1848 struct i40evf_adapter
*adapter
= container_of(work
,
1849 struct i40evf_adapter
,
1851 struct net_device
*netdev
= adapter
->netdev
;
1852 struct i40e_hw
*hw
= &adapter
->hw
;
1853 struct i40evf_vlan_filter
*vlf
;
1854 struct i40evf_mac_filter
*f
;
1858 while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK
,
1859 &adapter
->crit_section
))
1860 usleep_range(500, 1000);
1861 if (CLIENT_ENABLED(adapter
)) {
1862 adapter
->flags
&= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN
|
1863 I40EVF_FLAG_CLIENT_NEEDS_CLOSE
|
1864 I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS
|
1865 I40EVF_FLAG_SERVICE_CLIENT_REQUESTED
);
1866 cancel_delayed_work_sync(&adapter
->client_task
);
1867 i40evf_notify_client_close(&adapter
->vsi
, true);
1869 i40evf_misc_irq_disable(adapter
);
1870 if (adapter
->flags
& I40EVF_FLAG_RESET_NEEDED
) {
1871 adapter
->flags
&= ~I40EVF_FLAG_RESET_NEEDED
;
1872 /* Restart the AQ here. If we have been reset but didn't
1873 * detect it, or if the PF had to reinit, our AQ will be hosed.
1875 i40evf_shutdown_adminq(hw
);
1876 i40evf_init_adminq(hw
);
1877 i40evf_request_reset(adapter
);
1879 adapter
->flags
|= I40EVF_FLAG_RESET_PENDING
;
1881 /* poll until we see the reset actually happen */
1882 for (i
= 0; i
< I40EVF_RESET_WAIT_COUNT
; i
++) {
1883 reg_val
= rd32(hw
, I40E_VF_ARQLEN1
) &
1884 I40E_VF_ARQLEN1_ARQENABLE_MASK
;
1887 usleep_range(5000, 10000);
1889 if (i
== I40EVF_RESET_WAIT_COUNT
) {
1890 dev_info(&adapter
->pdev
->dev
, "Never saw reset\n");
1891 goto continue_reset
; /* act like the reset happened */
1894 /* wait until the reset is complete and the PF is responding to us */
1895 for (i
= 0; i
< I40EVF_RESET_WAIT_COUNT
; i
++) {
1896 /* sleep first to make sure a minimum wait time is met */
1897 msleep(I40EVF_RESET_WAIT_MS
);
1899 reg_val
= rd32(hw
, I40E_VFGEN_RSTAT
) &
1900 I40E_VFGEN_RSTAT_VFR_STATE_MASK
;
1901 if (reg_val
== VIRTCHNL_VFR_VFACTIVE
)
1905 pci_set_master(adapter
->pdev
);
1907 if (i
== I40EVF_RESET_WAIT_COUNT
) {
1908 dev_err(&adapter
->pdev
->dev
, "Reset never finished (%x)\n",
1910 i40evf_disable_vf(adapter
);
1911 clear_bit(__I40EVF_IN_CLIENT_TASK
, &adapter
->crit_section
);
1912 return; /* Do not attempt to reinit. It's dead, Jim. */
1916 if (netif_running(netdev
)) {
1917 netif_carrier_off(netdev
);
1918 netif_tx_stop_all_queues(netdev
);
1919 adapter
->link_up
= false;
1920 i40evf_napi_disable_all(adapter
);
1922 i40evf_irq_disable(adapter
);
1924 adapter
->state
= __I40EVF_RESETTING
;
1925 adapter
->flags
&= ~I40EVF_FLAG_RESET_PENDING
;
1927 /* free the Tx/Rx rings and descriptors, might be better to just
1928 * re-use them sometime in the future
1930 i40evf_free_all_rx_resources(adapter
);
1931 i40evf_free_all_tx_resources(adapter
);
1933 /* kill and reinit the admin queue */
1934 i40evf_shutdown_adminq(hw
);
1935 adapter
->current_op
= VIRTCHNL_OP_UNKNOWN
;
1936 err
= i40evf_init_adminq(hw
);
1938 dev_info(&adapter
->pdev
->dev
, "Failed to init adminq: %d\n",
1940 adapter
->aq_required
= 0;
1942 if (adapter
->flags
& I40EVF_FLAG_REINIT_ITR_NEEDED
) {
1943 err
= i40evf_reinit_interrupt_scheme(adapter
);
1948 adapter
->aq_required
|= I40EVF_FLAG_AQ_GET_CONFIG
;
1949 adapter
->aq_required
|= I40EVF_FLAG_AQ_MAP_VECTORS
;
1951 /* re-add all MAC filters */
1952 list_for_each_entry(f
, &adapter
->mac_filter_list
, list
) {
1955 /* re-add all VLAN filters */
1956 list_for_each_entry(vlf
, &adapter
->vlan_filter_list
, list
) {
1959 adapter
->aq_required
|= I40EVF_FLAG_AQ_ADD_MAC_FILTER
;
1960 adapter
->aq_required
|= I40EVF_FLAG_AQ_ADD_VLAN_FILTER
;
1961 clear_bit(__I40EVF_IN_CRITICAL_TASK
, &adapter
->crit_section
);
1962 clear_bit(__I40EVF_IN_CLIENT_TASK
, &adapter
->crit_section
);
1963 i40evf_misc_irq_enable(adapter
);
1965 mod_timer(&adapter
->watchdog_timer
, jiffies
+ 2);
1967 if (netif_running(adapter
->netdev
)) {
1968 /* allocate transmit descriptors */
1969 err
= i40evf_setup_all_tx_resources(adapter
);
1973 /* allocate receive descriptors */
1974 err
= i40evf_setup_all_rx_resources(adapter
);
1978 if (adapter
->flags
& I40EVF_FLAG_REINIT_ITR_NEEDED
) {
1979 err
= i40evf_request_traffic_irqs(adapter
,
1984 adapter
->flags
&= ~I40EVF_FLAG_REINIT_ITR_NEEDED
;
1987 i40evf_configure(adapter
);
1989 i40evf_up_complete(adapter
);
1991 i40evf_irq_enable(adapter
, true);
1993 adapter
->state
= __I40EVF_DOWN
;
1994 wake_up(&adapter
->down_waitqueue
);
1999 dev_err(&adapter
->pdev
->dev
, "failed to allocate resources during reinit\n");
2000 i40evf_close(netdev
);
2004 * i40evf_adminq_task - worker thread to clean the admin queue
2005 * @work: pointer to work_struct containing our data
2007 static void i40evf_adminq_task(struct work_struct
*work
)
2009 struct i40evf_adapter
*adapter
=
2010 container_of(work
, struct i40evf_adapter
, adminq_task
);
2011 struct i40e_hw
*hw
= &adapter
->hw
;
2012 struct i40e_arq_event_info event
;
2013 enum virtchnl_ops v_op
;
2014 i40e_status ret
, v_ret
;
2018 if (adapter
->flags
& I40EVF_FLAG_PF_COMMS_FAILED
)
2021 event
.buf_len
= I40EVF_MAX_AQ_BUF_SIZE
;
2022 event
.msg_buf
= kzalloc(event
.buf_len
, GFP_KERNEL
);
2027 ret
= i40evf_clean_arq_element(hw
, &event
, &pending
);
2028 v_op
= (enum virtchnl_ops
)le32_to_cpu(event
.desc
.cookie_high
);
2029 v_ret
= (i40e_status
)le32_to_cpu(event
.desc
.cookie_low
);
2032 break; /* No event to process or error cleaning ARQ */
2034 i40evf_virtchnl_completion(adapter
, v_op
, v_ret
, event
.msg_buf
,
2037 memset(event
.msg_buf
, 0, I40EVF_MAX_AQ_BUF_SIZE
);
2040 if ((adapter
->flags
&
2041 (I40EVF_FLAG_RESET_PENDING
| I40EVF_FLAG_RESET_NEEDED
)) ||
2042 adapter
->state
== __I40EVF_RESETTING
)
2045 /* check for error indications */
2046 val
= rd32(hw
, hw
->aq
.arq
.len
);
2047 if (val
== 0xdeadbeef) /* indicates device in reset */
2050 if (val
& I40E_VF_ARQLEN1_ARQVFE_MASK
) {
2051 dev_info(&adapter
->pdev
->dev
, "ARQ VF Error detected\n");
2052 val
&= ~I40E_VF_ARQLEN1_ARQVFE_MASK
;
2054 if (val
& I40E_VF_ARQLEN1_ARQOVFL_MASK
) {
2055 dev_info(&adapter
->pdev
->dev
, "ARQ Overflow Error detected\n");
2056 val
&= ~I40E_VF_ARQLEN1_ARQOVFL_MASK
;
2058 if (val
& I40E_VF_ARQLEN1_ARQCRIT_MASK
) {
2059 dev_info(&adapter
->pdev
->dev
, "ARQ Critical Error detected\n");
2060 val
&= ~I40E_VF_ARQLEN1_ARQCRIT_MASK
;
2063 wr32(hw
, hw
->aq
.arq
.len
, val
);
2065 val
= rd32(hw
, hw
->aq
.asq
.len
);
2067 if (val
& I40E_VF_ATQLEN1_ATQVFE_MASK
) {
2068 dev_info(&adapter
->pdev
->dev
, "ASQ VF Error detected\n");
2069 val
&= ~I40E_VF_ATQLEN1_ATQVFE_MASK
;
2071 if (val
& I40E_VF_ATQLEN1_ATQOVFL_MASK
) {
2072 dev_info(&adapter
->pdev
->dev
, "ASQ Overflow Error detected\n");
2073 val
&= ~I40E_VF_ATQLEN1_ATQOVFL_MASK
;
2075 if (val
& I40E_VF_ATQLEN1_ATQCRIT_MASK
) {
2076 dev_info(&adapter
->pdev
->dev
, "ASQ Critical Error detected\n");
2077 val
&= ~I40E_VF_ATQLEN1_ATQCRIT_MASK
;
2080 wr32(hw
, hw
->aq
.asq
.len
, val
);
2083 kfree(event
.msg_buf
);
2085 /* re-enable Admin queue interrupt cause */
2086 i40evf_misc_irq_enable(adapter
);
2090 * i40evf_client_task - worker thread to perform client work
2091 * @work: pointer to work_struct containing our data
2093 * This task handles client interactions. Because client calls can be
2094 * reentrant, we can't handle them in the watchdog.
2096 static void i40evf_client_task(struct work_struct
*work
)
2098 struct i40evf_adapter
*adapter
=
2099 container_of(work
, struct i40evf_adapter
, client_task
.work
);
2101 /* If we can't get the client bit, just give up. We'll be rescheduled
2105 if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK
, &adapter
->crit_section
))
2108 if (adapter
->flags
& I40EVF_FLAG_SERVICE_CLIENT_REQUESTED
) {
2109 i40evf_client_subtask(adapter
);
2110 adapter
->flags
&= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED
;
2113 if (adapter
->flags
& I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS
) {
2114 i40evf_notify_client_l2_params(&adapter
->vsi
);
2115 adapter
->flags
&= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS
;
2118 if (adapter
->flags
& I40EVF_FLAG_CLIENT_NEEDS_CLOSE
) {
2119 i40evf_notify_client_close(&adapter
->vsi
, false);
2120 adapter
->flags
&= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE
;
2123 if (adapter
->flags
& I40EVF_FLAG_CLIENT_NEEDS_OPEN
) {
2124 i40evf_notify_client_open(&adapter
->vsi
);
2125 adapter
->flags
&= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN
;
2128 clear_bit(__I40EVF_IN_CLIENT_TASK
, &adapter
->crit_section
);
2132 * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
2133 * @adapter: board private structure
2135 * Free all transmit software resources
2137 void i40evf_free_all_tx_resources(struct i40evf_adapter
*adapter
)
2141 if (!adapter
->tx_rings
)
2144 for (i
= 0; i
< adapter
->num_active_queues
; i
++)
2145 if (adapter
->tx_rings
[i
].desc
)
2146 i40evf_free_tx_resources(&adapter
->tx_rings
[i
]);
2150 * i40evf_setup_all_tx_resources - allocate all queues Tx resources
2151 * @adapter: board private structure
2153 * If this function returns with an error, then it's possible one or
2154 * more of the rings is populated (while the rest are not). It is the
2155 * callers duty to clean those orphaned rings.
2157 * Return 0 on success, negative on failure
2159 static int i40evf_setup_all_tx_resources(struct i40evf_adapter
*adapter
)
2163 for (i
= 0; i
< adapter
->num_active_queues
; i
++) {
2164 adapter
->tx_rings
[i
].count
= adapter
->tx_desc_count
;
2165 err
= i40evf_setup_tx_descriptors(&adapter
->tx_rings
[i
]);
2168 dev_err(&adapter
->pdev
->dev
,
2169 "Allocation for Tx Queue %u failed\n", i
);
2177 * i40evf_setup_all_rx_resources - allocate all queues Rx resources
2178 * @adapter: board private structure
2180 * If this function returns with an error, then it's possible one or
2181 * more of the rings is populated (while the rest are not). It is the
2182 * callers duty to clean those orphaned rings.
2184 * Return 0 on success, negative on failure
2186 static int i40evf_setup_all_rx_resources(struct i40evf_adapter
*adapter
)
2190 for (i
= 0; i
< adapter
->num_active_queues
; i
++) {
2191 adapter
->rx_rings
[i
].count
= adapter
->rx_desc_count
;
2192 err
= i40evf_setup_rx_descriptors(&adapter
->rx_rings
[i
]);
2195 dev_err(&adapter
->pdev
->dev
,
2196 "Allocation for Rx Queue %u failed\n", i
);
2203 * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
2204 * @adapter: board private structure
2206 * Free all receive software resources
2208 void i40evf_free_all_rx_resources(struct i40evf_adapter
*adapter
)
2212 if (!adapter
->rx_rings
)
2215 for (i
= 0; i
< adapter
->num_active_queues
; i
++)
2216 if (adapter
->rx_rings
[i
].desc
)
2217 i40evf_free_rx_resources(&adapter
->rx_rings
[i
]);
2221 * i40evf_open - Called when a network interface is made active
2222 * @netdev: network interface device structure
2224 * Returns 0 on success, negative value on failure
2226 * The open entry point is called when a network interface is made
2227 * active by the system (IFF_UP). At this point all resources needed
2228 * for transmit and receive operations are allocated, the interrupt
2229 * handler is registered with the OS, the watchdog timer is started,
2230 * and the stack is notified that the interface is ready.
2232 static int i40evf_open(struct net_device
*netdev
)
2234 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
2237 if (adapter
->flags
& I40EVF_FLAG_PF_COMMS_FAILED
) {
2238 dev_err(&adapter
->pdev
->dev
, "Unable to open device due to PF driver failure.\n");
2242 if (adapter
->state
!= __I40EVF_DOWN
)
2245 /* allocate transmit descriptors */
2246 err
= i40evf_setup_all_tx_resources(adapter
);
2250 /* allocate receive descriptors */
2251 err
= i40evf_setup_all_rx_resources(adapter
);
2255 /* clear any pending interrupts, may auto mask */
2256 err
= i40evf_request_traffic_irqs(adapter
, netdev
->name
);
2260 i40evf_add_filter(adapter
, adapter
->hw
.mac
.addr
);
2261 i40evf_configure(adapter
);
2263 i40evf_up_complete(adapter
);
2265 i40evf_irq_enable(adapter
, true);
2270 i40evf_down(adapter
);
2271 i40evf_free_traffic_irqs(adapter
);
2273 i40evf_free_all_rx_resources(adapter
);
2275 i40evf_free_all_tx_resources(adapter
);
2281 * i40evf_close - Disables a network interface
2282 * @netdev: network interface device structure
2284 * Returns 0, this is not allowed to fail
2286 * The close entry point is called when an interface is de-activated
2287 * by the OS. The hardware is still under the drivers control, but
2288 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
2289 * are freed, along with all transmit and receive resources.
2291 static int i40evf_close(struct net_device
*netdev
)
2293 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
2296 if (adapter
->state
<= __I40EVF_DOWN_PENDING
)
2300 set_bit(__I40E_VSI_DOWN
, adapter
->vsi
.state
);
2301 if (CLIENT_ENABLED(adapter
))
2302 adapter
->flags
|= I40EVF_FLAG_CLIENT_NEEDS_CLOSE
;
2304 i40evf_down(adapter
);
2305 adapter
->state
= __I40EVF_DOWN_PENDING
;
2306 i40evf_free_traffic_irqs(adapter
);
2308 /* We explicitly don't free resources here because the hardware is
2309 * still active and can DMA into memory. Resources are cleared in
2310 * i40evf_virtchnl_completion() after we get confirmation from the PF
2311 * driver that the rings have been stopped.
2313 * Also, we wait for state to transition to __I40EVF_DOWN before
2314 * returning. State change occurs in i40evf_virtchnl_completion() after
2315 * VF resources are released (which occurs after PF driver processes and
2316 * responds to admin queue commands).
2319 status
= wait_event_timeout(adapter
->down_waitqueue
,
2320 adapter
->state
== __I40EVF_DOWN
,
2321 msecs_to_jiffies(200));
2323 netdev_warn(netdev
, "Device resources not yet released\n");
2328 * i40evf_change_mtu - Change the Maximum Transfer Unit
2329 * @netdev: network interface device structure
2330 * @new_mtu: new value for maximum frame size
2332 * Returns 0 on success, negative on failure
2334 static int i40evf_change_mtu(struct net_device
*netdev
, int new_mtu
)
2336 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
2338 netdev
->mtu
= new_mtu
;
2339 if (CLIENT_ENABLED(adapter
)) {
2340 i40evf_notify_client_l2_params(&adapter
->vsi
);
2341 adapter
->flags
|= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED
;
2343 adapter
->flags
|= I40EVF_FLAG_RESET_NEEDED
;
2344 schedule_work(&adapter
->reset_task
);
2350 * i40e_set_features - set the netdev feature flags
2351 * @netdev: ptr to the netdev being adjusted
2352 * @features: the feature set that the stack is suggesting
2353 * Note: expects to be called while under rtnl_lock()
2355 static int i40evf_set_features(struct net_device
*netdev
,
2356 netdev_features_t features
)
2358 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
2360 if (!VLAN_ALLOWED(adapter
))
2363 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
2364 adapter
->aq_required
|= I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING
;
2366 adapter
->aq_required
|= I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING
;
2372 * i40evf_features_check - Validate encapsulated packet conforms to limits
2374 * @netdev: This physical port's netdev
2375 * @features: Offload features that the stack believes apply
2377 static netdev_features_t
i40evf_features_check(struct sk_buff
*skb
,
2378 struct net_device
*dev
,
2379 netdev_features_t features
)
2383 /* No point in doing any of this if neither checksum nor GSO are
2384 * being requested for this frame. We can rule out both by just
2385 * checking for CHECKSUM_PARTIAL
2387 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
2390 /* We cannot support GSO if the MSS is going to be less than
2391 * 64 bytes. If it is then we need to drop support for GSO.
2393 if (skb_is_gso(skb
) && (skb_shinfo(skb
)->gso_size
< 64))
2394 features
&= ~NETIF_F_GSO_MASK
;
2396 /* MACLEN can support at most 63 words */
2397 len
= skb_network_header(skb
) - skb
->data
;
2398 if (len
& ~(63 * 2))
2401 /* IPLEN and EIPLEN can support at most 127 dwords */
2402 len
= skb_transport_header(skb
) - skb_network_header(skb
);
2403 if (len
& ~(127 * 4))
2406 if (skb
->encapsulation
) {
2407 /* L4TUNLEN can support 127 words */
2408 len
= skb_inner_network_header(skb
) - skb_transport_header(skb
);
2409 if (len
& ~(127 * 2))
2412 /* IPLEN can support at most 127 dwords */
2413 len
= skb_inner_transport_header(skb
) -
2414 skb_inner_network_header(skb
);
2415 if (len
& ~(127 * 4))
2419 /* No need to validate L4LEN as TCP is the only protocol with a
2420 * a flexible value and we support all possible values supported
2421 * by TCP, which is at most 15 dwords
2426 return features
& ~(NETIF_F_CSUM_MASK
| NETIF_F_GSO_MASK
);
2430 * i40evf_fix_features - fix up the netdev feature bits
2431 * @netdev: our net device
2432 * @features: desired feature bits
2434 * Returns fixed-up features bits
2436 static netdev_features_t
i40evf_fix_features(struct net_device
*netdev
,
2437 netdev_features_t features
)
2439 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
2441 if (!(adapter
->vf_res
->vf_cap_flags
& VIRTCHNL_VF_OFFLOAD_VLAN
))
2442 features
&= ~(NETIF_F_HW_VLAN_CTAG_TX
|
2443 NETIF_F_HW_VLAN_CTAG_RX
|
2444 NETIF_F_HW_VLAN_CTAG_FILTER
);
2449 static const struct net_device_ops i40evf_netdev_ops
= {
2450 .ndo_open
= i40evf_open
,
2451 .ndo_stop
= i40evf_close
,
2452 .ndo_start_xmit
= i40evf_xmit_frame
,
2453 .ndo_set_rx_mode
= i40evf_set_rx_mode
,
2454 .ndo_validate_addr
= eth_validate_addr
,
2455 .ndo_set_mac_address
= i40evf_set_mac
,
2456 .ndo_change_mtu
= i40evf_change_mtu
,
2457 .ndo_tx_timeout
= i40evf_tx_timeout
,
2458 .ndo_vlan_rx_add_vid
= i40evf_vlan_rx_add_vid
,
2459 .ndo_vlan_rx_kill_vid
= i40evf_vlan_rx_kill_vid
,
2460 .ndo_features_check
= i40evf_features_check
,
2461 .ndo_fix_features
= i40evf_fix_features
,
2462 .ndo_set_features
= i40evf_set_features
,
2463 #ifdef CONFIG_NET_POLL_CONTROLLER
2464 .ndo_poll_controller
= i40evf_netpoll
,
2469 * i40evf_check_reset_complete - check that VF reset is complete
2470 * @hw: pointer to hw struct
2472 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
2474 static int i40evf_check_reset_complete(struct i40e_hw
*hw
)
2479 for (i
= 0; i
< 100; i
++) {
2480 rstat
= rd32(hw
, I40E_VFGEN_RSTAT
) &
2481 I40E_VFGEN_RSTAT_VFR_STATE_MASK
;
2482 if ((rstat
== VIRTCHNL_VFR_VFACTIVE
) ||
2483 (rstat
== VIRTCHNL_VFR_COMPLETED
))
2485 usleep_range(10, 20);
2491 * i40evf_process_config - Process the config information we got from the PF
2492 * @adapter: board private structure
2494 * Verify that we have a valid config struct, and set up our netdev features
2495 * and our VSI struct.
2497 int i40evf_process_config(struct i40evf_adapter
*adapter
)
2499 struct virtchnl_vf_resource
*vfres
= adapter
->vf_res
;
2500 int i
, num_req_queues
= adapter
->num_req_queues
;
2501 struct net_device
*netdev
= adapter
->netdev
;
2502 struct i40e_vsi
*vsi
= &adapter
->vsi
;
2503 netdev_features_t hw_enc_features
;
2504 netdev_features_t hw_features
;
2506 /* got VF config message back from PF, now we can parse it */
2507 for (i
= 0; i
< vfres
->num_vsis
; i
++) {
2508 if (vfres
->vsi_res
[i
].vsi_type
== VIRTCHNL_VSI_SRIOV
)
2509 adapter
->vsi_res
= &vfres
->vsi_res
[i
];
2511 if (!adapter
->vsi_res
) {
2512 dev_err(&adapter
->pdev
->dev
, "No LAN VSI found\n");
2516 if (num_req_queues
&&
2517 num_req_queues
!= adapter
->vsi_res
->num_queue_pairs
) {
2518 /* Problem. The PF gave us fewer queues than what we had
2519 * negotiated in our request. Need a reset to see if we can't
2520 * get back to a working state.
2522 dev_err(&adapter
->pdev
->dev
,
2523 "Requested %d queues, but PF only gave us %d.\n",
2525 adapter
->vsi_res
->num_queue_pairs
);
2526 adapter
->flags
|= I40EVF_FLAG_REINIT_ITR_NEEDED
;
2527 adapter
->num_req_queues
= adapter
->vsi_res
->num_queue_pairs
;
2528 i40evf_schedule_reset(adapter
);
2531 adapter
->num_req_queues
= 0;
2533 hw_enc_features
= NETIF_F_SG
|
2537 NETIF_F_SOFT_FEATURES
|
2546 /* advertise to stack only if offloads for encapsulated packets is
2549 if (vfres
->vf_cap_flags
& VIRTCHNL_VF_OFFLOAD_ENCAP
) {
2550 hw_enc_features
|= NETIF_F_GSO_UDP_TUNNEL
|
2552 NETIF_F_GSO_GRE_CSUM
|
2553 NETIF_F_GSO_IPXIP4
|
2554 NETIF_F_GSO_IPXIP6
|
2555 NETIF_F_GSO_UDP_TUNNEL_CSUM
|
2556 NETIF_F_GSO_PARTIAL
|
2559 if (!(vfres
->vf_cap_flags
&
2560 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM
))
2561 netdev
->gso_partial_features
|=
2562 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
2564 netdev
->gso_partial_features
|= NETIF_F_GSO_GRE_CSUM
;
2565 netdev
->hw_enc_features
|= NETIF_F_TSO_MANGLEID
;
2566 netdev
->hw_enc_features
|= hw_enc_features
;
2568 /* record features VLANs can make use of */
2569 netdev
->vlan_features
|= hw_enc_features
| NETIF_F_TSO_MANGLEID
;
2571 /* Write features and hw_features separately to avoid polluting
2572 * with, or dropping, features that are set when we registered.
2574 hw_features
= hw_enc_features
;
2576 /* Enable VLAN features if supported */
2577 if (vfres
->vf_cap_flags
& VIRTCHNL_VF_OFFLOAD_VLAN
)
2578 hw_features
|= (NETIF_F_HW_VLAN_CTAG_TX
|
2579 NETIF_F_HW_VLAN_CTAG_RX
);
2581 netdev
->hw_features
|= hw_features
;
2583 netdev
->features
|= hw_features
;
2585 if (vfres
->vf_cap_flags
& VIRTCHNL_VF_OFFLOAD_VLAN
)
2586 netdev
->features
|= NETIF_F_HW_VLAN_CTAG_FILTER
;
2588 adapter
->vsi
.id
= adapter
->vsi_res
->vsi_id
;
2590 adapter
->vsi
.back
= adapter
;
2591 adapter
->vsi
.base_vector
= 1;
2592 adapter
->vsi
.work_limit
= I40E_DEFAULT_IRQ_WORK
;
2593 vsi
->netdev
= adapter
->netdev
;
2594 vsi
->qs_handle
= adapter
->vsi_res
->qset_handle
;
2595 if (vfres
->vf_cap_flags
& VIRTCHNL_VF_OFFLOAD_RSS_PF
) {
2596 adapter
->rss_key_size
= vfres
->rss_key_size
;
2597 adapter
->rss_lut_size
= vfres
->rss_lut_size
;
2599 adapter
->rss_key_size
= I40EVF_HKEY_ARRAY_SIZE
;
2600 adapter
->rss_lut_size
= I40EVF_HLUT_ARRAY_SIZE
;
2607 * i40evf_init_task - worker thread to perform delayed initialization
2608 * @work: pointer to work_struct containing our data
2610 * This task completes the work that was begun in probe. Due to the nature
2611 * of VF-PF communications, we may need to wait tens of milliseconds to get
2612 * responses back from the PF. Rather than busy-wait in probe and bog down the
2613 * whole system, we'll do it in a task so we can sleep.
2614 * This task only runs during driver init. Once we've established
2615 * communications with the PF driver and set up our netdev, the watchdog
2618 static void i40evf_init_task(struct work_struct
*work
)
2620 struct i40evf_adapter
*adapter
= container_of(work
,
2621 struct i40evf_adapter
,
2623 struct net_device
*netdev
= adapter
->netdev
;
2624 struct i40e_hw
*hw
= &adapter
->hw
;
2625 struct pci_dev
*pdev
= adapter
->pdev
;
2628 switch (adapter
->state
) {
2629 case __I40EVF_STARTUP
:
2630 /* driver loaded, probe complete */
2631 adapter
->flags
&= ~I40EVF_FLAG_PF_COMMS_FAILED
;
2632 adapter
->flags
&= ~I40EVF_FLAG_RESET_PENDING
;
2633 err
= i40e_set_mac_type(hw
);
2635 dev_err(&pdev
->dev
, "Failed to set MAC type (%d)\n",
2639 err
= i40evf_check_reset_complete(hw
);
2641 dev_info(&pdev
->dev
, "Device is still in reset (%d), retrying\n",
2645 hw
->aq
.num_arq_entries
= I40EVF_AQ_LEN
;
2646 hw
->aq
.num_asq_entries
= I40EVF_AQ_LEN
;
2647 hw
->aq
.arq_buf_size
= I40EVF_MAX_AQ_BUF_SIZE
;
2648 hw
->aq
.asq_buf_size
= I40EVF_MAX_AQ_BUF_SIZE
;
2650 err
= i40evf_init_adminq(hw
);
2652 dev_err(&pdev
->dev
, "Failed to init Admin Queue (%d)\n",
2656 err
= i40evf_send_api_ver(adapter
);
2658 dev_err(&pdev
->dev
, "Unable to send to PF (%d)\n", err
);
2659 i40evf_shutdown_adminq(hw
);
2662 adapter
->state
= __I40EVF_INIT_VERSION_CHECK
;
2664 case __I40EVF_INIT_VERSION_CHECK
:
2665 if (!i40evf_asq_done(hw
)) {
2666 dev_err(&pdev
->dev
, "Admin queue command never completed\n");
2667 i40evf_shutdown_adminq(hw
);
2668 adapter
->state
= __I40EVF_STARTUP
;
2672 /* aq msg sent, awaiting reply */
2673 err
= i40evf_verify_api_ver(adapter
);
2675 if (err
== I40E_ERR_ADMIN_QUEUE_NO_WORK
)
2676 err
= i40evf_send_api_ver(adapter
);
2678 dev_err(&pdev
->dev
, "Unsupported PF API version %d.%d, expected %d.%d\n",
2679 adapter
->pf_version
.major
,
2680 adapter
->pf_version
.minor
,
2681 VIRTCHNL_VERSION_MAJOR
,
2682 VIRTCHNL_VERSION_MINOR
);
2685 err
= i40evf_send_vf_config_msg(adapter
);
2687 dev_err(&pdev
->dev
, "Unable to send config request (%d)\n",
2691 adapter
->state
= __I40EVF_INIT_GET_RESOURCES
;
2693 case __I40EVF_INIT_GET_RESOURCES
:
2694 /* aq msg sent, awaiting reply */
2695 if (!adapter
->vf_res
) {
2696 bufsz
= sizeof(struct virtchnl_vf_resource
) +
2698 sizeof(struct virtchnl_vsi_resource
));
2699 adapter
->vf_res
= kzalloc(bufsz
, GFP_KERNEL
);
2700 if (!adapter
->vf_res
)
2703 err
= i40evf_get_vf_config(adapter
);
2704 if (err
== I40E_ERR_ADMIN_QUEUE_NO_WORK
) {
2705 err
= i40evf_send_vf_config_msg(adapter
);
2707 } else if (err
== I40E_ERR_PARAM
) {
2708 /* We only get ERR_PARAM if the device is in a very bad
2709 * state or if we've been disabled for previous bad
2710 * behavior. Either way, we're done now.
2712 i40evf_shutdown_adminq(hw
);
2713 dev_err(&pdev
->dev
, "Unable to get VF config due to PF error condition, not retrying\n");
2717 dev_err(&pdev
->dev
, "Unable to get VF config (%d)\n",
2721 adapter
->state
= __I40EVF_INIT_SW
;
2727 if (i40evf_process_config(adapter
))
2729 adapter
->current_op
= VIRTCHNL_OP_UNKNOWN
;
2731 adapter
->flags
|= I40EVF_FLAG_RX_CSUM_ENABLED
;
2733 netdev
->netdev_ops
= &i40evf_netdev_ops
;
2734 i40evf_set_ethtool_ops(netdev
);
2735 netdev
->watchdog_timeo
= 5 * HZ
;
2737 /* MTU range: 68 - 9710 */
2738 netdev
->min_mtu
= ETH_MIN_MTU
;
2739 netdev
->max_mtu
= I40E_MAX_RXBUFFER
- I40E_PACKET_HDR_PAD
;
2741 if (!is_valid_ether_addr(adapter
->hw
.mac
.addr
)) {
2742 dev_info(&pdev
->dev
, "Invalid MAC address %pM, using random\n",
2743 adapter
->hw
.mac
.addr
);
2744 eth_hw_addr_random(netdev
);
2745 ether_addr_copy(adapter
->hw
.mac
.addr
, netdev
->dev_addr
);
2747 adapter
->flags
|= I40EVF_FLAG_ADDR_SET_BY_PF
;
2748 ether_addr_copy(netdev
->dev_addr
, adapter
->hw
.mac
.addr
);
2749 ether_addr_copy(netdev
->perm_addr
, adapter
->hw
.mac
.addr
);
2752 timer_setup(&adapter
->watchdog_timer
, i40evf_watchdog_timer
, 0);
2753 mod_timer(&adapter
->watchdog_timer
, jiffies
+ 1);
2755 adapter
->tx_desc_count
= I40EVF_DEFAULT_TXD
;
2756 adapter
->rx_desc_count
= I40EVF_DEFAULT_RXD
;
2757 err
= i40evf_init_interrupt_scheme(adapter
);
2760 i40evf_map_rings_to_vectors(adapter
);
2761 if (adapter
->vf_res
->vf_cap_flags
&
2762 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
)
2763 adapter
->flags
|= I40EVF_FLAG_WB_ON_ITR_CAPABLE
;
2765 err
= i40evf_request_misc_irq(adapter
);
2769 netif_carrier_off(netdev
);
2770 adapter
->link_up
= false;
2772 if (!adapter
->netdev_registered
) {
2773 err
= register_netdev(netdev
);
2778 adapter
->netdev_registered
= true;
2780 netif_tx_stop_all_queues(netdev
);
2781 if (CLIENT_ALLOWED(adapter
)) {
2782 err
= i40evf_lan_add_device(adapter
);
2784 dev_info(&pdev
->dev
, "Failed to add VF to client API service list: %d\n",
2788 dev_info(&pdev
->dev
, "MAC address: %pM\n", adapter
->hw
.mac
.addr
);
2789 if (netdev
->features
& NETIF_F_GRO
)
2790 dev_info(&pdev
->dev
, "GRO is enabled\n");
2792 adapter
->state
= __I40EVF_DOWN
;
2793 set_bit(__I40E_VSI_DOWN
, adapter
->vsi
.state
);
2794 i40evf_misc_irq_enable(adapter
);
2795 wake_up(&adapter
->down_waitqueue
);
2797 adapter
->rss_key
= kzalloc(adapter
->rss_key_size
, GFP_KERNEL
);
2798 adapter
->rss_lut
= kzalloc(adapter
->rss_lut_size
, GFP_KERNEL
);
2799 if (!adapter
->rss_key
|| !adapter
->rss_lut
)
2802 if (RSS_AQ(adapter
)) {
2803 adapter
->aq_required
|= I40EVF_FLAG_AQ_CONFIGURE_RSS
;
2804 mod_timer_pending(&adapter
->watchdog_timer
, jiffies
+ 1);
2806 i40evf_init_rss(adapter
);
2810 schedule_delayed_work(&adapter
->init_task
, msecs_to_jiffies(30));
2813 i40evf_free_rss(adapter
);
2815 i40evf_free_misc_irq(adapter
);
2817 i40evf_reset_interrupt_capability(adapter
);
2819 kfree(adapter
->vf_res
);
2820 adapter
->vf_res
= NULL
;
2822 /* Things went into the weeds, so try again later */
2823 if (++adapter
->aq_wait_count
> I40EVF_AQ_MAX_ERR
) {
2824 dev_err(&pdev
->dev
, "Failed to communicate with PF; waiting before retry\n");
2825 adapter
->flags
|= I40EVF_FLAG_PF_COMMS_FAILED
;
2826 i40evf_shutdown_adminq(hw
);
2827 adapter
->state
= __I40EVF_STARTUP
;
2828 schedule_delayed_work(&adapter
->init_task
, HZ
* 5);
2831 schedule_delayed_work(&adapter
->init_task
, HZ
);
2835 * i40evf_shutdown - Shutdown the device in preparation for a reboot
2836 * @pdev: pci device structure
2838 static void i40evf_shutdown(struct pci_dev
*pdev
)
2840 struct net_device
*netdev
= pci_get_drvdata(pdev
);
2841 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
2843 netif_device_detach(netdev
);
2845 if (netif_running(netdev
))
2846 i40evf_close(netdev
);
2848 /* Prevent the watchdog from running. */
2849 adapter
->state
= __I40EVF_REMOVE
;
2850 adapter
->aq_required
= 0;
2853 pci_save_state(pdev
);
2856 pci_disable_device(pdev
);
2860 * i40evf_probe - Device Initialization Routine
2861 * @pdev: PCI device information struct
2862 * @ent: entry in i40evf_pci_tbl
2864 * Returns 0 on success, negative on failure
2866 * i40evf_probe initializes an adapter identified by a pci_dev structure.
2867 * The OS initialization, configuring of the adapter private structure,
2868 * and a hardware reset occur.
2870 static int i40evf_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
2872 struct net_device
*netdev
;
2873 struct i40evf_adapter
*adapter
= NULL
;
2874 struct i40e_hw
*hw
= NULL
;
2877 err
= pci_enable_device(pdev
);
2881 err
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64));
2883 err
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
2886 "DMA configuration failed: 0x%x\n", err
);
2891 err
= pci_request_regions(pdev
, i40evf_driver_name
);
2894 "pci_request_regions failed 0x%x\n", err
);
2898 pci_enable_pcie_error_reporting(pdev
);
2900 pci_set_master(pdev
);
2902 netdev
= alloc_etherdev_mq(sizeof(struct i40evf_adapter
), MAX_QUEUES
);
2905 goto err_alloc_etherdev
;
2908 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
2910 pci_set_drvdata(pdev
, netdev
);
2911 adapter
= netdev_priv(netdev
);
2913 adapter
->netdev
= netdev
;
2914 adapter
->pdev
= pdev
;
2919 adapter
->msg_enable
= BIT(DEFAULT_DEBUG_LEVEL_SHIFT
) - 1;
2920 adapter
->state
= __I40EVF_STARTUP
;
2922 /* Call save state here because it relies on the adapter struct. */
2923 pci_save_state(pdev
);
2925 hw
->hw_addr
= ioremap(pci_resource_start(pdev
, 0),
2926 pci_resource_len(pdev
, 0));
2931 hw
->vendor_id
= pdev
->vendor
;
2932 hw
->device_id
= pdev
->device
;
2933 pci_read_config_byte(pdev
, PCI_REVISION_ID
, &hw
->revision_id
);
2934 hw
->subsystem_vendor_id
= pdev
->subsystem_vendor
;
2935 hw
->subsystem_device_id
= pdev
->subsystem_device
;
2936 hw
->bus
.device
= PCI_SLOT(pdev
->devfn
);
2937 hw
->bus
.func
= PCI_FUNC(pdev
->devfn
);
2938 hw
->bus
.bus_id
= pdev
->bus
->number
;
2940 /* set up the locks for the AQ, do this only once in probe
2941 * and destroy them only once in remove
2943 mutex_init(&hw
->aq
.asq_mutex
);
2944 mutex_init(&hw
->aq
.arq_mutex
);
2946 INIT_LIST_HEAD(&adapter
->mac_filter_list
);
2947 INIT_LIST_HEAD(&adapter
->vlan_filter_list
);
2949 INIT_WORK(&adapter
->reset_task
, i40evf_reset_task
);
2950 INIT_WORK(&adapter
->adminq_task
, i40evf_adminq_task
);
2951 INIT_WORK(&adapter
->watchdog_task
, i40evf_watchdog_task
);
2952 INIT_DELAYED_WORK(&adapter
->client_task
, i40evf_client_task
);
2953 INIT_DELAYED_WORK(&adapter
->init_task
, i40evf_init_task
);
2954 schedule_delayed_work(&adapter
->init_task
,
2955 msecs_to_jiffies(5 * (pdev
->devfn
& 0x07)));
2957 /* Setup the wait queue for indicating transition to down status */
2958 init_waitqueue_head(&adapter
->down_waitqueue
);
2963 free_netdev(netdev
);
2965 pci_release_regions(pdev
);
2968 pci_disable_device(pdev
);
2974 * i40evf_suspend - Power management suspend routine
2975 * @pdev: PCI device information struct
2978 * Called when the system (VM) is entering sleep/suspend.
2980 static int i40evf_suspend(struct pci_dev
*pdev
, pm_message_t state
)
2982 struct net_device
*netdev
= pci_get_drvdata(pdev
);
2983 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
2986 netif_device_detach(netdev
);
2988 if (netif_running(netdev
)) {
2990 i40evf_down(adapter
);
2993 i40evf_free_misc_irq(adapter
);
2994 i40evf_reset_interrupt_capability(adapter
);
2996 retval
= pci_save_state(pdev
);
3000 pci_disable_device(pdev
);
3006 * i40evf_resume - Power management resume routine
3007 * @pdev: PCI device information struct
3009 * Called when the system (VM) is resumed from sleep/suspend.
3011 static int i40evf_resume(struct pci_dev
*pdev
)
3013 struct i40evf_adapter
*adapter
= pci_get_drvdata(pdev
);
3014 struct net_device
*netdev
= adapter
->netdev
;
3017 pci_set_power_state(pdev
, PCI_D0
);
3018 pci_restore_state(pdev
);
3019 /* pci_restore_state clears dev->state_saved so call
3020 * pci_save_state to restore it.
3022 pci_save_state(pdev
);
3024 err
= pci_enable_device_mem(pdev
);
3026 dev_err(&pdev
->dev
, "Cannot enable PCI device from suspend.\n");
3029 pci_set_master(pdev
);
3032 err
= i40evf_set_interrupt_capability(adapter
);
3035 dev_err(&pdev
->dev
, "Cannot enable MSI-X interrupts.\n");
3038 err
= i40evf_request_misc_irq(adapter
);
3041 dev_err(&pdev
->dev
, "Cannot get interrupt vector.\n");
3045 schedule_work(&adapter
->reset_task
);
3047 netif_device_attach(netdev
);
3052 #endif /* CONFIG_PM */
3054 * i40evf_remove - Device Removal Routine
3055 * @pdev: PCI device information struct
3057 * i40evf_remove is called by the PCI subsystem to alert the driver
3058 * that it should release a PCI device. The could be caused by a
3059 * Hot-Plug event, or because the driver is going to be removed from
3062 static void i40evf_remove(struct pci_dev
*pdev
)
3064 struct net_device
*netdev
= pci_get_drvdata(pdev
);
3065 struct i40evf_adapter
*adapter
= netdev_priv(netdev
);
3066 struct i40evf_mac_filter
*f
, *ftmp
;
3067 struct i40e_hw
*hw
= &adapter
->hw
;
3070 cancel_delayed_work_sync(&adapter
->init_task
);
3071 cancel_work_sync(&adapter
->reset_task
);
3072 cancel_delayed_work_sync(&adapter
->client_task
);
3073 if (adapter
->netdev_registered
) {
3074 unregister_netdev(netdev
);
3075 adapter
->netdev_registered
= false;
3077 if (CLIENT_ALLOWED(adapter
)) {
3078 err
= i40evf_lan_del_device(adapter
);
3080 dev_warn(&pdev
->dev
, "Failed to delete client device: %d\n",
3084 /* Shut down all the garbage mashers on the detention level */
3085 adapter
->state
= __I40EVF_REMOVE
;
3086 adapter
->aq_required
= 0;
3087 i40evf_request_reset(adapter
);
3089 /* If the FW isn't responding, kick it once, but only once. */
3090 if (!i40evf_asq_done(hw
)) {
3091 i40evf_request_reset(adapter
);
3094 i40evf_free_all_tx_resources(adapter
);
3095 i40evf_free_all_rx_resources(adapter
);
3096 i40evf_misc_irq_disable(adapter
);
3097 i40evf_free_misc_irq(adapter
);
3098 i40evf_reset_interrupt_capability(adapter
);
3099 i40evf_free_q_vectors(adapter
);
3101 if (adapter
->watchdog_timer
.function
)
3102 del_timer_sync(&adapter
->watchdog_timer
);
3104 flush_scheduled_work();
3106 i40evf_free_rss(adapter
);
3108 if (hw
->aq
.asq
.count
)
3109 i40evf_shutdown_adminq(hw
);
3111 /* destroy the locks only once, here */
3112 mutex_destroy(&hw
->aq
.arq_mutex
);
3113 mutex_destroy(&hw
->aq
.asq_mutex
);
3115 iounmap(hw
->hw_addr
);
3116 pci_release_regions(pdev
);
3117 i40evf_free_all_tx_resources(adapter
);
3118 i40evf_free_all_rx_resources(adapter
);
3119 i40evf_free_queues(adapter
);
3120 kfree(adapter
->vf_res
);
3121 /* If we got removed before an up/down sequence, we've got a filter
3122 * hanging out there that we need to get rid of.
3124 list_for_each_entry_safe(f
, ftmp
, &adapter
->mac_filter_list
, list
) {
3128 list_for_each_entry_safe(f
, ftmp
, &adapter
->vlan_filter_list
, list
) {
3133 free_netdev(netdev
);
3135 pci_disable_pcie_error_reporting(pdev
);
3137 pci_disable_device(pdev
);
3140 static struct pci_driver i40evf_driver
= {
3141 .name
= i40evf_driver_name
,
3142 .id_table
= i40evf_pci_tbl
,
3143 .probe
= i40evf_probe
,
3144 .remove
= i40evf_remove
,
3146 .suspend
= i40evf_suspend
,
3147 .resume
= i40evf_resume
,
3149 .shutdown
= i40evf_shutdown
,
3153 * i40e_init_module - Driver Registration Routine
3155 * i40e_init_module is the first routine called when the driver is
3156 * loaded. All it does is register with the PCI subsystem.
3158 static int __init
i40evf_init_module(void)
3162 pr_info("i40evf: %s - version %s\n", i40evf_driver_string
,
3163 i40evf_driver_version
);
3165 pr_info("%s\n", i40evf_copyright
);
3167 i40evf_wq
= alloc_workqueue("%s", WQ_UNBOUND
| WQ_MEM_RECLAIM
, 1,
3168 i40evf_driver_name
);
3170 pr_err("%s: Failed to create workqueue\n", i40evf_driver_name
);
3173 ret
= pci_register_driver(&i40evf_driver
);
3177 module_init(i40evf_init_module
);
3180 * i40e_exit_module - Driver Exit Cleanup Routine
3182 * i40e_exit_module is called just before the driver is removed
3185 static void __exit
i40evf_exit_module(void)
3187 pci_unregister_driver(&i40evf_driver
);
3188 destroy_workqueue(i40evf_wq
);
3191 module_exit(i40evf_exit_module
);