/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40evf.h"
#include "i40e_prototype.h"
#include "i40evf_client.h"
/* All i40evf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);

char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
	"Intel(R) 40-10 Gigabit Virtual Function Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 3
#define DRV_VERSION_MINOR 0
#define DRV_VERSION_BUILD 1
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) \
	DRV_KERN
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
	"Copyright (c) 2013 - 2015 Intel Corporation.";

/* i40evf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40evf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40evf_wq;

/**
 * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
				      struct i40e_dma_mem *mem,
				      u64 size, u32 alignment)
{
	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;

	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}
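
/* Note on the _d-suffixed helpers here and below: they are the OS-dependent
 * glue that the shared admin-queue code reaches through wrapper macros. The
 * convention in this driver family is that i40e_allocate_dma_mem(hw, mem,
 * type, size, align) expands to i40evf_allocate_dma_mem_d(hw, mem, size,
 * align), and likewise for the free and virt-mem variants; see
 * i40e_osdep.h for the exact macro mapping.
 */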

/**
 * i40evf_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;

	if (!mem || !mem->va)
		return I40E_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
				       struct i40e_virt_mem *mem, u32 size)
{
	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}

/**
 * i40evf_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
				   struct i40e_virt_mem *mem)
{
	if (!mem)
		return I40E_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}

/**
 * i40evf_debug_d - OS dependent version of debug printing
 * @hw: pointer to the HW structure
 * @mask: debug level mask
 * @fmt_str: printf-type format description
 **/
void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
{
	char buf[512];
	va_list argptr;

	if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
		return;

	va_start(argptr, fmt_str);
	vsnprintf(buf, sizeof(buf), fmt_str, argptr);
	va_end(argptr);

	/* the debug string is already formatted with a newline */
	pr_info("%s", buf);
}

/**
 * i40evf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void i40evf_schedule_reset(struct i40evf_adapter *adapter)
{
	if (!(adapter->flags &
	      (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
		schedule_work(&adapter->reset_task);
	}
}

/**
 * i40evf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void i40evf_tx_timeout(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	i40evf_schedule_reset(adapter);
}

/**
 * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, I40E_VFINT_DYN_CTL01, 0);

	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * i40evf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;

	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
				       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);

	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * i40evf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void i40evf_irq_disable(struct i40evf_adapter *adapter)
{
	int i;
	struct i40e_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * i40evf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
{
	struct i40e_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
			     I40E_VFINT_DYN_CTLN1_INTENA_MASK |
			     I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
			     I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
		}
	}
}
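
/* Vector layout reminder: MSI-X vector 0 is reserved for the admin queue,
 * so queue interrupts live on vectors 1..num_msix_vectors-1 and bit (i - 1)
 * of the mask selects vector i. Passing ~0, as i40evf_irq_enable() does
 * below, therefore enables every traffic vector in one call.
 */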

/**
 * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
 * @adapter: board private structure
 * @mask: bitmap of vectors to trigger
 **/
static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
{
	struct i40e_hw *hw = &adapter->hw;
	int i;
	u32 dyn_ctl;

	if (mask & 1) {
		dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
		dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
			   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
			   I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
		wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
	}
	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i)) {
			dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
			dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
				   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
				   I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
		}
	}
}

/**
 * i40evf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: whether to flush the register writes with a read
 **/
void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
{
	struct i40e_hw *hw = &adapter->hw;

	i40evf_misc_irq_enable(adapter);
	i40evf_irq_enable_queues(adapter, ~0);

	if (flush)
		rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * i40evf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t i40evf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40e_hw *hw = &adapter->hw;
	u32 val;

	/* handle non-queue interrupts, these reads clear the registers */
	val = rd32(hw, I40E_VFINT_ICR01);
	val = rd32(hw, I40E_VFINT_ICR0_ENA1);

	val = rd32(hw, I40E_VFINT_DYN_CTL01) |
	      I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
	wr32(hw, I40E_VFINT_DYN_CTL01, val);

	/* schedule work on the private workqueue */
	schedule_work(&adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}
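
/* Design note: napi_schedule_irqoff() is used instead of napi_schedule()
 * because this handler runs in hard-irq context with interrupts already
 * disabled, letting NAPI skip the local_irq_save/restore pair. All real Tx
 * and Rx work then happens in i40evf_napi_poll() in softirq context.
 */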

/**
 * i40evf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
{
	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct i40e_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
	wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, v_idx - 1), q_vector->rx.itr);
}

/**
 * i40evf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
{
	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct i40e_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.latency_range = I40E_LOW_LATENCY;
	q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
	q_vector->num_ringpairs++;
	wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, v_idx - 1), q_vector->tx.itr);
}

/**
 * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		i40evf_map_vector_to_rxq(adapter, vidx, ridx);
		i40evf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
}
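
/* Worked example of the round-robin above: with 4 active queue pairs but
 * only 2 queue vectors, the mapping comes out as vector 0 <- {q0, q2} and
 * vector 1 <- {q1, q3}; with 4 or more vectors it is simply one queue pair
 * per vector.
 */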

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40evf_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
static void i40evf_netpoll(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
		return;

	for (i = 0; i < q_vectors; i++)
		i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
}

#endif
/**
 * i40evf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
				       const cpumask_t *mask)
{
	struct i40e_q_vector *q_vector =
		container_of(notify, struct i40e_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * i40evf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void i40evf_irq_affinity_release(struct kref *ref) {}

/**
 * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename used to build the per-vector interrupt names
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	i40evf_irq_disable(adapter);
	/* Decrement for the non-queue (admin queue/misc) vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "i40evf-%s-TxRx-%d", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "i40evf-%s-rx-%d", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "i40evf-%s-tx-%d", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  i40evf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
		q_vector->affinity_notify.release =
			i40evf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_set_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}
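
/* Naming example: on a netdev called eth0 with combined queues, the vectors
 * registered above show up in /proc/interrupts as i40evf-eth0-TxRx-0,
 * i40evf-eth0-TxRx-1, and so on, which is what makes per-queue interrupt
 * accounting and manual affinity tuning practical.
 */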

/**
 * i40evf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &i40evf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * i40evf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * i40evf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void i40evf_configure_tx(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
}

/**
 * i40evf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{
	unsigned int rx_buf_len = I40E_RXBUFFER_2048;
	struct i40e_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = I40E_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu. On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & I40EVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}
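
/* Buffer-size selection in practice: on a 4K-page system with build_skb
 * enabled (no I40EVF_FLAG_LEGACY_RX), a standard 1500-byte MTU gets the
 * 1536-byte buffer (less NET_IP_ALIGN), small enough that a buffer plus
 * skb_shared_info fits in half a page, while jumbo MTUs get 3072-byte
 * buffers carved from order-1 pages. Legacy Rx, and systems with pages of
 * 8K or more, keep the 2048-byte default.
 */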

/**
 * i40evf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL
 **/
static struct
i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (vlan == f->vlan)
			return f;
	}
	return NULL;
}

/**
 * i40evf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f = NULL;
	int count = 50;

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0)
			goto out;
	}

	f = i40evf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
out:
	return f;
}
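
/* Locking pattern: the __I40EVF_IN_CRITICAL_TASK bit serves as a crude
 * bit-spinlock around the filter lists. Paths like i40evf_add_vlan() bound
 * the wait (50 iterations of udelay(1)) and report failure rather than
 * sleep; the GFP_ATOMIC allocation above suggests these can be entered
 * from atomic context where blocking would not be safe.
 */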

/**
 * i40evf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f;
	int count = 50;

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0)
			return;
	}

	f = i40evf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
	}
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
/**
 * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: VLAN protocol (unused; 802.1Q is assumed)
 * @vid: VLAN tag
 **/
static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_ALLOWED(adapter))
		return -EIO;
	if (i40evf_add_vlan(adapter, vid) == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: VLAN protocol (unused; 802.1Q is assumed)
 * @vid: VLAN tag
 **/
static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
				   __always_unused __be16 proto, u16 vid)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	if (VLAN_ALLOWED(adapter)) {
		i40evf_del_vlan(adapter, vid);
		return 0;
	}
	return -EIO;
}
/**
 * i40evf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL
 **/
static struct
i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
				      u8 *macaddr)
{
	struct i40evf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}
/**
 * i40evf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
				     u8 *macaddr)
{
	struct i40evf_mac_filter *f;
	int count = 50;

	if (!macaddr)
		return NULL;

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0)
			return NULL;
	}

	f = i40evf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f) {
			clear_bit(__I40EVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			return NULL;
		}

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	return f;
}

/**
 * i40evf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_set_mac(struct net_device *netdev, void *p)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40e_hw *hw = &adapter->hw;
	struct i40evf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
		return -EPERM;

	f = i40evf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = i40evf_add_filter(adapter, addr->sa_data);
	if (f) {
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
	}

	return (f == NULL) ? -ENOMEM : 0;
}

/**
 * i40evf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40evf_set_rx_mode(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40evf_mac_filter *f, *ftmp;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;
	int count = 50;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		i40evf_add_filter(adapter, uca->addr);
	}
	netdev_for_each_mc_addr(mca, netdev) {
		i40evf_add_filter(adapter, mca->addr);
	}

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0) {
			dev_err(&adapter->pdev->dev,
				"Failed to get lock in %s\n", __func__);
			return;
		}
	}
	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		netdev_for_each_mc_addr(mca, netdev)
			if (ether_addr_equal(mca->addr, f->macaddr))
				goto bottom_of_search_loop;

		netdev_for_each_uc_addr(uca, netdev)
			if (ether_addr_equal(uca->addr, f->macaddr))
				goto bottom_of_search_loop;

		for_each_dev_addr(netdev, ha)
			if (ether_addr_equal(ha->addr, f->macaddr))
				goto bottom_of_search_loop;

		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
			goto bottom_of_search_loop;

		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;

bottom_of_search_loop:
		continue;
	}

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & I40EVF_FLAG_PROMISC_ON)
		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}

/**
 * i40evf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
{
	int q_idx;
	struct i40e_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

/**
 * i40evf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
{
	int q_idx;
	struct i40e_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}

/**
 * i40evf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void i40evf_configure(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	i40evf_set_rx_mode(netdev);

	i40evf_configure_tx(adapter);
	i40evf_configure_rx(adapter);
	adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct i40e_ring *ring = &adapter->rx_rings[i];

		i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
	}
}
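
/* Fill-level note: I40E_DESC_UNUSED() leaves one descriptor unfilled, the
 * usual ring convention that keeps next_to_use == next_to_clean meaning
 * "empty" rather than "full", so a ring of N descriptors is stocked with
 * at most N - 1 Rx buffers here.
 */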

/**
 * i40evf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 **/
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
	adapter->state = __I40EVF_RUNNING;
	clear_bit(__I40E_VSI_DOWN, adapter->vsi.state);

	i40evf_napi_enable_all(adapter);

	adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
/**
 * i40evf_down - Shutdown the connection processing
 * @adapter: board private structure
 **/
void i40evf_down(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct i40evf_vlan_filter *vlf;
	struct i40evf_mac_filter *f;

	if (adapter->state <= __I40EVF_DOWN_PENDING)
		return;

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	i40evf_napi_disable_all(adapter);
	i40evf_irq_disable(adapter);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}
	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->remove = true;
	}
	if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __I40EVF_RESETTING) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
	}

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}

/**
 * i40evf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}
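
/* pci_enable_msix_range() semantics matter here: it returns the number of
 * vectors actually granted, anywhere between vector_threshold and the
 * requested count, or a negative errno; there is no silent partial failure.
 * On a host that grants only 3 of, say, 5 requested vectors, the driver
 * simply runs with two traffic vectors plus the admin-queue vector.
 */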

/**
 * i40evf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void i40evf_free_queues(struct i40evf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}

/**
 * i40evf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did. Once basic reset is finished we'll confirm once we
	 * start negotiating config with PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct i40e_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct i40e_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct i40e_ring *tx_ring;
		struct i40e_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->tx_itr_setting = I40E_ITR_TX_DEF;
		if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->rx_itr_setting = I40E_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	return 0;

err_out:
	i40evf_free_queues(adapter);
	return -ENOMEM;
}

/**
 * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = i40evf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);
	return err;
}
/**
 * i40evf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
{
	struct i40e_aqc_get_set_rss_key_data *rss_key =
		(struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct i40e_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			i40evf_stat_str(hw, ret),
			i40evf_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				    adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			i40evf_stat_str(hw, ret),
			i40evf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;
}

/**
 * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i < adapter->rss_key_size / 4; i++)
		wr32(hw, I40E_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i < adapter->rss_lut_size / 4; i++)
		wr32(hw, I40E_VFQF_HLUT(i), dw[i]);

	i40e_flush(hw);

	return 0;
}
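
/* Sizing note: the VF exposes 13 HKEY and 16 HLUT registers, and the key
 * and LUT buffers are allocated as the matching whole number of 32-bit
 * words, so the valid indices in the loops above run from 0 through
 * size / 4 - 1; writing one word further would target a register index
 * that does not exist and read past the end of the buffer.
 */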

/**
 * i40evf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int i40evf_config_rss(struct i40evf_adapter *adapter)
{
	if (RSS_PF(adapter)) {
		adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
					I40EVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return i40evf_config_rss_aq(adapter);
	} else {
		return i40evf_config_rss_reg(adapter);
	}
}

/**
 * i40evf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}
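
/* Default LUT example: with a 64-entry LUT and 4 active queues the table
 * becomes 0, 1, 2, 3, 0, 1, 2, 3, ... spreading hashed flows evenly across
 * the queues until user space (e.g. ethtool -X) reprograms it.
 */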

/**
 * i40evf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_init_rss(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	int ret;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = I40E_DEFAULT_RSS_HENA;

		wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	i40evf_fill_rss_lut(adapter);

	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
	ret = i40evf_config_rss(adapter);

	return ret;
}

/**
 * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct i40e_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       i40evf_napi_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

/**
 * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}

/**
 * i40evf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
{
	int err;

	err = i40evf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	rtnl_lock();
	err = i40evf_set_interrupt_capability(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = i40evf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u\n",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	i40evf_reset_interrupt_capability(adapter);
err_set_interrupt:
	i40evf_free_queues(adapter);
err_alloc_queues:
	return err;
}

/**
 * i40evf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void i40evf_free_rss(struct i40evf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}

/**
 * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev))
		i40evf_free_traffic_irqs(adapter);
	i40evf_free_misc_irq(adapter);
	i40evf_reset_interrupt_capability(adapter);
	i40evf_free_q_vectors(adapter);
	i40evf_free_queues(adapter);

	err = i40evf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = i40evf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__I40E_VSI_DOWN, adapter->vsi.state);

	i40evf_map_rings_to_vectors(adapter);

	if (RSS_AQ(adapter))
		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
	else
		err = i40evf_init_rss(adapter);
err:
	return err;
}

/**
 * i40evf_watchdog_timer - Periodic call-back timer
 * @t: pointer to the timer_list embedded in the adapter
 **/
static void i40evf_watchdog_timer(struct timer_list *t)
{
	struct i40evf_adapter *adapter = from_timer(adapter, t,
						    watchdog_timer);

	schedule_work(&adapter->watchdog_task);
	/* timer will be rescheduled in watchdog task */
}

/**
 * i40evf_watchdog_task - Periodic call-back task
 * @work: pointer to work_struct
 **/
static void i40evf_watchdog_task(struct work_struct *work)
{
	struct i40evf_adapter *adapter = container_of(work,
						      struct i40evf_adapter,
						      watchdog_task);
	struct i40e_hw *hw = &adapter->hw;
	u32 reg_val;

	if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
		goto restart_watchdog;

	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
		    (reg_val == VIRTCHNL_VFR_COMPLETED)) {
			/* A chance for redemption! */
			dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
			adapter->state = __I40EVF_STARTUP;
			adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
			schedule_delayed_work(&adapter->init_task, 10);
			clear_bit(__I40EVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			/* Don't reschedule the watchdog, since we've restarted
			 * the init task. When init_task contacts the PF and
			 * gets everything set up again, it'll restart the
			 * watchdog for us. Down, boy. Sit. Stay. Woof.
			 */
			return;
		}
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	if ((adapter->state < __I40EVF_DOWN) ||
	    (adapter->flags & I40EVF_FLAG_RESET_PENDING))
		goto watchdog_done;

	/* check for reset */
	reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
		adapter->state = __I40EVF_RESETTING;
		adapter->flags |= I40EVF_FLAG_RESET_PENDING;
		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
		schedule_work(&adapter->reset_task);
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	/* Process admin queue tasks. After init, everything gets done
	 * here so we don't race on the admin queue.
	 */
	if (adapter->current_op) {
		if (!i40evf_asq_done(hw)) {
			dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
			i40evf_send_api_ver(adapter);
		}
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
		i40evf_send_vf_config_msg(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
		i40evf_disable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
		i40evf_map_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
		i40evf_add_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
		i40evf_add_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
		i40evf_del_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
		i40evf_del_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		i40evf_enable_vlan_stripping(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		i40evf_disable_vlan_stripping(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
		i40evf_configure_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
		i40evf_enable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		i40evf_init_rss(adapter);
		adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
		i40evf_get_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
		i40evf_set_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
		i40evf_set_rss_key(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
		i40evf_set_rss_lut(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
		i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				       FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
		i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		i40evf_set_promiscuous(adapter, 0);
		goto watchdog_done;
	}
	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));

	if (adapter->state == __I40EVF_RUNNING)
		i40evf_request_stats(adapter);
watchdog_done:
	if (adapter->state == __I40EVF_RUNNING) {
		i40evf_irq_enable_queues(adapter, ~0);
		i40evf_fire_sw_int(adapter, 0xFF);
	} else {
		i40evf_fire_sw_int(adapter, 0x1);
	}

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
	if (adapter->state == __I40EVF_REMOVE)
		return;
	if (adapter->aq_required)
		mod_timer(&adapter->watchdog_timer,
			  jiffies + msecs_to_jiffies(20));
	else
		mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
	schedule_work(&adapter->adminq_task);
}
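
/* Scheduling shape of the watchdog above: each pass issues at most one
 * pending virtchnl request (the first aq_required bit that matches) and
 * then returns, because only one current_op may be outstanding on the
 * admin queue at a time. While work remains the timer is rearmed at 20ms
 * to drain the backlog quickly; once aq_required is empty it relaxes to
 * a 2 second period.
 */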

static void i40evf_disable_vf(struct i40evf_adapter *adapter)
{
	struct i40evf_mac_filter *f, *ftmp;
	struct i40evf_vlan_filter *fv, *fvtmp;

	adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;

	if (netif_running(adapter->netdev)) {
		set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
		netif_carrier_off(adapter->netdev);
		netif_tx_disable(adapter->netdev);
		adapter->link_up = false;
		i40evf_napi_disable_all(adapter);
		i40evf_irq_disable(adapter);
		i40evf_free_traffic_irqs(adapter);
		i40evf_free_all_tx_resources(adapter);
		i40evf_free_all_rx_resources(adapter);
	}

	/* Delete all of the filters, both MAC and VLAN. */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
		list_del(&fv->list);
		kfree(fv);
	}

	i40evf_free_misc_irq(adapter);
	i40evf_reset_interrupt_capability(adapter);
	i40evf_free_queues(adapter);
	i40evf_free_q_vectors(adapter);
	kfree(adapter->vf_res);
	i40evf_shutdown_adminq(&adapter->hw);
	adapter->netdev->flags &= ~IFF_UP;
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
	adapter->state = __I40EVF_DOWN;
	wake_up(&adapter->down_waitqueue);
	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}

#define I40EVF_RESET_WAIT_MS 10
#define I40EVF_RESET_WAIT_COUNT 500
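
/* Worst-case arithmetic for the constants above: the reset task polls up
 * to 500 times with a 10ms sleep between attempts, so it waits roughly
 * 5 seconds for the PF to report VIRTCHNL_VFR_VFACTIVE before declaring
 * the VF dead and calling i40evf_disable_vf().
 */
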
1838 /**
1839 * i40evf_reset_task - Call-back task to handle hardware reset
1840 * @work: pointer to work_struct
1841 *
1842 * During reset we need to shut down and reinitialize the admin queue
1843 * before we can use it to communicate with the PF again. We also clear
1844 * and reinit the rings because that context is lost as well.
1845 **/
1846 static void i40evf_reset_task(struct work_struct *work)
1847 {
1848 struct i40evf_adapter *adapter = container_of(work,
1849 struct i40evf_adapter,
1850 reset_task);
1851 struct net_device *netdev = adapter->netdev;
1852 struct i40e_hw *hw = &adapter->hw;
1853 struct i40evf_vlan_filter *vlf;
1854 struct i40evf_mac_filter *f;
1855 u32 reg_val;
1856 int i = 0, err;
1857
1858 while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
1859 &adapter->crit_section))
1860 usleep_range(500, 1000);
1861 if (CLIENT_ENABLED(adapter)) {
1862 adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
1863 I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
1864 I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
1865 I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
1866 cancel_delayed_work_sync(&adapter->client_task);
1867 i40evf_notify_client_close(&adapter->vsi, true);
1868 }
1869 i40evf_misc_irq_disable(adapter);
1870 if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
1871 adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
1872 /* Restart the AQ here. If we have been reset but didn't
1873 * detect it, or if the PF had to reinit, our AQ will be hosed.
1874 */
1875 i40evf_shutdown_adminq(hw);
1876 i40evf_init_adminq(hw);
1877 i40evf_request_reset(adapter);
1878 }
1879 adapter->flags |= I40EVF_FLAG_RESET_PENDING;
1880
1881 /* poll until we see the reset actually happen */
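/* The ARQ enable bit is cleared as part of a VF reset, so a zero
 * read below means the reset has actually begun.
 */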
1882 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
1883 reg_val = rd32(hw, I40E_VF_ARQLEN1) &
1884 I40E_VF_ARQLEN1_ARQENABLE_MASK;
1885 if (!reg_val)
1886 break;
1887 usleep_range(5000, 10000);
1888 }
1889 if (i == I40EVF_RESET_WAIT_COUNT) {
1890 dev_info(&adapter->pdev->dev, "Never saw reset\n");
1891 goto continue_reset; /* act like the reset happened */
1892 }
1893
1894 /* wait until the reset is complete and the PF is responding to us */
1895 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
1896 /* sleep first to make sure a minimum wait time is met */
1897 msleep(I40EVF_RESET_WAIT_MS);
1898
1899 reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
1900 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1901 if (reg_val == VIRTCHNL_VFR_VFACTIVE)
1902 break;
1903 }
1904
1905 pci_set_master(adapter->pdev);
1906
1907 if (i == I40EVF_RESET_WAIT_COUNT) {
1908 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
1909 reg_val);
1910 i40evf_disable_vf(adapter);
1911 clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
1912 return; /* Do not attempt to reinit. It's dead, Jim. */
1913 }
1914
1915 continue_reset:
1916 if (netif_running(netdev)) {
1917 netif_carrier_off(netdev);
1918 netif_tx_stop_all_queues(netdev);
1919 adapter->link_up = false;
1920 i40evf_napi_disable_all(adapter);
1921 }
1922 i40evf_irq_disable(adapter);
1923
1924 adapter->state = __I40EVF_RESETTING;
1925 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
1926
1927 /* free the Tx/Rx rings and descriptors, might be better to just
1928 * re-use them sometime in the future
1929 */
1930 i40evf_free_all_rx_resources(adapter);
1931 i40evf_free_all_tx_resources(adapter);
1932
1933 /* kill and reinit the admin queue */
1934 i40evf_shutdown_adminq(hw);
1935 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1936 err = i40evf_init_adminq(hw);
1937 if (err)
1938 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
1939 err);
1940 adapter->aq_required = 0;
1941
1942 if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
1943 err = i40evf_reinit_interrupt_scheme(adapter);
1944 if (err)
1945 goto reset_err;
1946 }
1947
1948 adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
1949 adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
1950
1951 /* re-add all MAC filters */
1952 list_for_each_entry(f, &adapter->mac_filter_list, list) {
1953 f->add = true;
1954 }
1955 /* re-add all VLAN filters */
1956 list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
1957 vlf->add = true;
1958 }
1959 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
1960 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
1961 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1962 clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
1963 i40evf_misc_irq_enable(adapter);
1964
1965 mod_timer(&adapter->watchdog_timer, jiffies + 2);
1966
1967 if (netif_running(adapter->netdev)) {
1968 /* allocate transmit descriptors */
1969 err = i40evf_setup_all_tx_resources(adapter);
1970 if (err)
1971 goto reset_err;
1972
1973 /* allocate receive descriptors */
1974 err = i40evf_setup_all_rx_resources(adapter);
1975 if (err)
1976 goto reset_err;
1977
1978 if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
1979 err = i40evf_request_traffic_irqs(adapter,
1980 netdev->name);
1981 if (err)
1982 goto reset_err;
1983
1984 adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
1985 }
1986
1987 i40evf_configure(adapter);
1988
1989 i40evf_up_complete(adapter);
1990
1991 i40evf_irq_enable(adapter, true);
1992 } else {
1993 adapter->state = __I40EVF_DOWN;
1994 wake_up(&adapter->down_waitqueue);
1995 }
1996
1997 return;
1998 reset_err:
1999 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
2000 i40evf_close(netdev);
2001 }
2002
2003 /**
2004 * i40evf_adminq_task - worker thread to clean the admin queue
2005 * @work: pointer to work_struct containing our data
2006 **/
2007 static void i40evf_adminq_task(struct work_struct *work)
2008 {
2009 struct i40evf_adapter *adapter =
2010 container_of(work, struct i40evf_adapter, adminq_task);
2011 struct i40e_hw *hw = &adapter->hw;
2012 struct i40e_arq_event_info event;
2013 enum virtchnl_ops v_op;
2014 i40e_status ret, v_ret;
2015 u32 val, oldval;
2016 u16 pending;
2017
2018 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
2019 goto out;
2020
2021 event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
2022 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
2023 if (!event.msg_buf)
2024 goto out;
2025
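/* Drain the ARQ. Each event descriptor carries the virtchnl opcode
 * in cookie_high and the PF's return status in cookie_low, both
 * little-endian on the wire.
 */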
2026 do {
2027 ret = i40evf_clean_arq_element(hw, &event, &pending);
2028 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
2029 v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low);
2030
2031 if (ret || !v_op)
2032 break; /* No event to process or error cleaning ARQ */
2033
2034 i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
2035 event.msg_len);
2036 if (pending != 0)
2037 memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
2038 } while (pending);
2039
2040 if ((adapter->flags &
2041 (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
2042 adapter->state == __I40EVF_RESETTING)
2043 goto freedom;
2044
2045 /* check for error indications */
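/* Each error bit is logged once and then cleared by writing the
 * modified value back to the length register.
 */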
2046 val = rd32(hw, hw->aq.arq.len);
2047 if (val == 0xdeadbeef) /* indicates device in reset */
2048 goto freedom;
2049 oldval = val;
2050 if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
2051 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
2052 val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
2053 }
2054 if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
2055 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
2056 val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
2057 }
2058 if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
2059 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
2060 val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
2061 }
2062 if (oldval != val)
2063 wr32(hw, hw->aq.arq.len, val);
2064
2065 val = rd32(hw, hw->aq.asq.len);
2066 oldval = val;
2067 if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
2068 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
2069 val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
2070 }
2071 if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
2072 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
2073 val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
2074 }
2075 if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
2076 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
2077 val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
2078 }
2079 if (oldval != val)
2080 wr32(hw, hw->aq.asq.len, val);
2081
2082 freedom:
2083 kfree(event.msg_buf);
2084 out:
2085 /* re-enable Admin queue interrupt cause */
2086 i40evf_misc_irq_enable(adapter);
2087 }
2088
2089 /**
2090 * i40evf_client_task - worker thread to perform client work
2091 * @work: pointer to work_struct containing our data
2092 *
2093 * This task handles client interactions. Because client calls can be
2094 * reentrant, we can't handle them in the watchdog.
2095 **/
2096 static void i40evf_client_task(struct work_struct *work)
2097 {
2098 struct i40evf_adapter *adapter =
2099 container_of(work, struct i40evf_adapter, client_task.work);
2100
2101 /* If we can't get the client bit, just give up. We'll be rescheduled
2102 * later.
2103 */
2104
2105 if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
2106 return;
2107
2108 if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2109 i40evf_client_subtask(adapter);
2110 adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
2111 goto out;
2112 }
2113 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2114 i40evf_notify_client_l2_params(&adapter->vsi);
2115 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2116 goto out;
2117 }
2118 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
2119 i40evf_notify_client_close(&adapter->vsi, false);
2120 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
2121 goto out;
2122 }
2123 if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
2124 i40evf_notify_client_open(&adapter->vsi);
2125 adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
2126 }
2127 out:
2128 clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
2129 }
2130
2131 /**
2132 * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
2133 * @adapter: board private structure
2134 *
2135 * Free all transmit software resources
2136 **/
2137 void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
2138 {
2139 int i;
2140
2141 if (!adapter->tx_rings)
2142 return;
2143
2144 for (i = 0; i < adapter->num_active_queues; i++)
2145 if (adapter->tx_rings[i].desc)
2146 i40evf_free_tx_resources(&adapter->tx_rings[i]);
2147 }
2148
2149 /**
2150 * i40evf_setup_all_tx_resources - allocate all queues Tx resources
2151 * @adapter: board private structure
2152 *
2153 * If this function returns with an error, then it's possible one or
2154 * more of the rings are populated (while the rest are not). It is the
2155 * caller's duty to clean those orphaned rings.
2156 *
2157 * Return 0 on success, negative on failure
2158 **/
2159 static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
2160 {
2161 int i, err = 0;
2162
2163 for (i = 0; i < adapter->num_active_queues; i++) {
2164 adapter->tx_rings[i].count = adapter->tx_desc_count;
2165 err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]);
2166 if (!err)
2167 continue;
2168 dev_err(&adapter->pdev->dev,
2169 "Allocation for Tx Queue %u failed\n", i);
2170 break;
2171 }
2172
2173 return err;
2174 }
2175
2176 /**
2177 * i40evf_setup_all_rx_resources - allocate all queues Rx resources
2178 * @adapter: board private structure
2179 *
2180 * If this function returns with an error, then it's possible one or
2181 * more of the rings are populated (while the rest are not). It is the
2182 * caller's duty to clean those orphaned rings.
2183 *
2184 * Return 0 on success, negative on failure
2185 **/
2186 static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
2187 {
2188 int i, err = 0;
2189
2190 for (i = 0; i < adapter->num_active_queues; i++) {
2191 adapter->rx_rings[i].count = adapter->rx_desc_count;
2192 err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
2193 if (!err)
2194 continue;
2195 dev_err(&adapter->pdev->dev,
2196 "Allocation for Rx Queue %u failed\n", i);
2197 break;
2198 }
2199 return err;
2200 }
2201
2202 /**
2203 * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
2204 * @adapter: board private structure
2205 *
2206 * Free all receive software resources
2207 **/
2208 void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
2209 {
2210 int i;
2211
2212 if (!adapter->rx_rings)
2213 return;
2214
2215 for (i = 0; i < adapter->num_active_queues; i++)
2216 if (adapter->rx_rings[i].desc)
2217 i40evf_free_rx_resources(&adapter->rx_rings[i]);
2218 }
2219
2220 /**
2221 * i40evf_open - Called when a network interface is made active
2222 * @netdev: network interface device structure
2223 *
2224 * Returns 0 on success, negative value on failure
2225 *
2226 * The open entry point is called when a network interface is made
2227 * active by the system (IFF_UP). At this point all resources needed
2228 * for transmit and receive operations are allocated, the interrupt
2229 * handler is registered with the OS, the watchdog timer is started,
2230 * and the stack is notified that the interface is ready.
2231 **/
2232 static int i40evf_open(struct net_device *netdev)
2233 {
2234 struct i40evf_adapter *adapter = netdev_priv(netdev);
2235 int err;
2236
2237 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
2238 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
2239 return -EIO;
2240 }
2241
2242 if (adapter->state != __I40EVF_DOWN)
2243 return -EBUSY;
2244
2245 /* allocate transmit descriptors */
2246 err = i40evf_setup_all_tx_resources(adapter);
2247 if (err)
2248 goto err_setup_tx;
2249
2250 /* allocate receive descriptors */
2251 err = i40evf_setup_all_rx_resources(adapter);
2252 if (err)
2253 goto err_setup_rx;
2254
2255 /* clear any pending interrupts, may auto mask */
2256 err = i40evf_request_traffic_irqs(adapter, netdev->name);
2257 if (err)
2258 goto err_req_irq;
2259
2260 i40evf_add_filter(adapter, adapter->hw.mac.addr);
2261 i40evf_configure(adapter);
2262
2263 i40evf_up_complete(adapter);
2264
2265 i40evf_irq_enable(adapter, true);
2266
2267 return 0;
2268
2269 err_req_irq:
2270 i40evf_down(adapter);
2271 i40evf_free_traffic_irqs(adapter);
2272 err_setup_rx:
2273 i40evf_free_all_rx_resources(adapter);
2274 err_setup_tx:
2275 i40evf_free_all_tx_resources(adapter);
2276
2277 return err;
2278 }
2279
2280 /**
2281 * i40evf_close - Disables a network interface
2282 * @netdev: network interface device structure
2283 *
2284 * Returns 0, this is not allowed to fail
2285 *
2286 * The close entry point is called when an interface is de-activated
2287 * by the OS. The hardware is still under the driver's control, but
2288 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
2289 * are freed, along with all transmit and receive resources.
2290 **/
2291 static int i40evf_close(struct net_device *netdev)
2292 {
2293 struct i40evf_adapter *adapter = netdev_priv(netdev);
2294 int status;
2295
2296 if (adapter->state <= __I40EVF_DOWN_PENDING)
2297 return 0;
2298
2300 set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
2301 if (CLIENT_ENABLED(adapter))
2302 adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
2303
2304 i40evf_down(adapter);
2305 adapter->state = __I40EVF_DOWN_PENDING;
2306 i40evf_free_traffic_irqs(adapter);
2307
2308 /* We explicitly don't free resources here because the hardware is
2309 * still active and can DMA into memory. Resources are cleared in
2310 * i40evf_virtchnl_completion() after we get confirmation from the PF
2311 * driver that the rings have been stopped.
2312 *
2313 * Also, we wait for state to transition to __I40EVF_DOWN before
2314 * returning. State change occurs in i40evf_virtchnl_completion() after
2315 * VF resources are released (which occurs after PF driver processes and
2316 * responds to admin queue commands).
2317 */
2318
2319 status = wait_event_timeout(adapter->down_waitqueue,
2320 adapter->state == __I40EVF_DOWN,
2321 msecs_to_jiffies(200));
2322 if (!status)
2323 netdev_warn(netdev, "Device resources not yet released\n");
2324 return 0;
2325 }
2326
2327 /**
2328 * i40evf_change_mtu - Change the Maximum Transmission Unit
2329 * @netdev: network interface device structure
2330 * @new_mtu: new value for maximum frame size
2331 *
2332 * Returns 0 on success, negative on failure
2333 **/
2334 static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
2335 {
2336 struct i40evf_adapter *adapter = netdev_priv(netdev);
2337
2338 netdev->mtu = new_mtu;
2339 if (CLIENT_ENABLED(adapter)) {
2340 i40evf_notify_client_l2_params(&adapter->vsi);
2341 adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
2342 }
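/* The new MTU takes effect through a full reset, which frees and
 * reallocates the rings with the new buffer sizes.
 */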
2343 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
2344 schedule_work(&adapter->reset_task);
2345
2346 return 0;
2347 }
2348
2349 /**
2350 * i40evf_set_features - set the netdev feature flags
2351 * @netdev: ptr to the netdev being adjusted
2352 * @features: the feature set that the stack is suggesting
2353 * Note: expects to be called while under rtnl_lock()
2354 **/
2355 static int i40evf_set_features(struct net_device *netdev,
2356 netdev_features_t features)
2357 {
2358 struct i40evf_adapter *adapter = netdev_priv(netdev);
2359
2360 if (!VLAN_ALLOWED(adapter))
2361 return -EINVAL;
2362
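/* Only record the request here; the watchdog task sees the
 * aq_required bit and sends the actual virtchnl command to the PF.
 */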
2363 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2364 adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
2365 else
2366 adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
2367
2368 return 0;
2369 }
2370
2371 /**
2372 * i40evf_features_check - Validate encapsulated packet conforms to limits
2373 * @skb: skb buff
2374 * @dev: This physical port's netdev
2375 * @features: Offload features that the stack believes apply
2376 **/
2377 static netdev_features_t i40evf_features_check(struct sk_buff *skb,
2378 struct net_device *dev,
2379 netdev_features_t features)
2380 {
2381 size_t len;
2382
2383 /* No point in doing any of this if neither checksum nor GSO are
2384 * being requested for this frame. We can rule out both by just
2385 * checking for CHECKSUM_PARTIAL
2386 */
2387 if (skb->ip_summed != CHECKSUM_PARTIAL)
2388 return features;
2389
2390 /* We cannot support GSO if the MSS is going to be less than
2391 * 64 bytes; if it is, drop the GSO feature for this frame.
2392 */
2393 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
2394 features &= ~NETIF_F_GSO_MASK;
2395
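/* The header-length checks below share one pattern: the hardware
 * fields count 2-byte words (or 4-byte dwords), so a byte length is
 * valid only if masking off every representable value leaves nothing.
 * For example, ~(63 * 2) == ~126 rejects any MACLEN that is odd or
 * longer than 126 bytes.
 */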
2396 /* MACLEN can support at most 63 words */
2397 len = skb_network_header(skb) - skb->data;
2398 if (len & ~(63 * 2))
2399 goto out_err;
2400
2401 /* IPLEN and EIPLEN can support at most 127 dwords */
2402 len = skb_transport_header(skb) - skb_network_header(skb);
2403 if (len & ~(127 * 4))
2404 goto out_err;
2405
2406 if (skb->encapsulation) {
2407 /* L4TUNLEN can support at most 127 words */
2408 len = skb_inner_network_header(skb) - skb_transport_header(skb);
2409 if (len & ~(127 * 2))
2410 goto out_err;
2411
2412 /* IPLEN can support at most 127 dwords */
2413 len = skb_inner_transport_header(skb) -
2414 skb_inner_network_header(skb);
2415 if (len & ~(127 * 4))
2416 goto out_err;
2417 }
2418
2419 /* No need to validate L4LEN as TCP is the only protocol with a
2420 * flexible value, and we support all possible values supported
2421 * by TCP, which is at most 15 dwords
2422 */
2423
2424 return features;
2425 out_err:
2426 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2427 }
2428
2429 /**
2430 * i40evf_fix_features - fix up the netdev feature bits
2431 * @netdev: our net device
2432 * @features: desired feature bits
2433 *
2434 * Returns the fixed-up feature bits
2435 **/
2436 static netdev_features_t i40evf_fix_features(struct net_device *netdev,
2437 netdev_features_t features)
2438 {
2439 struct i40evf_adapter *adapter = netdev_priv(netdev);
2440
2441 if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
2442 features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
2443 NETIF_F_HW_VLAN_CTAG_RX |
2444 NETIF_F_HW_VLAN_CTAG_FILTER);
2445
2446 return features;
2447 }
2448
2449 static const struct net_device_ops i40evf_netdev_ops = {
2450 .ndo_open = i40evf_open,
2451 .ndo_stop = i40evf_close,
2452 .ndo_start_xmit = i40evf_xmit_frame,
2453 .ndo_set_rx_mode = i40evf_set_rx_mode,
2454 .ndo_validate_addr = eth_validate_addr,
2455 .ndo_set_mac_address = i40evf_set_mac,
2456 .ndo_change_mtu = i40evf_change_mtu,
2457 .ndo_tx_timeout = i40evf_tx_timeout,
2458 .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
2459 .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
2460 .ndo_features_check = i40evf_features_check,
2461 .ndo_fix_features = i40evf_fix_features,
2462 .ndo_set_features = i40evf_set_features,
2463 #ifdef CONFIG_NET_POLL_CONTROLLER
2464 .ndo_poll_controller = i40evf_netpoll,
2465 #endif
2466 };
2467
2468 /**
2469 * i40evf_check_reset_complete - check that VF reset is complete
2470 * @hw: pointer to hw struct
2471 *
2472 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
2473 **/
2474 static int i40evf_check_reset_complete(struct i40e_hw *hw)
2475 {
2476 u32 rstat;
2477 int i;
2478
2479 for (i = 0; i < 100; i++) {
2480 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
2481 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
2482 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
2483 (rstat == VIRTCHNL_VFR_COMPLETED))
2484 return 0;
2485 usleep_range(10, 20);
2486 }
2487 return -EBUSY;
2488 }
2489
2490 /**
2491 * i40evf_process_config - Process the config information we got from the PF
2492 * @adapter: board private structure
2493 *
2494 * Verify that we have a valid config struct, and set up our netdev features
2495 * and our VSI struct.
2496 **/
2497 int i40evf_process_config(struct i40evf_adapter *adapter)
2498 {
2499 struct virtchnl_vf_resource *vfres = adapter->vf_res;
2500 int i, num_req_queues = adapter->num_req_queues;
2501 struct net_device *netdev = adapter->netdev;
2502 struct i40e_vsi *vsi = &adapter->vsi;
2503 netdev_features_t hw_enc_features;
2504 netdev_features_t hw_features;
2505
2506 /* got VF config message back from PF, now we can parse it */
2507 for (i = 0; i < vfres->num_vsis; i++) {
2508 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
2509 adapter->vsi_res = &vfres->vsi_res[i];
2510 }
2511 if (!adapter->vsi_res) {
2512 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2513 return -ENODEV;
2514 }
2515
2516 if (num_req_queues &&
2517 num_req_queues != adapter->vsi_res->num_queue_pairs) {
2518 /* Problem. The PF gave us a different number of queues than we
2519 * negotiated in our request. A reset is needed to get back to a
2520 * working state.
2521 */
2522 dev_err(&adapter->pdev->dev,
2523 "Requested %d queues, but PF only gave us %d.\n",
2524 num_req_queues,
2525 adapter->vsi_res->num_queue_pairs);
2526 adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
2527 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
2528 i40evf_schedule_reset(adapter);
2529 return -ENODEV;
2530 }
2531 adapter->num_req_queues = 0;
2532
2533 hw_enc_features = NETIF_F_SG |
2534 NETIF_F_IP_CSUM |
2535 NETIF_F_IPV6_CSUM |
2536 NETIF_F_HIGHDMA |
2537 NETIF_F_SOFT_FEATURES |
2538 NETIF_F_TSO |
2539 NETIF_F_TSO_ECN |
2540 NETIF_F_TSO6 |
2541 NETIF_F_SCTP_CRC |
2542 NETIF_F_RXHASH |
2543 NETIF_F_RXCSUM |
2544 0;
2545
2546 /* advertise to the stack only if offloads for encapsulated packets
2547 * are supported
2548 */
2549 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
2550 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
2551 NETIF_F_GSO_GRE |
2552 NETIF_F_GSO_GRE_CSUM |
2553 NETIF_F_GSO_IPXIP4 |
2554 NETIF_F_GSO_IPXIP6 |
2555 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2556 NETIF_F_GSO_PARTIAL |
2557 0;
2558
2559 if (!(vfres->vf_cap_flags &
2560 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
2561 netdev->gso_partial_features |=
2562 NETIF_F_GSO_UDP_TUNNEL_CSUM;
2563
2564 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
2565 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
2566 netdev->hw_enc_features |= hw_enc_features;
2567 }
2568 /* record features VLANs can make use of */
2569 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
2570
2571 /* Write features and hw_features separately to avoid polluting
2572 * either with, or dropping, features that were set when we registered.
2573 */
2574 hw_features = hw_enc_features;
2575
2576 /* Enable VLAN features if supported */
2577 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
2578 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
2579 NETIF_F_HW_VLAN_CTAG_RX);
2580
2581 netdev->hw_features |= hw_features;
2582
2583 netdev->features |= hw_features;
2584
2585 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
2586 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2587
2588 adapter->vsi.id = adapter->vsi_res->vsi_id;
2589
2590 adapter->vsi.back = adapter;
2591 adapter->vsi.base_vector = 1;
2592 adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
2593 vsi->netdev = adapter->netdev;
2594 vsi->qs_handle = adapter->vsi_res->qset_handle;
2595 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2596 adapter->rss_key_size = vfres->rss_key_size;
2597 adapter->rss_lut_size = vfres->rss_lut_size;
2598 } else {
2599 adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
2600 adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
2601 }
2602
2603 return 0;
2604 }
2605
2606 /**
2607 * i40evf_init_task - worker thread to perform delayed initialization
2608 * @work: pointer to work_struct containing our data
2609 *
2610 * This task completes the work that was begun in probe. Due to the nature
2611 * of VF-PF communications, we may need to wait tens of milliseconds to get
2612 * responses back from the PF. Rather than busy-wait in probe and bog down the
2613 * whole system, we'll do it in a task so we can sleep.
2614 * This task only runs during driver init. Once we've established
2615 * communications with the PF driver and set up our netdev, the watchdog
2616 * takes over.
2617 **/
2618 static void i40evf_init_task(struct work_struct *work)
2619 {
2620 struct i40evf_adapter *adapter = container_of(work,
2621 struct i40evf_adapter,
2622 init_task.work);
2623 struct net_device *netdev = adapter->netdev;
2624 struct i40e_hw *hw = &adapter->hw;
2625 struct pci_dev *pdev = adapter->pdev;
2626 int err, bufsz;
2627
2628 switch (adapter->state) {
2629 case __I40EVF_STARTUP:
2630 /* driver loaded, probe complete */
2631 adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
2632 adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
2633 err = i40e_set_mac_type(hw);
2634 if (err) {
2635 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
2636 err);
2637 goto err;
2638 }
2639 err = i40evf_check_reset_complete(hw);
2640 if (err) {
2641 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
2642 err);
2643 goto err;
2644 }
2645 hw->aq.num_arq_entries = I40EVF_AQ_LEN;
2646 hw->aq.num_asq_entries = I40EVF_AQ_LEN;
2647 hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
2648 hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
2649
2650 err = i40evf_init_adminq(hw);
2651 if (err) {
2652 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
2653 err);
2654 goto err;
2655 }
2656 err = i40evf_send_api_ver(adapter);
2657 if (err) {
2658 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
2659 i40evf_shutdown_adminq(hw);
2660 goto err;
2661 }
2662 adapter->state = __I40EVF_INIT_VERSION_CHECK;
2663 goto restart;
2664 case __I40EVF_INIT_VERSION_CHECK:
2665 if (!i40evf_asq_done(hw)) {
2666 dev_err(&pdev->dev, "Admin queue command never completed\n");
2667 i40evf_shutdown_adminq(hw);
2668 adapter->state = __I40EVF_STARTUP;
2669 goto err;
2670 }
2671
2672 /* aq msg sent, awaiting reply */
2673 err = i40evf_verify_api_ver(adapter);
2674 if (err) {
2675 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
2676 err = i40evf_send_api_ver(adapter);
2677 else
2678 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
2679 adapter->pf_version.major,
2680 adapter->pf_version.minor,
2681 VIRTCHNL_VERSION_MAJOR,
2682 VIRTCHNL_VERSION_MINOR);
2683 goto err;
2684 }
2685 err = i40evf_send_vf_config_msg(adapter);
2686 if (err) {
2687 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
2688 err);
2689 goto err;
2690 }
2691 adapter->state = __I40EVF_INIT_GET_RESOURCES;
2692 goto restart;
2693 case __I40EVF_INIT_GET_RESOURCES:
2694 /* aq msg sent, awaiting reply */
2695 if (!adapter->vf_res) {
2696 bufsz = sizeof(struct virtchnl_vf_resource) +
2697 (I40E_MAX_VF_VSI *
2698 sizeof(struct virtchnl_vsi_resource));
2699 adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
2700 if (!adapter->vf_res)
2701 goto err;
2702 }
2703 err = i40evf_get_vf_config(adapter);
2704 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
2705 err = i40evf_send_vf_config_msg(adapter);
2706 goto err;
2707 } else if (err == I40E_ERR_PARAM) {
2708 /* We only get ERR_PARAM if the device is in a very bad
2709 * state or if we've been disabled for previous bad
2710 * behavior. Either way, we're done now.
2711 */
2712 i40evf_shutdown_adminq(hw);
2713 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
2714 return;
2715 }
2716 if (err) {
2717 dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
2718 err);
2719 goto err_alloc;
2720 }
2721 adapter->state = __I40EVF_INIT_SW;
2722 break;
2723 default:
2724 goto err_alloc;
2725 }
2726
2727 if (i40evf_process_config(adapter))
2728 goto err_alloc;
2729 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2730
2731 adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
2732
2733 netdev->netdev_ops = &i40evf_netdev_ops;
2734 i40evf_set_ethtool_ops(netdev);
2735 netdev->watchdog_timeo = 5 * HZ;
2736
2737 /* MTU range: 68 - 9710 */
2738 netdev->min_mtu = ETH_MIN_MTU;
2739 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
2740
2741 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2742 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2743 adapter->hw.mac.addr);
2744 eth_hw_addr_random(netdev);
2745 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2746 } else {
2747 adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
2748 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2749 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2750 }
2751
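/* Arm the watchdog to fire on the next tick; from this point it,
 * rather than the init task, drives the adapter state machine.
 */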
2752 timer_setup(&adapter->watchdog_timer, i40evf_watchdog_timer, 0);
2753 mod_timer(&adapter->watchdog_timer, jiffies + 1);
2754
2755 adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
2756 adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
2757 err = i40evf_init_interrupt_scheme(adapter);
2758 if (err)
2759 goto err_sw_init;
2760 i40evf_map_rings_to_vectors(adapter);
2761 if (adapter->vf_res->vf_cap_flags &
2762 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2763 adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
2764
2765 err = i40evf_request_misc_irq(adapter);
2766 if (err)
2767 goto err_sw_init;
2768
2769 netif_carrier_off(netdev);
2770 adapter->link_up = false;
2771
2772 if (!adapter->netdev_registered) {
2773 err = register_netdev(netdev);
2774 if (err)
2775 goto err_register;
2776 }
2777
2778 adapter->netdev_registered = true;
2779
2780 netif_tx_stop_all_queues(netdev);
2781 if (CLIENT_ALLOWED(adapter)) {
2782 err = i40evf_lan_add_device(adapter);
2783 if (err)
2784 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
2785 err);
2786 }
2787
2788 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2789 if (netdev->features & NETIF_F_GRO)
2790 dev_info(&pdev->dev, "GRO is enabled\n");
2791
2792 adapter->state = __I40EVF_DOWN;
2793 set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
2794 i40evf_misc_irq_enable(adapter);
2795 wake_up(&adapter->down_waitqueue);
2796
2797 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
2798 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
2799 if (!adapter->rss_key || !adapter->rss_lut)
2800 goto err_mem;
2801
2802 if (RSS_AQ(adapter)) {
2803 adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
2804 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
2805 } else {
2806 i40evf_init_rss(adapter);
2807 }
2808 return;
2809 restart:
2810 schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
2811 return;
2812 err_mem:
2813 i40evf_free_rss(adapter);
2814 err_register:
2815 i40evf_free_misc_irq(adapter);
2816 err_sw_init:
2817 i40evf_reset_interrupt_capability(adapter);
2818 err_alloc:
2819 kfree(adapter->vf_res);
2820 adapter->vf_res = NULL;
2821 err:
2822 /* Things went into the weeds, so try again later */
2823 if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
2824 dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
2825 adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
2826 i40evf_shutdown_adminq(hw);
2827 adapter->state = __I40EVF_STARTUP;
2828 schedule_delayed_work(&adapter->init_task, HZ * 5);
2829 return;
2830 }
2831 schedule_delayed_work(&adapter->init_task, HZ);
2832 }
2833
2834 /**
2835 * i40evf_shutdown - Shutdown the device in preparation for a reboot
2836 * @pdev: pci device structure
2837 **/
2838 static void i40evf_shutdown(struct pci_dev *pdev)
2839 {
2840 struct net_device *netdev = pci_get_drvdata(pdev);
2841 struct i40evf_adapter *adapter = netdev_priv(netdev);
2842
2843 netif_device_detach(netdev);
2844
2845 if (netif_running(netdev))
2846 i40evf_close(netdev);
2847
2848 /* Prevent the watchdog from running. */
2849 adapter->state = __I40EVF_REMOVE;
2850 adapter->aq_required = 0;
2851
2852 #ifdef CONFIG_PM
2853 pci_save_state(pdev);
2854
2855 #endif
2856 pci_disable_device(pdev);
2857 }
2858
2859 /**
2860 * i40evf_probe - Device Initialization Routine
2861 * @pdev: PCI device information struct
2862 * @ent: entry in i40evf_pci_tbl
2863 *
2864 * Returns 0 on success, negative on failure
2865 *
2866 * i40evf_probe initializes an adapter identified by a pci_dev structure.
2867 * The OS initialization, configuring of the adapter private structure,
2868 * and a hardware reset occur.
2869 **/
2870 static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2871 {
2872 struct net_device *netdev;
2873 struct i40evf_adapter *adapter = NULL;
2874 struct i40e_hw *hw = NULL;
2875 int err;
2876
2877 err = pci_enable_device(pdev);
2878 if (err)
2879 return err;
2880
2881 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2882 if (err) {
2883 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2884 if (err) {
2885 dev_err(&pdev->dev,
2886 "DMA configuration failed: 0x%x\n", err);
2887 goto err_dma;
2888 }
2889 }
2890
2891 err = pci_request_regions(pdev, i40evf_driver_name);
2892 if (err) {
2893 dev_err(&pdev->dev,
2894 "pci_request_regions failed 0x%x\n", err);
2895 goto err_pci_reg;
2896 }
2897
2898 pci_enable_pcie_error_reporting(pdev);
2899
2900 pci_set_master(pdev);
2901
2902 netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), MAX_QUEUES);
2903 if (!netdev) {
2904 err = -ENOMEM;
2905 goto err_alloc_etherdev;
2906 }
2907
2908 SET_NETDEV_DEV(netdev, &pdev->dev);
2909
2910 pci_set_drvdata(pdev, netdev);
2911 adapter = netdev_priv(netdev);
2912
2913 adapter->netdev = netdev;
2914 adapter->pdev = pdev;
2915
2916 hw = &adapter->hw;
2917 hw->back = adapter;
2918
2919 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
2920 adapter->state = __I40EVF_STARTUP;
2921
2922 /* Call save state here because it relies on the adapter struct. */
2923 pci_save_state(pdev);
2924
2925 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
2926 pci_resource_len(pdev, 0));
2927 if (!hw->hw_addr) {
2928 err = -EIO;
2929 goto err_ioremap;
2930 }
2931 hw->vendor_id = pdev->vendor;
2932 hw->device_id = pdev->device;
2933 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2934 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2935 hw->subsystem_device_id = pdev->subsystem_device;
2936 hw->bus.device = PCI_SLOT(pdev->devfn);
2937 hw->bus.func = PCI_FUNC(pdev->devfn);
2938 hw->bus.bus_id = pdev->bus->number;
2939
2940 /* set up the locks for the AQ, do this only once in probe
2941 * and destroy them only once in remove
2942 */
2943 mutex_init(&hw->aq.asq_mutex);
2944 mutex_init(&hw->aq.arq_mutex);
2945
2946 INIT_LIST_HEAD(&adapter->mac_filter_list);
2947 INIT_LIST_HEAD(&adapter->vlan_filter_list);
2948
2949 INIT_WORK(&adapter->reset_task, i40evf_reset_task);
2950 INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
2951 INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
2952 INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
2953 INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
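/* Stagger the first init_task run by PCI function number (5 ms per
 * function) so VFs brought up together don't all hit the PF's admin
 * queue at the same instant.
 */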
2954 schedule_delayed_work(&adapter->init_task,
2955 msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
2956
2957 /* Setup the wait queue for indicating transition to down status */
2958 init_waitqueue_head(&adapter->down_waitqueue);
2959
2960 return 0;
2961
2962 err_ioremap:
2963 free_netdev(netdev);
2964 err_alloc_etherdev:
2965 pci_release_regions(pdev);
2966 err_pci_reg:
2967 err_dma:
2968 pci_disable_device(pdev);
2969 return err;
2970 }
2971
2972 #ifdef CONFIG_PM
2973 /**
2974 * i40evf_suspend - Power management suspend routine
2975 * @pdev: PCI device information struct
2976 * @state: unused
2977 *
2978 * Called when the system (VM) is entering sleep/suspend.
2979 **/
2980 static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
2981 {
2982 struct net_device *netdev = pci_get_drvdata(pdev);
2983 struct i40evf_adapter *adapter = netdev_priv(netdev);
2984 int retval = 0;
2985
2986 netif_device_detach(netdev);
2987
2988 if (netif_running(netdev)) {
2989 rtnl_lock();
2990 i40evf_down(adapter);
2991 rtnl_unlock();
2992 }
2993 i40evf_free_misc_irq(adapter);
2994 i40evf_reset_interrupt_capability(adapter);
2995
2996 retval = pci_save_state(pdev);
2997 if (retval)
2998 return retval;
2999
3000 pci_disable_device(pdev);
3001
3002 return 0;
3003 }
3004
3005 /**
3006 * i40evf_resume - Power management resume routine
3007 * @pdev: PCI device information struct
3008 *
3009 * Called when the system (VM) is resumed from sleep/suspend.
3010 **/
3011 static int i40evf_resume(struct pci_dev *pdev)
3012 {
3013 struct net_device *netdev = pci_get_drvdata(pdev);
3014 struct i40evf_adapter *adapter = netdev_priv(netdev);
3015 int err;
3016
3017 pci_set_power_state(pdev, PCI_D0);
3018 pci_restore_state(pdev);
3019 /* pci_restore_state clears dev->state_saved so call
3020 * pci_save_state to restore it.
3021 */
3022 pci_save_state(pdev);
3023
3024 err = pci_enable_device_mem(pdev);
3025 if (err) {
3026 dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
3027 return err;
3028 }
3029 pci_set_master(pdev);
3030
3031 rtnl_lock();
3032 err = i40evf_set_interrupt_capability(adapter);
3033 if (err) {
3034 rtnl_unlock();
3035 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
3036 return err;
3037 }
3038 err = i40evf_request_misc_irq(adapter);
3039 rtnl_unlock();
3040 if (err) {
3041 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
3042 return err;
3043 }
3044
3045 schedule_work(&adapter->reset_task);
3046
3047 netif_device_attach(netdev);
3048
3049 return err;
3050 }
3051
3052 #endif /* CONFIG_PM */
3053 /**
3054 * i40evf_remove - Device Removal Routine
3055 * @pdev: PCI device information struct
3056 *
3057 * i40evf_remove is called by the PCI subsystem to alert the driver
3058 * that it should release a PCI device. This could be caused by a
3059 * Hot-Plug event, or because the driver is going to be removed from
3060 * memory.
3061 **/
3062 static void i40evf_remove(struct pci_dev *pdev)
3063 {
3064 struct net_device *netdev = pci_get_drvdata(pdev);
3065 struct i40evf_adapter *adapter = netdev_priv(netdev);
3066 struct i40evf_mac_filter *f, *ftmp;
3067 struct i40e_hw *hw = &adapter->hw;
3068 int err;
3069
3070 cancel_delayed_work_sync(&adapter->init_task);
3071 cancel_work_sync(&adapter->reset_task);
3072 cancel_delayed_work_sync(&adapter->client_task);
3073 if (adapter->netdev_registered) {
3074 unregister_netdev(netdev);
3075 adapter->netdev_registered = false;
3076 }
3077 if (CLIENT_ALLOWED(adapter)) {
3078 err = i40evf_lan_del_device(adapter);
3079 if (err)
3080 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
3081 err);
3082 }
3083
3084 /* Shut down all the garbage mashers on the detention level */
3085 adapter->state = __I40EVF_REMOVE;
3086 adapter->aq_required = 0;
3087 i40evf_request_reset(adapter);
3088 msleep(50);
3089 /* If the FW isn't responding, kick it once, but only once. */
3090 if (!i40evf_asq_done(hw)) {
3091 i40evf_request_reset(adapter);
3092 msleep(50);
3093 }
3094 i40evf_free_all_tx_resources(adapter);
3095 i40evf_free_all_rx_resources(adapter);
3096 i40evf_misc_irq_disable(adapter);
3097 i40evf_free_misc_irq(adapter);
3098 i40evf_reset_interrupt_capability(adapter);
3099 i40evf_free_q_vectors(adapter);
3100
3101 if (adapter->watchdog_timer.function)
3102 del_timer_sync(&adapter->watchdog_timer);
3103
3104 flush_scheduled_work();
3105
3106 i40evf_free_rss(adapter);
3107
3108 if (hw->aq.asq.count)
3109 i40evf_shutdown_adminq(hw);
3110
3111 /* destroy the locks only once, here */
3112 mutex_destroy(&hw->aq.arq_mutex);
3113 mutex_destroy(&hw->aq.asq_mutex);
3114
3115 iounmap(hw->hw_addr);
3116 pci_release_regions(pdev);
3117 i40evf_free_all_tx_resources(adapter);
3118 i40evf_free_all_rx_resources(adapter);
3119 i40evf_free_queues(adapter);
3120 kfree(adapter->vf_res);
3121 /* If we got removed before an up/down sequence, we've got a filter
3122 * hanging out there that we need to get rid of.
3123 */
3124 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3125 list_del(&f->list);
3126 kfree(f);
3127 }
3128 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
3129 list_del(&f->list);
3130 kfree(f);
3131 }
3132
3133 free_netdev(netdev);
3134
3135 pci_disable_pcie_error_reporting(pdev);
3136
3137 pci_disable_device(pdev);
3138 }
3139
3140 static struct pci_driver i40evf_driver = {
3141 .name = i40evf_driver_name,
3142 .id_table = i40evf_pci_tbl,
3143 .probe = i40evf_probe,
3144 .remove = i40evf_remove,
3145 #ifdef CONFIG_PM
3146 .suspend = i40evf_suspend,
3147 .resume = i40evf_resume,
3148 #endif
3149 .shutdown = i40evf_shutdown,
3150 };
3151
3152 /**
3153 * i40evf_init_module - Driver Registration Routine
3154 *
3155 * i40evf_init_module is the first routine called when the driver is
3156 * loaded. It creates the driver workqueue and registers with the PCI subsystem.
3157 **/
3158 static int __init i40evf_init_module(void)
3159 {
3160 int ret;
3161
3162 pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
3163 i40evf_driver_version);
3164
3165 pr_info("%s\n", i40evf_copyright);
3166
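/* Use an unbound, single-threaded (max_active = 1) workqueue with
 * WQ_MEM_RECLAIM so queued work can make progress under memory
 * pressure.
 */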
3167 i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
3168 i40evf_driver_name);
3169 if (!i40evf_wq) {
3170 pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
3171 return -ENOMEM;
3172 }
3173 ret = pci_register_driver(&i40evf_driver);
3174 return ret;
3175 }
3176
3177 module_init(i40evf_init_module);
3178
3179 /**
3180 * i40evf_exit_module - Driver Exit Cleanup Routine
3181 *
3182 * i40evf_exit_module is called just before the driver is removed
3183 * from memory. It unregisters the PCI driver and destroys the workqueue.
3184 **/
3185 static void __exit i40evf_exit_module(void)
3186 {
3187 pci_unregister_driver(&i40evf_driver);
3188 destroy_workqueue(i40evf_wq);
3189 }
3190
3191 module_exit(i40evf_exit_module);
3192
3193 /* i40evf_main.c */