// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;
/**
 * iavf_pdev_to_adapter - go from pci_dev to adapter
 * @pdev: pci_dev pointer
 */
static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
{
	return netdev_priv(pci_get_drvdata(pdev));
}
/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
				     struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
					  struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
				      struct iavf_virt_mem *mem)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}
/**
 * iavf_lock_timeout - try to lock mutex but give up after timeout
 * @lock: mutex that should be locked
 * @msecs: timeout in msecs
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
{
	unsigned int wait, delay = 10;

	for (wait = 0; wait < msecs; wait += delay) {
		if (mutex_trylock(lock))
			return 0;

		msleep(delay);
	}

	return -1;
}
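/* Illustrative usage sketch (not part of the upstream file): a caller
 * that must not block indefinitely can bound the wait on a mutex such
 * as adapter->crit_lock and bail out on timeout:
 *
 *	if (iavf_lock_timeout(&adapter->crit_lock, 5000))
 *		return;		// gave up after roughly 5 seconds
 *	...work under the lock...
 *	mutex_unlock(&adapter->crit_lock);
 *
 * Note the polling granularity is the msleep() delay (10 ms), so the
 * effective timeout is only approximate.
 */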
/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		queue_work(iavf_wq, &adapter->reset_task);
	}
}

/**
 * iavf_schedule_request_stats - Set the flags and schedule statistics request
 * @adapter: board private structure
 *
 * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
 * request and refresh ethtool stats
 **/
void iavf_schedule_request_stats(struct iavf_adapter *adapter)
{
	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}
/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}
/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		iavf_flush(hw);
}
/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	if (adapter->state != __IAVF_REMOVE)
		/* schedule work on the private workqueue */
		queue_work(iavf_wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}
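/* Worked example (illustrative): with 8 active queue pairs and 4 queue
 * vectors (q_vectors = 4), the round-robin above yields
 *
 *	vector 0: rings 0, 4
 *	vector 1: rings 1, 5
 *	vector 2: rings 2, 6
 *	vector 3: rings 3, 7
 *
 * i.e. ring i lands on vector (i % q_vectors), and each ring pair's Tx
 * and Rx always share the same vector.
 */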
/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}
/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%d", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%d", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_set_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}
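/* Example (illustrative): for a VF whose netdev is named eth0, the loop
 * above registers vector names of the form "iavf-eth0-TxRx-0",
 * "iavf-eth0-TxRx-1", ... which show up in /proc/interrupts; the
 * "-rx-"/"-tx-" forms only appear if a vector carries just one ring type.
 */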
/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}
/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
	struct iavf_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = IAVF_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu. On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}
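/* Decision table (illustrative) for the Rx buffer size chosen above on a
 * 4K-page system without IAVF_FLAG_LEGACY_RX:
 *
 *	MTU <= 1500 and 2K is not too small:  1536 - NET_IP_ALIGN
 *	otherwise:                            3072 (order-1 page split)
 *
 * With legacy Rx, or PAGE_SIZE >= 8192, the size stays at 2048.
 */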
/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (vlan == f->vlan)
			return f;
	}
	return NULL;
}

/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		list_add_tail(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_restore_filters - Restore existing non MAC filters
 * @adapter: board private structure
 *
 * Restore existing non MAC filters when VF netdev comes back up
 **/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
	u16 vid;

	/* re-add all VLAN filters */
	for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
		iavf_add_vlan(adapter, vid);
}
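/* Note (illustrative): the VLANs themselves are tracked in the
 * vsi.active_vlans bitmap by iavf_vlan_rx_add_vid()/iavf_vlan_rx_kill_vid()
 * below, so after a VF reset wipes the PF-side filters this loop can
 * rebuild the driver's filter list purely from that bitmap.
 */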
/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_ALLOWED(adapter))
		return -EIO;

	if (iavf_add_vlan(adapter, vid) == NULL)
		return -ENOMEM;

	set_bit(vid, adapter->vsi.active_vlans);
	return 0;
}

/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	iavf_del_vlan(adapter, vid);
	clear_bit(vid, adapter->vsi.active_vlans);

	return 0;
}

/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
				  const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}
/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
					const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		f->is_new_mac = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}

/**
 * iavf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = iavf_add_filter(adapter, addr->sa_data);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (f) {
		ether_addr_copy(hw->mac.addr, addr->sa_data);
	}

	return (f == NULL) ? -ENOMEM : 0;
}
/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (iavf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = iavf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}

/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & IAVF_FLAG_PROMISC_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}
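/* Example (illustrative): running "ip link set eth0 promisc on" sets
 * IFF_PROMISC on the netdev; since IAVF_FLAG_PROMISC_ON is not yet set,
 * the first branch queues IAVF_FLAG_AQ_REQUEST_PROMISC and
 * iavf_process_aq_command() later sends the virtchnl promiscuous-mode
 * request to the PF. Turning promisc off reverses this via
 * IAVF_FLAG_AQ_RELEASE_PROMISC.
 */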
/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}

/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	iavf_set_rx_mode(netdev);

	iavf_configure_tx(adapter);
	iavf_configure_rx(adapter);
	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *ring = &adapter->rx_rings[i];

		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
	}
}

/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
	iavf_change_state(adapter, __IAVF_RUNNING);
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}
/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct iavf_vlan_filter *vlf;
	struct iavf_cloud_filter *cf;
	struct iavf_fdir_fltr *fdir;
	struct iavf_mac_filter *f;
	struct iavf_adv_rss *rss;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}

	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->remove = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		cf->del = true;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	/* remove all Flow Director filters */
	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
		fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	/* remove all advance RSS configuration */
	spin_lock_bh(&adapter->adv_rss_lock);
	list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
		rss->state = IAVF_ADV_RSS_DEL_REQUEST;
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}
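/* Note (illustrative): iavf_down() only marks filters for removal and
 * sets aq_required bits; the actual virtchnl messages to the PF are
 * sent later by iavf_process_aq_command() from the watchdog, one
 * request per pass, which is why the function kicks the watchdog
 * instead of waiting here.
 */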
/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}

/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}
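/* Example (illustrative): pci_enable_msix_range(pdev, entries,
 * MIN_MSIX_COUNT, v_budget) may be granted any count within that range
 * by the platform; whatever comes back becomes num_msix_vectors. With
 * the NONQ_VECS non-queue vector(s) reserved for the admin queue, the
 * remainder serve as queue vectors and iavf_map_rings_to_vectors()
 * spreads the rings across them.
 */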
/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did. Once basic reset is finished we'll confirm once we
	 * start negotiating config with PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct iavf_ring *tx_ring;
		struct iavf_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	return 0;

err_out:
	iavf_free_queues(adapter);
	return -ENOMEM;
}
/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);
	return err;
}
/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
	struct iavf_aqc_get_set_rss_key_data *rss_key =
		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct iavf_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				  adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;
}

/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

	iavf_flush(hw);

	return 0;
}

/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_config_rss(struct iavf_adapter *adapter)
{
	if (RSS_PF(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
					IAVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return iavf_config_rss_aq(adapter);
	} else {
		return iavf_config_rss_reg(adapter);
	}
}
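/* Selection logic (illustrative): RSS_PF() means the PF programs the
 * key/LUT on the VF's behalf over virtchnl, so only aq_required flags
 * are set here; RSS_AQ() uses the VF's own admin queue; otherwise the
 * VF falls back to writing the IAVF_VFQF_HKEY/HLUT registers directly.
 */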
/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}

/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_init_rss(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int ret;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = IAVF_DEFAULT_RSS_HENA;

		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	iavf_fill_rss_lut(adapter);
	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
	ret = iavf_config_rss(adapter);

	return ret;
}
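/* Example (illustrative): with a 64-entry LUT and 4 active queues,
 * iavf_fill_rss_lut() produces 0,1,2,3,0,1,2,3,... so hashed flows are
 * spread evenly across the 4 Rx queues by default; ethtool -X can
 * later override this indirection table.
 */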
/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct iavf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       iavf_napi_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}
1455
1456/**
129cf89e 1457 * iavf_reset_interrupt_capability - Reset MSIX setup
5eae00c5
GR
1458 * @adapter: board private structure
1459 *
1460 **/
129cf89e 1461void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
5eae00c5 1462{
47d2a5d8
AB
1463 if (!adapter->msix_entries)
1464 return;
1465
5eae00c5
GR
1466 pci_disable_msix(adapter->pdev);
1467 kfree(adapter->msix_entries);
1468 adapter->msix_entries = NULL;
5eae00c5
GR
1469}
1470
1471/**
129cf89e 1472 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
5eae00c5
GR
1473 * @adapter: board private structure to initialize
1474 *
1475 **/
129cf89e 1476int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
5eae00c5
GR
1477{
1478 int err;
1479
129cf89e 1480 err = iavf_alloc_queues(adapter);
283aeafe
JK
1481 if (err) {
1482 dev_err(&adapter->pdev->dev,
1483 "Unable to allocate memory for queues\n");
1484 goto err_alloc_queues;
1485 }
1486
62fe2a86 1487 rtnl_lock();
129cf89e 1488 err = iavf_set_interrupt_capability(adapter);
62fe2a86 1489 rtnl_unlock();
5eae00c5
GR
1490 if (err) {
1491 dev_err(&adapter->pdev->dev,
1492 "Unable to setup interrupt capabilities\n");
1493 goto err_set_interrupt;
1494 }
1495
129cf89e 1496 err = iavf_alloc_q_vectors(adapter);
5eae00c5
GR
1497 if (err) {
1498 dev_err(&adapter->pdev->dev,
1499 "Unable to allocate memory for queue vectors\n");
1500 goto err_alloc_q_vectors;
1501 }
1502
5e97ce63
AD
1503 /* If we've made it so far while ADq flag being ON, then we haven't
1504 * bailed out anywhere in middle. And ADq isn't just enabled but actual
1505 * resources have been allocated in the reset path.
1506 * Now we can truly claim that ADq is enabled.
1507 */
1508 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1509 adapter->num_tc)
1510 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
1511 adapter->num_tc);
1512
5eae00c5 1513 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
75a64435
MW
1514 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1515 adapter->num_active_queues);
5eae00c5
GR
1516
1517 return 0;
5eae00c5 1518err_alloc_q_vectors:
129cf89e 1519 iavf_reset_interrupt_capability(adapter);
5eae00c5 1520err_set_interrupt:
129cf89e 1521 iavf_free_queues(adapter);
283aeafe 1522err_alloc_queues:
5eae00c5
GR
1523 return err;
1524}
1525
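/* Setup order (illustrative summary): rings first (iavf_alloc_queues),
 * then MSI-X vectors (iavf_set_interrupt_capability, under rtnl_lock
 * because it resizes the netdev's real queue counts), then the
 * per-vector NAPI contexts (iavf_alloc_q_vectors); the error labels
 * unwind in exactly the reverse order.
 */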
/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void iavf_free_rss(struct iavf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}

/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev))
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);
err:
	return err;
}
5eae00c5 1574/**
b476b003
JP
1575 * iavf_process_aq_command - process aq_required flags
1576 * and sends aq command
1577 * @adapter: pointer to iavf adapter structure
1578 *
1579 * Returns 0 on success
1580 * Returns error code if no command was sent
1581 * or error code if the command failed.
5eae00c5 1582 **/
b476b003 1583static int iavf_process_aq_command(struct iavf_adapter *adapter)
5eae00c5 1584{
b476b003
JP
1585 if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
1586 return iavf_send_vf_config_msg(adapter);
129cf89e
JB
1587 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
1588 iavf_disable_queues(adapter);
b476b003 1589 return 0;
e284fc88
MW
1590 }
1591
129cf89e
JB
1592 if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
1593 iavf_map_queues(adapter);
b476b003 1594 return 0;
5eae00c5
GR
1595 }
1596
129cf89e
JB
1597 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
1598 iavf_add_ether_addrs(adapter);
b476b003 1599 return 0;
5eae00c5
GR
1600 }
1601
129cf89e
JB
1602 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
1603 iavf_add_vlans(adapter);
b476b003 1604 return 0;
5eae00c5
GR
1605 }
1606
129cf89e
JB
1607 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
1608 iavf_del_ether_addrs(adapter);
b476b003 1609 return 0;
5eae00c5
GR
1610 }
1611
129cf89e
JB
1612 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
1613 iavf_del_vlans(adapter);
b476b003 1614 return 0;
1615 }
1616
1617 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
1618 iavf_enable_vlan_stripping(adapter);
b476b003 1619 return 0;
1620 }
1621
1622 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
1623 iavf_disable_vlan_stripping(adapter);
b476b003 1624 return 0;
1625 }
1626
1627 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
1628 iavf_configure_queues(adapter);
b476b003 1629 return 0;
1630 }
1631
1632 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
1633 iavf_enable_queues(adapter);
b476b003 1634 return 0;
1635 }
1636
129cf89e 1637 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
1638 /* This message goes straight to the firmware, not the
1639 * PF, so we don't have to set current_op as we will
1640 * not get a response through the ARQ.
1641 */
129cf89e 1642 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
b476b003 1643 return 0;
e25d00b8 1644 }
1645 if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
1646 iavf_get_hena(adapter);
b476b003 1647 return 0;
43a3d9ba 1648 }
1649 if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
1650 iavf_set_hena(adapter);
b476b003 1651 return 0;
43a3d9ba 1652 }
1653 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
1654 iavf_set_rss_key(adapter);
b476b003 1655 return 0;
43a3d9ba 1656 }
1657 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
1658 iavf_set_rss_lut(adapter);
b476b003 1659 return 0;
43a3d9ba 1660 }
e25d00b8 1661
1662 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
1663 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
ff3f4cc2 1664 FLAG_VF_MULTICAST_PROMISC);
b476b003 1665 return 0;
1666 }
1667
1668 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
1669 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
b476b003 1670 return 0;
f42a5c74 1671 }
084a4685 1672 if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
1673 (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
1674 iavf_set_promiscuous(adapter, 0);
b476b003 1675 return 0;
47d34839 1676 }
d5b33d02 1677
1678 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
1679 iavf_enable_channels(adapter);
b476b003 1680 return 0;
1681 }
1682
1683 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
1684 iavf_disable_channels(adapter);
b476b003 1685 return 0;
d5b33d02 1686 }
1687 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
1688 iavf_add_cloud_filter(adapter);
b476b003 1689 return 0;
1690 }
1691
1692 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
1693 iavf_del_cloud_filter(adapter);
1694 return 0;
1695 }
1704 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
1705 iavf_add_fdir_filter(adapter);
1706 return IAVF_SUCCESS;
1707 }
1708 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
1709 iavf_del_fdir_filter(adapter);
1710 return IAVF_SUCCESS;
1711 }
1712 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
1713 iavf_add_adv_rss_cfg(adapter);
1714 return 0;
1715 }
1716 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
1717 iavf_del_adv_rss_cfg(adapter);
1718 return 0;
1719 }
1720 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
1721 iavf_request_stats(adapter);
1722 return 0;
1723 }
1724
1725 return -EAGAIN;
1726}
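/* Usage sketch (hypothetical helper, not in the driver): the watchdog calls
 * iavf_process_aq_command() once per pass, so a single aq_required flag is
 * serviced at a time. -EAGAIN means nothing was pending, which the
 * __IAVF_RUNNING state treats as an opportunity to request stats instead;
 * this mirrors the logic in iavf_watchdog_task() below.
 */
static void iavf_example_service_one_aq_flag(struct iavf_adapter *adapter)
{
	if (iavf_process_aq_command(adapter) &&
	    adapter->state == __IAVF_RUNNING)
		iavf_request_stats(adapter);
}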
1727
1728/**
1729 * iavf_startup - first step of driver startup
1730 * @adapter: board private structure
1731 *
1732 * Function processes the __IAVF_STARTUP driver state.
1733 * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
22773761 1734 * on failure the state is changed to __IAVF_INIT_FAILED.
b66c7bc1 1735 **/
22773761 1736static void iavf_startup(struct iavf_adapter *adapter)
1737{
1738 struct pci_dev *pdev = adapter->pdev;
1739 struct iavf_hw *hw = &adapter->hw;
1740 int err;
1741
1742 WARN_ON(adapter->state != __IAVF_STARTUP);
1743
1744 /* driver loaded, probe complete */
1745 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
1746 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
1747 err = iavf_set_mac_type(hw);
1748 if (err) {
1749 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
1750 goto err;
1751 }
1752
1753 err = iavf_check_reset_complete(hw);
1754 if (err) {
1755 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
1756 err);
1757 goto err;
1758 }
1759 hw->aq.num_arq_entries = IAVF_AQ_LEN;
1760 hw->aq.num_asq_entries = IAVF_AQ_LEN;
1761 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
1762 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
1763
1764 err = iavf_init_adminq(hw);
1765 if (err) {
1766 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
1767 goto err;
1768 }
1769 err = iavf_send_api_ver(adapter);
1770 if (err) {
1771 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
1772 iavf_shutdown_adminq(hw);
1773 goto err;
1774 }
a646d474 1775 iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
22773761 1776 return;
b66c7bc1 1777err:
22773761 1778 iavf_change_state(adapter, __IAVF_INIT_FAILED);
1779}
1780
1781/**
1782 * iavf_init_version_check - second step of driver startup
1783 * @adapter: board private structure
1784 *
1785 * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
1786 * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
22773761 1787 * on failure the state is changed to __IAVF_INIT_FAILED.
b66c7bc1 1788 **/
22773761 1789static void iavf_init_version_check(struct iavf_adapter *adapter)
1790{
1791 struct pci_dev *pdev = adapter->pdev;
1792 struct iavf_hw *hw = &adapter->hw;
1793 int err = -EAGAIN;
1794
1795 WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
1796
1797 if (!iavf_asq_done(hw)) {
1798 dev_err(&pdev->dev, "Admin queue command never completed\n");
1799 iavf_shutdown_adminq(hw);
a646d474 1800 iavf_change_state(adapter, __IAVF_STARTUP);
1801 goto err;
1802 }
1803
1804 /* aq msg sent, awaiting reply */
1805 err = iavf_verify_api_ver(adapter);
1806 if (err) {
1807 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
1808 err = iavf_send_api_ver(adapter);
1809 else
1810 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
1811 adapter->pf_version.major,
1812 adapter->pf_version.minor,
1813 VIRTCHNL_VERSION_MAJOR,
1814 VIRTCHNL_VERSION_MINOR);
1815 goto err;
1816 }
1817 err = iavf_send_vf_config_msg(adapter);
1818 if (err) {
1819 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
1820 err);
1821 goto err;
1822 }
a646d474 1823 iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
22773761 1824 return;
b66c7bc1 1825err:
22773761 1826 iavf_change_state(adapter, __IAVF_INIT_FAILED);
1827}
1828
1829/**
1830 * iavf_init_get_resources - third step of driver startup
1831 * @adapter: board private structure
1832 *
1833 * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
1834 * finishes the driver initialization procedure.
1835 * On success the state is changed to __IAVF_DOWN;
22773761 1836 * on failure the state is changed to __IAVF_INIT_FAILED.
b66c7bc1 1837 **/
22773761 1838static void iavf_init_get_resources(struct iavf_adapter *adapter)
1839{
1840 struct net_device *netdev = adapter->netdev;
1841 struct pci_dev *pdev = adapter->pdev;
1842 struct iavf_hw *hw = &adapter->hw;
e0ef26fb 1843 int err;
1844
1845 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
1846 /* aq msg sent, awaiting reply */
1847 if (!adapter->vf_res) {
1848 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
1849 GFP_KERNEL);
1850 if (!adapter->vf_res) {
1851 err = -ENOMEM;
b66c7bc1 1852 goto err;
e0ef26fb 1853 }
1854 }
1855 err = iavf_get_vf_config(adapter);
1856 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
1857 err = iavf_send_vf_config_msg(adapter);
1858 goto err;
1859 } else if (err == IAVF_ERR_PARAM) {
1860 /* We only get ERR_PARAM if the device is in a very bad
1861 * state or if we've been disabled for previous bad
1862 * behavior. Either way, we're done now.
1863 */
1864 iavf_shutdown_adminq(hw);
1865 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
22773761 1866 return;
1867 }
1868 if (err) {
1869 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
1870 goto err_alloc;
1871 }
1872
1873 err = iavf_process_config(adapter);
1874 if (err)
1875 goto err_alloc;
1876 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1877
1878 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
1879
1880 netdev->netdev_ops = &iavf_netdev_ops;
1881 iavf_set_ethtool_ops(netdev);
1882 netdev->watchdog_timeo = 5 * HZ;
1883
1884 /* MTU range: 68 - 9710 */
1885 netdev->min_mtu = ETH_MIN_MTU;
1886 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
1887
1888 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1889 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
1890 adapter->hw.mac.addr);
1891 eth_hw_addr_random(netdev);
1892 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
1893 } else {
1894 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
1895 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
1896 }
1897
1898 adapter->tx_desc_count = IAVF_DEFAULT_TXD;
1899 adapter->rx_desc_count = IAVF_DEFAULT_RXD;
1900 err = iavf_init_interrupt_scheme(adapter);
1901 if (err)
1902 goto err_sw_init;
1903 iavf_map_rings_to_vectors(adapter);
1904 if (adapter->vf_res->vf_cap_flags &
1905 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1906 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
1907
1908 err = iavf_request_misc_irq(adapter);
1909 if (err)
1910 goto err_sw_init;
1911
1912 netif_carrier_off(netdev);
1913 adapter->link_up = false;
1914
1915 /* set the semaphore to prevent any callbacks after device registration
1916 * up to time when state of driver will be set to __IAVF_DOWN
1917 */
1918 rtnl_lock();
1919 if (!adapter->netdev_registered) {
1920 err = register_netdevice(netdev);
1921 if (err) {
1922 rtnl_unlock();
1923 goto err_register;
1924 }
1925 }
1926
1927 adapter->netdev_registered = true;
1928
1929 netif_tx_stop_all_queues(netdev);
1930 if (CLIENT_ALLOWED(adapter)) {
1931 err = iavf_lan_add_device(adapter);
f1340265 1932 if (err)
1933 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
1934 err);
1935 }
1936 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
1937 if (netdev->features & NETIF_F_GRO)
1938 dev_info(&pdev->dev, "GRO is enabled\n");
1939
a646d474 1940 iavf_change_state(adapter, __IAVF_DOWN);
1941 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1942 rtnl_unlock();
1943
1944 iavf_misc_irq_enable(adapter);
1945 wake_up(&adapter->down_waitqueue);
1946
1947 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
1948 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
1949 if (!adapter->rss_key || !adapter->rss_lut) {
1950 err = -ENOMEM;
b66c7bc1 1951 goto err_mem;
753f3884 1952 }
1953 if (RSS_AQ(adapter))
1954 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
1955 else
1956 iavf_init_rss(adapter);
1957
22773761 1958 return;
1959err_mem:
1960 iavf_free_rss(adapter);
1961err_register:
1962 iavf_free_misc_irq(adapter);
1963err_sw_init:
1964 iavf_reset_interrupt_capability(adapter);
1965err_alloc:
1966 kfree(adapter->vf_res);
1967 adapter->vf_res = NULL;
1968err:
22773761 1969 iavf_change_state(adapter, __IAVF_INIT_FAILED);
1970}
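/* Illustrative sketch (hypothetical helper, not in the driver): the init
 * sequence that iavf_watchdog_task() below drives, one step per pass:
 * __IAVF_STARTUP -> __IAVF_INIT_VERSION_CHECK -> __IAVF_INIT_GET_RESOURCES
 * -> __IAVF_DOWN, with any failure parking the state in __IAVF_INIT_FAILED
 * so the watchdog can retry from adapter->last_state.
 */
static void iavf_example_run_init_step(struct iavf_adapter *adapter)
{
	switch (adapter->state) {
	case __IAVF_STARTUP:
		iavf_startup(adapter);
		break;
	case __IAVF_INIT_VERSION_CHECK:
		iavf_init_version_check(adapter);
		break;
	case __IAVF_INIT_GET_RESOURCES:
		iavf_init_get_resources(adapter);
		break;
	default:
		break;
	}
}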
1971
1972/**
1973 * iavf_watchdog_task - Periodic call-back task
1974 * @work: pointer to work_struct
1975 **/
1976static void iavf_watchdog_task(struct work_struct *work)
1977{
1978 struct iavf_adapter *adapter = container_of(work,
1979 struct iavf_adapter,
fdd4044f 1980 watchdog_task.work);
1981 struct iavf_hw *hw = &adapter->hw;
1982 u32 reg_val;
1983
1984 if (!mutex_trylock(&adapter->crit_lock)) {
1985 if (adapter->state == __IAVF_REMOVE)
1986 return;
1987
b476b003 1988 goto restart_watchdog;
3986c698 1989 }
b476b003 1990
bac84861 1991 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
a646d474 1992 iavf_change_state(adapter, __IAVF_COMM_FAILED);
bac84861 1993
664c0ec7 1994 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
1995 adapter->aq_required = 0;
1996 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1997 mutex_unlock(&adapter->crit_lock);
1998 queue_work(iavf_wq, &adapter->reset_task);
1999 return;
2000 }
2001
bac84861 2002 switch (adapter->state) {
2003 case __IAVF_STARTUP:
2004 iavf_startup(adapter);
2005 mutex_unlock(&adapter->crit_lock);
2006 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2007 msecs_to_jiffies(30));
2008 return;
2009 case __IAVF_INIT_VERSION_CHECK:
2010 iavf_init_version_check(adapter);
2011 mutex_unlock(&adapter->crit_lock);
2012 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2013 msecs_to_jiffies(30));
2014 return;
2015 case __IAVF_INIT_GET_RESOURCES:
2016 iavf_init_get_resources(adapter);
2017 mutex_unlock(&adapter->crit_lock);
2018 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2019 msecs_to_jiffies(1));
2020 return;
2021 case __IAVF_INIT_FAILED:
2022 if (test_bit(__IAVF_IN_REMOVE_TASK,
2023 &adapter->crit_section)) {
2024 /* Do not update the state and do not reschedule
2025 * watchdog task, iavf_remove should handle this state
2026 * as it can loop forever
2027 */
2028 mutex_unlock(&adapter->crit_lock);
2029 return;
2030 }
2031 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
2032 dev_err(&adapter->pdev->dev,
2033 "Failed to communicate with PF; waiting before retry\n");
2034 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2035 iavf_shutdown_adminq(hw);
2036 mutex_unlock(&adapter->crit_lock);
2037 queue_delayed_work(iavf_wq,
2038 &adapter->watchdog_task, (5 * HZ));
2039 return;
2040 }
2041 /* Try again from the failed step */
2042 iavf_change_state(adapter, adapter->last_state);
2043 mutex_unlock(&adapter->crit_lock);
2044 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
2045 return;
bac84861 2046 case __IAVF_COMM_FAILED:
2047 if (test_bit(__IAVF_IN_REMOVE_TASK,
2048 &adapter->crit_section)) {
2049 /* Set state to __IAVF_INIT_FAILED and perform remove
2050 * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
2051 * doesn't bring the state back to __IAVF_COMM_FAILED.
2052 */
2053 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2054 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2055 mutex_unlock(&adapter->crit_lock);
2056 return;
2057 }
2058 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2059 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2060 if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
2061 reg_val == VIRTCHNL_VFR_COMPLETED) {
2062 /* A chance for redemption! */
2063 dev_err(&adapter->pdev->dev,
2064 "Hardware came out of reset. Attempting reinit.\n");
ce42e26c 2065 /* When init task contacts the PF and
2066 * gets everything set up again, it'll restart the
2067 * watchdog for us. Down, boy. Sit. Stay. Woof.
2068 */
2069 iavf_change_state(adapter, __IAVF_STARTUP);
2070 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2071 }
2072 adapter->aq_required = 0;
2073 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
249f1303 2074 mutex_unlock(&adapter->crit_lock);
2075 queue_delayed_work(iavf_wq,
2076 &adapter->watchdog_task,
2077 msecs_to_jiffies(10));
ce42e26c 2078 return;
bac84861 2079 case __IAVF_RESETTING:
5ac49f3c 2080 mutex_unlock(&adapter->crit_lock);
2081 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2082 return;
2083 case __IAVF_DOWN:
2084 case __IAVF_DOWN_PENDING:
2085 case __IAVF_TESTING:
2086 case __IAVF_RUNNING:
2087 if (adapter->current_op) {
2088 if (!iavf_asq_done(hw)) {
2089 dev_dbg(&adapter->pdev->dev,
2090 "Admin queue timeout\n");
2091 iavf_send_api_ver(adapter);
2092 }
2093 } else {
2094 /* An error will be returned if no commands were
2095 * processed; use this opportunity to update stats
2096 */
2097 if (iavf_process_aq_command(adapter) &&
2098 adapter->state == __IAVF_RUNNING)
2099 iavf_request_stats(adapter);
2100 }
2101 if (adapter->state == __IAVF_RUNNING)
2102 iavf_detect_recover_hung(&adapter->vsi);
2103 break;
2104 case __IAVF_REMOVE:
bac84861 2105 default:
249f1303 2106 mutex_unlock(&adapter->crit_lock);
ce42e26c 2107 return;
2108 }
2109
a646d474 2110 /* check for hw reset */
b476b003 2111 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
bac84861 2112 if (!reg_val) {
b476b003 2113 adapter->flags |= IAVF_FLAG_RESET_PENDING;
2114 adapter->aq_required = 0;
2115 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2116 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
2117 queue_work(iavf_wq, &adapter->reset_task);
2118 mutex_unlock(&adapter->crit_lock);
2119 queue_delayed_work(iavf_wq,
2120 &adapter->watchdog_task, HZ * 2);
2121 return;
2122 }
2123
b476b003 2124 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
5ac49f3c 2125 mutex_unlock(&adapter->crit_lock);
ef8693eb 2126restart_watchdog:
2127 if (adapter->state >= __IAVF_DOWN)
2128 queue_work(iavf_wq, &adapter->adminq_task);
5eae00c5 2129 if (adapter->aq_required)
2130 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2131 msecs_to_jiffies(20));
5eae00c5 2132 else
fdd4044f 2133 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2134}
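/* Illustrative summary (hypothetical helper, not in the driver): the re-arm
 * delays used by iavf_watchdog_task() above, collected in one place. The
 * values are copied from the function; __IAVF_INIT_FAILED additionally uses
 * HZ (retry) or 5 * HZ (PF comms failed), which this sketch omits.
 */
static unsigned long iavf_example_watchdog_delay(struct iavf_adapter *adapter)
{
	switch (adapter->state) {
	case __IAVF_STARTUP:
	case __IAVF_INIT_VERSION_CHECK:
		return msecs_to_jiffies(30);	/* poll init steps quickly */
	case __IAVF_INIT_GET_RESOURCES:
		return msecs_to_jiffies(1);
	case __IAVF_COMM_FAILED:
		return msecs_to_jiffies(10);	/* waiting for VFR_VFACTIVE */
	case __IAVF_RESETTING:
		return HZ * 2;
	default:
		/* running/down: re-arm fast while AQ work is pending */
		return adapter->aq_required ? msecs_to_jiffies(20) : HZ * 2;
	}
}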
2135
2136/**
2137 * iavf_disable_vf - disable VF
2138 * @adapter: board private structure
2139 *
2140 * Set communication failed flag and free all resources.
2141 * NOTE: This function is expected to be called with crit_lock being held.
2142 **/
129cf89e 2143static void iavf_disable_vf(struct iavf_adapter *adapter)
dedecb6d 2144{
2145 struct iavf_mac_filter *f, *ftmp;
2146 struct iavf_vlan_filter *fv, *fvtmp;
2147 struct iavf_cloud_filter *cf, *cftmp;
dedecb6d 2148
129cf89e 2149 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
dedecb6d 2150
2151 /* We don't use netif_running() because it may be true prior to
2152 * ndo_open() returning, so we can't assume it means all our open
2153 * tasks have finished, since we're not holding the rtnl_lock here.
2154 */
129cf89e 2155 if (adapter->state == __IAVF_RUNNING) {
56184e01 2156 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2157 netif_carrier_off(adapter->netdev);
2158 netif_tx_disable(adapter->netdev);
2159 adapter->link_up = false;
2160 iavf_napi_disable_all(adapter);
2161 iavf_irq_disable(adapter);
2162 iavf_free_traffic_irqs(adapter);
2163 iavf_free_all_tx_resources(adapter);
2164 iavf_free_all_rx_resources(adapter);
2165 }
2166
2167 spin_lock_bh(&adapter->mac_vlan_list_lock);
2168
0075fa0f 2169 /* Delete all of the filters */
2170 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2171 list_del(&f->list);
2172 kfree(f);
2173 }
2174
2175 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
2176 list_del(&fv->list);
2177 kfree(fv);
2178 }
2179
2180 spin_unlock_bh(&adapter->mac_vlan_list_lock);
2181
2182 spin_lock_bh(&adapter->cloud_filter_list_lock);
2183 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
2184 list_del(&cf->list);
2185 kfree(cf);
2186 adapter->num_cloud_filters--;
2187 }
2188 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2189
2190 iavf_free_misc_irq(adapter);
2191 iavf_reset_interrupt_capability(adapter);
129cf89e 2192 iavf_free_q_vectors(adapter);
f0d8f344 2193 iavf_free_queues(adapter);
e0ef26fb 2194 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
129cf89e 2195 iavf_shutdown_adminq(&adapter->hw);
dedecb6d 2196 adapter->netdev->flags &= ~IFF_UP;
129cf89e 2197 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
a646d474 2198 iavf_change_state(adapter, __IAVF_DOWN);
fe2647ab 2199 wake_up(&adapter->down_waitqueue);
2200 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
2201}
2202
5eae00c5 2203/**
129cf89e 2204 * iavf_reset_task - Call-back task to handle hardware reset
2205 * @work: pointer to work_struct
2206 *
2207 * During reset we need to shut down and reinitialize the admin queue
2208 * before we can use it to communicate with the PF again. We also clear
2209 * and reinit the rings because that context is lost as well.
2210 **/
129cf89e 2211static void iavf_reset_task(struct work_struct *work)
5eae00c5 2212{
2213 struct iavf_adapter *adapter = container_of(work,
2214 struct iavf_adapter,
ef8693eb 2215 reset_task);
0075fa0f 2216 struct virtchnl_vf_resource *vfres = adapter->vf_res;
ac833bbf 2217 struct net_device *netdev = adapter->netdev;
f349daa5 2218 struct iavf_hw *hw = &adapter->hw;
9e052291 2219 struct iavf_mac_filter *f, *ftmp;
129cf89e 2220 struct iavf_cloud_filter *cf;
ee5c1e92 2221 u32 reg_val;
ac833bbf 2222 int i = 0, err;
44b034b4 2223 bool running;
5eae00c5 2224
2225 /* When device is being removed it doesn't make sense to run the reset
2226 * task, just return in such a case.
2227 */
2228 if (!mutex_trylock(&adapter->crit_lock)) {
2229 if (adapter->state != __IAVF_REMOVE)
2230 queue_work(iavf_wq, &adapter->reset_task);
06aa040f 2231
2232 return;
2233 }
3986c698 2234
5ac49f3c 2235 while (!mutex_trylock(&adapter->client_lock))
f98a2006 2236 usleep_range(500, 1000);
ed0e894d 2237 if (CLIENT_ENABLED(adapter)) {
2238 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
2239 IAVF_FLAG_CLIENT_NEEDS_CLOSE |
2240 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
2241 IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
ed0e894d 2242 cancel_delayed_work_sync(&adapter->client_task);
129cf89e 2243 iavf_notify_client_close(&adapter->vsi, true);
ed0e894d 2244 }
2245 iavf_misc_irq_disable(adapter);
2246 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2247 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
2248 /* Restart the AQ here. If we have been reset but didn't
2249 * detect it, or if the PF had to reinit, our AQ will be hosed.
2250 */
2251 iavf_shutdown_adminq(hw);
2252 iavf_init_adminq(hw);
2253 iavf_request_reset(adapter);
3526d800 2254 }
129cf89e 2255 adapter->flags |= IAVF_FLAG_RESET_PENDING;
3526d800 2256
ef8693eb 2257 /* poll until we see the reset actually happen */
8e3e4b9d 2258 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2259 reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2260 IAVF_VF_ARQLEN1_ARQENABLE_MASK;
ee5c1e92 2261 if (!reg_val)
ef8693eb 2262 break;
ee5c1e92 2263 usleep_range(5000, 10000);
ef8693eb 2264 }
8e3e4b9d 2265 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
67c818a1 2266 dev_info(&adapter->pdev->dev, "Never saw reset\n");
2267 goto continue_reset; /* act like the reset happened */
2268 }
5eae00c5 2269
ef8693eb 2270 /* wait until the reset is complete and the PF is responding to us */
8e3e4b9d 2271 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
7d3f04af 2272 /* sleep first to make sure a minimum wait time is met */
129cf89e 2273 msleep(IAVF_RESET_WAIT_MS);
7d3f04af 2274
2275 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2276 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
310a2ad9 2277 if (reg_val == VIRTCHNL_VFR_VFACTIVE)
5eae00c5 2278 break;
5eae00c5 2279 }
7d3f04af 2280
509a447a 2281 pci_set_master(adapter->pdev);
b57ead90 2282 pci_restore_msi_state(adapter->pdev);
7d3f04af 2283
8e3e4b9d 2284 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
80e72893 2285 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
ee5c1e92 2286 reg_val);
129cf89e 2287 iavf_disable_vf(adapter);
5ac49f3c 2288 mutex_unlock(&adapter->client_lock);
23139372 2289 mutex_unlock(&adapter->crit_lock);
ef8693eb 2290 return; /* Do not attempt to reinit. It's dead, Jim. */
5eae00c5 2291 }
2292
2293continue_reset:
2294 /* We don't use netif_running() because it may be true prior to
2295 * ndo_open() returning, so we can't assume it means all our open
2296 * tasks have finished, since we're not holding the rtnl_lock here.
2297 */
664c0ec7 2298 running = adapter->state == __IAVF_RUNNING;
2299
2300 if (running) {
3c8e0b98 2301 netif_carrier_off(netdev);
67c818a1 2302 netif_tx_stop_all_queues(netdev);
3f341acc 2303 adapter->link_up = false;
129cf89e 2304 iavf_napi_disable_all(adapter);
3c8e0b98 2305 }
129cf89e 2306 iavf_irq_disable(adapter);
ac833bbf 2307
a646d474 2308 iavf_change_state(adapter, __IAVF_RESETTING);
129cf89e 2309 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2310
2311 /* free the Tx/Rx rings and descriptors, might be better to just
2312 * re-use them sometime in the future
2313 */
2314 iavf_free_all_rx_resources(adapter);
2315 iavf_free_all_tx_resources(adapter);
5eae00c5 2316
129cf89e 2317 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
5eae00c5 2318 /* kill and reinit the admin queue */
129cf89e 2319 iavf_shutdown_adminq(hw);
310a2ad9 2320 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
129cf89e 2321 err = iavf_init_adminq(hw);
5eae00c5 2322 if (err)
2323 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
2324 err);
2325 adapter->aq_required = 0;
2326
2327 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
2328 err = iavf_reinit_interrupt_scheme(adapter);
2329 if (err)
2330 goto reset_err;
2331 }
5eae00c5 2332
2333 if (RSS_AQ(adapter)) {
2334 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2335 } else {
2336 err = iavf_init_rss(adapter);
2337 if (err)
2338 goto reset_err;
2339 }
2340
2341 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
2342 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
ac833bbf 2343
2344 spin_lock_bh(&adapter->mac_vlan_list_lock);
2345
2346 /* Delete filter for the current MAC address, it could have
2347 * been changed by the PF via administratively set MAC.
2348 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
2349 */
2350 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2351 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
2352 list_del(&f->list);
2353 kfree(f);
2354 }
2355 }
2356 /* re-add all MAC filters */
2357 list_for_each_entry(f, &adapter->mac_filter_list, list) {
2358 f->add = true;
2359 }
2360 spin_unlock_bh(&adapter->mac_vlan_list_lock);
2361
2362 /* check if TCs are running and re-add all cloud filters */
2363 spin_lock_bh(&adapter->cloud_filter_list_lock);
2364 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
2365 adapter->num_tc) {
2366 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
2367 cf->add = true;
2368 }
2369 }
2370 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2371
129cf89e 2372 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
2373 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
2374 iavf_misc_irq_enable(adapter);
5eae00c5 2375
fdd4044f 2376 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
5eae00c5 2377
2378 /* We were running when the reset started, so we need to restore some
2379 * state here.
2380 */
2381 if (running) {
5eae00c5 2382 /* allocate transmit descriptors */
129cf89e 2383 err = iavf_setup_all_tx_resources(adapter);
2384 if (err)
2385 goto reset_err;
2386
2387 /* allocate receive descriptors */
129cf89e 2388 err = iavf_setup_all_rx_resources(adapter);
2389 if (err)
2390 goto reset_err;
2391
2392 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
2393 err = iavf_request_traffic_irqs(adapter, netdev->name);
2394 if (err)
2395 goto reset_err;
2396
129cf89e 2397 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2398 }
2399
129cf89e 2400 iavf_configure(adapter);
5eae00c5 2401
2402 /* iavf_up_complete() will switch device back
2403 * to __IAVF_RUNNING
2404 */
129cf89e 2405 iavf_up_complete(adapter);
5eae00c5 2406
129cf89e 2407 iavf_irq_enable(adapter, true);
67c818a1 2408 } else {
a646d474 2409 iavf_change_state(adapter, __IAVF_DOWN);
fe2647ab 2410 wake_up(&adapter->down_waitqueue);
5eae00c5 2411 }
2412 mutex_unlock(&adapter->client_lock);
2413 mutex_unlock(&adapter->crit_lock);
67c818a1 2414
2415 return;
2416reset_err:
2417 mutex_unlock(&adapter->client_lock);
2418 mutex_unlock(&adapter->crit_lock);
2419 if (running)
2420 iavf_change_state(adapter, __IAVF_RUNNING);
80e72893 2421 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
129cf89e 2422 iavf_close(netdev);
2423}
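/* Usage sketch (hypothetical wrapper, not in the driver): how other
 * contexts kick the reset task. This mirrors iavf_watchdog_task() above,
 * which sets a reset flag, clears pending AQ work and queues the work item.
 */
static void iavf_example_schedule_reset(struct iavf_adapter *adapter)
{
	adapter->flags |= IAVF_FLAG_RESET_NEEDED;
	queue_work(iavf_wq, &adapter->reset_task);
}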
2424
2425/**
129cf89e 2426 * iavf_adminq_task - worker thread to clean the admin queue
2427 * @work: pointer to work_struct containing our data
2428 **/
129cf89e 2429static void iavf_adminq_task(struct work_struct *work)
5eae00c5 2430{
2431 struct iavf_adapter *adapter =
2432 container_of(work, struct iavf_adapter, adminq_task);
f349daa5 2433 struct iavf_hw *hw = &adapter->hw;
7af36e32 2434 struct iavf_arq_event_info event;
c969ef4e 2435 enum virtchnl_ops v_op;
80754bbc 2436 enum iavf_status ret, v_ret;
912257e5 2437 u32 val, oldval;
2438 u16 pending;
2439
129cf89e 2440 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
7235448c 2441 goto out;
ef8693eb 2442
2443 if (!mutex_trylock(&adapter->crit_lock)) {
2444 if (adapter->state == __IAVF_REMOVE)
2445 return;
2446
2447 queue_work(iavf_wq, &adapter->adminq_task);
2448 goto out;
2449 }
2450
129cf89e 2451 event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
1001dc37 2452 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
249c8b8d 2453 if (!event.msg_buf)
7235448c 2454 goto out;
249c8b8d 2455
5eae00c5 2456 do {
129cf89e 2457 ret = iavf_clean_arq_element(hw, &event, &pending);
c969ef4e 2458 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
80754bbc 2459 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
2460
2461 if (ret || !v_op)
2462 break; /* No event to process or error cleaning ARQ */
2463
2464 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
2465 event.msg_len);
75a64435 2466 if (pending != 0)
129cf89e 2467 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
5eae00c5 2468 } while (pending);
5ac49f3c 2469 mutex_unlock(&adapter->crit_lock);
5eae00c5 2470
2471 if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
2472 if (adapter->netdev_registered ||
2473 !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
2474 struct net_device *netdev = adapter->netdev;
2475
2476 rtnl_lock();
2477 netdev_update_features(netdev);
2478 rtnl_unlock();
2479 }
2480
2481 adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
2482 }
67c818a1 2483 if ((adapter->flags &
2484 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
2485 adapter->state == __IAVF_RESETTING)
2486 goto freedom;
2487
2488 /* check for error indications */
2489 val = rd32(hw, hw->aq.arq.len);
2b3fd880 2490 if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
19b73d8e 2491 goto freedom;
912257e5 2492 oldval = val;
f1cad2ce 2493 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
912257e5 2494 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
f1cad2ce 2495 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
912257e5 2496 }
f1cad2ce 2497 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
912257e5 2498 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
f1cad2ce 2499 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
912257e5 2500 }
f1cad2ce 2501 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
912257e5 2502 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
f1cad2ce 2503 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
2504 }
2505 if (oldval != val)
2506 wr32(hw, hw->aq.arq.len, val);
2507
2508 val = rd32(hw, hw->aq.asq.len);
2509 oldval = val;
f1cad2ce 2510 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
912257e5 2511 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
f1cad2ce 2512 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
912257e5 2513 }
f1cad2ce 2514 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
912257e5 2515 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
f1cad2ce 2516 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
912257e5 2517 }
f1cad2ce 2518 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
912257e5 2519 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
f1cad2ce 2520 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
2521 }
2522 if (oldval != val)
2523 wr32(hw, hw->aq.asq.len, val);
2524
67c818a1 2525freedom:
2526 kfree(event.msg_buf);
2527out:
5eae00c5 2528 /* re-enable Admin queue interrupt cause */
129cf89e 2529 iavf_misc_irq_enable(adapter);
2530}
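/* Illustrative sketch (hypothetical helpers, not in the driver): the
 * virtchnl opcode and status of an ARQ event travel in the descriptor
 * cookies, exactly as iavf_adminq_task() above decodes them.
 */
static enum virtchnl_ops iavf_example_event_op(struct iavf_arq_event_info *e)
{
	return (enum virtchnl_ops)le32_to_cpu(e->desc.cookie_high);
}

static enum iavf_status iavf_example_event_ret(struct iavf_arq_event_info *e)
{
	return (enum iavf_status)le32_to_cpu(e->desc.cookie_low);
}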
2531
ed0e894d 2532/**
129cf89e 2533 * iavf_client_task - worker thread to perform client work
ed0e894d
MW
2534 * @work: pointer to work_struct containing our data
2535 *
2536 * This task handles client interactions. Because client calls can be
2537 * reentrant, we can't handle them in the watchdog.
2538 **/
129cf89e 2539static void iavf_client_task(struct work_struct *work)
ed0e894d 2540{
2541 struct iavf_adapter *adapter =
2542 container_of(work, struct iavf_adapter, client_task.work);
2543
2544 /* If we can't get the client bit, just give up. We'll be rescheduled
2545 * later.
2546 */
2547
5ac49f3c 2548 if (!mutex_trylock(&adapter->client_lock))
2549 return;
2550
2551 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2552 iavf_client_subtask(adapter);
2553 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
2554 goto out;
2555 }
2556 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2557 iavf_notify_client_l2_params(&adapter->vsi);
2558 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2559 goto out;
2560 }
2561 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
2562 iavf_notify_client_close(&adapter->vsi, false);
2563 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
2564 goto out;
2565 }
2566 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
2567 iavf_notify_client_open(&adapter->vsi);
2568 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
2569 }
2570out:
5ac49f3c 2571 mutex_unlock(&adapter->client_lock);
2572}
2573
5eae00c5 2574/**
129cf89e 2575 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
2576 * @adapter: board private structure
2577 *
2578 * Free all transmit software resources
2579 **/
129cf89e 2580void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
2581{
2582 int i;
2583
2584 if (!adapter->tx_rings)
2585 return;
2586
cc052927 2587 for (i = 0; i < adapter->num_active_queues; i++)
0dd438d8 2588 if (adapter->tx_rings[i].desc)
129cf89e 2589 iavf_free_tx_resources(&adapter->tx_rings[i]);
2590}
2591
2592/**
129cf89e 2593 * iavf_setup_all_tx_resources - allocate all queues Tx resources
2594 * @adapter: board private structure
2595 *
2596 * If this function returns with an error, then it's possible one or
2597 * more of the rings is populated (while the rest are not). It is the
2598 * caller's duty to clean those orphaned rings.
2599 *
2600 * Return 0 on success, negative on failure
2601 **/
129cf89e 2602static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
2603{
2604 int i, err = 0;
2605
cc052927 2606 for (i = 0; i < adapter->num_active_queues; i++) {
0dd438d8 2607 adapter->tx_rings[i].count = adapter->tx_desc_count;
129cf89e 2608 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
2609 if (!err)
2610 continue;
2611 dev_err(&adapter->pdev->dev,
fb43201f 2612 "Allocation for Tx Queue %u failed\n", i);
2613 break;
2614 }
2615
2616 return err;
2617}
2618
2619/**
129cf89e 2620 * iavf_setup_all_rx_resources - allocate all queues Rx resources
2621 * @adapter: board private structure
2622 *
2623 * If this function returns with an error, then it's possible one or
2624 * more of the rings is populated (while the rest are not). It is the
2625 * caller's duty to clean those orphaned rings.
2626 *
2627 * Return 0 on success, negative on failure
2628 **/
129cf89e 2629static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
2630{
2631 int i, err = 0;
2632
cc052927 2633 for (i = 0; i < adapter->num_active_queues; i++) {
0dd438d8 2634 adapter->rx_rings[i].count = adapter->rx_desc_count;
129cf89e 2635 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
2636 if (!err)
2637 continue;
2638 dev_err(&adapter->pdev->dev,
fb43201f 2639 "Allocation for Rx Queue %u failed\n", i);
2640 break;
2641 }
2642 return err;
2643}
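/* Usage sketch (hypothetical helper, not in the driver), following the
 * comments above: on failure some rings may already be populated, so the
 * caller cleans the orphans. The reset path instead reacts by closing the
 * netdev (see iavf_reset_task() above).
 */
static int iavf_example_setup_rings(struct iavf_adapter *adapter)
{
	int err;

	err = iavf_setup_all_tx_resources(adapter);
	if (!err)
		err = iavf_setup_all_rx_resources(adapter);
	if (err) {
		iavf_free_all_tx_resources(adapter);
		iavf_free_all_rx_resources(adapter);
	}

	return err;
}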
2644
2645/**
129cf89e 2646 * iavf_free_all_rx_resources - Free Rx Resources for All Queues
2647 * @adapter: board private structure
2648 *
2649 * Free all receive software resources
2650 **/
129cf89e 2651void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
2652{
2653 int i;
2654
2655 if (!adapter->rx_rings)
2656 return;
2657
cc052927 2658 for (i = 0; i < adapter->num_active_queues; i++)
0dd438d8 2659 if (adapter->rx_rings[i].desc)
129cf89e 2660 iavf_free_rx_resources(&adapter->rx_rings[i]);
2661}
2662
591532d6 2663/**
129cf89e 2664 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
2665 * @adapter: board private structure
2666 * @max_tx_rate: max Tx bw for a tc
2667 **/
2668static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
2669 u64 max_tx_rate)
2670{
2671 int speed = 0, ret = 0;
2672
2673 if (ADV_LINK_SUPPORT(adapter)) {
2674 if (adapter->link_speed_mbps < U32_MAX) {
2675 speed = adapter->link_speed_mbps;
2676 goto validate_bw;
2677 } else {
2678 dev_err(&adapter->pdev->dev, "Unknown link speed\n");
2679 return -EINVAL;
2680 }
2681 }
2682
591532d6 2683 switch (adapter->link_speed) {
5071bda2 2684 case VIRTCHNL_LINK_SPEED_40GB:
18c012d9 2685 speed = SPEED_40000;
591532d6 2686 break;
5071bda2 2687 case VIRTCHNL_LINK_SPEED_25GB:
18c012d9 2688 speed = SPEED_25000;
591532d6 2689 break;
5071bda2 2690 case VIRTCHNL_LINK_SPEED_20GB:
18c012d9 2691 speed = SPEED_20000;
591532d6 2692 break;
5071bda2 2693 case VIRTCHNL_LINK_SPEED_10GB:
2694 speed = SPEED_10000;
2695 break;
2696 case VIRTCHNL_LINK_SPEED_5GB:
2697 speed = SPEED_5000;
2698 break;
2699 case VIRTCHNL_LINK_SPEED_2_5GB:
2700 speed = SPEED_2500;
591532d6 2701 break;
5071bda2 2702 case VIRTCHNL_LINK_SPEED_1GB:
18c012d9 2703 speed = SPEED_1000;
591532d6 2704 break;
5071bda2 2705 case VIRTCHNL_LINK_SPEED_100MB:
18c012d9 2706 speed = SPEED_100;
2707 break;
2708 default:
2709 break;
2710 }
2711
e0ef26fb 2712validate_bw:
2713 if (max_tx_rate > speed) {
2714 dev_err(&adapter->pdev->dev,
2715 "Invalid tx rate specified\n");
2716 ret = -EINVAL;
2717 }
2718
2719 return ret;
2720}
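/* Unit-conversion example (hypothetical helper, not in the driver),
 * assuming IAVF_MBPS_DIVISOR converts the byte-per-second rates handed in
 * by tc to Mbit/s, as the "convert to Mbps" comment in
 * iavf_validate_ch_config() below indicates.
 */
static u64 iavf_example_bytes_per_sec_to_mbps(u64 bytes_per_sec)
{
	return div_u64(bytes_per_sec, IAVF_MBPS_DIVISOR);
}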
2721
d5b33d02 2722/**
262de08f 2723 * iavf_validate_ch_config - validate queue mapping info
2724 * @adapter: board private structure
2725 * @mqprio_qopt: queue parameters
2726 *
2727 * This function validates if the config provided by the user to
2728 * configure queue channels is valid or not. Returns 0 on a valid
2729 * config.
2730 **/
2731static int iavf_validate_ch_config(struct iavf_adapter *adapter,
2732 struct tc_mqprio_qopt_offload *mqprio_qopt)
d5b33d02 2733{
591532d6 2734 u64 total_max_rate = 0;
d5b33d02 2735 int i, num_qps = 0;
2736 u64 tx_rate = 0;
2737 int ret = 0;
d5b33d02 2738
129cf89e 2739 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
2740 mqprio_qopt->qopt.num_tc < 1)
2741 return -EINVAL;
2742
2743 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
2744 if (!mqprio_qopt->qopt.count[i] ||
2745 mqprio_qopt->qopt.offset[i] != num_qps)
2746 return -EINVAL;
2747 if (mqprio_qopt->min_rate[i]) {
2748 dev_err(&adapter->pdev->dev,
2749 "Invalid min tx rate (greater than 0) specified\n");
2750 return -EINVAL;
2751 }
2752 /* convert to Mbps */
2753 tx_rate = div_u64(mqprio_qopt->max_rate[i],
129cf89e 2754 IAVF_MBPS_DIVISOR);
591532d6 2755 total_max_rate += tx_rate;
2756 num_qps += mqprio_qopt->qopt.count[i];
2757 }
2758 if (num_qps > adapter->num_active_queues) {
2759 dev_err(&adapter->pdev->dev,
2760 "Cannot support requested number of queues\n");
d5b33d02 2761 return -EINVAL;
4989a2b3 2762 }
d5b33d02 2763
129cf89e 2764 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
591532d6 2765 return ret;
2766}
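/* Sketch of an mqprio request that would pass the checks above, assuming a
 * VF with at least four active queues: contiguous queue ranges starting at
 * offset 0, non-zero counts, and zero min rates. All values are
 * hypothetical and purely illustrative.
 */
static void iavf_example_fill_mqprio(struct tc_mqprio_qopt_offload *q)
{
	q->qopt.num_tc = 2;
	q->qopt.count[0] = 2;	/* TC0 -> queues 0-1 */
	q->qopt.offset[0] = 0;
	q->qopt.count[1] = 2;	/* TC1 -> queues 2-3 */
	q->qopt.offset[1] = 2;
	q->min_rate[0] = 0;	/* non-zero min rates are rejected */
	q->min_rate[1] = 0;
	q->max_rate[0] = 0;	/* 0 -> no cap after the Mbps conversion */
	q->max_rate[1] = 0;
}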
2767
0075fa0f 2768/**
2769 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
2770 * @adapter: board private structure
0075fa0f 2771 **/
129cf89e 2772static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
0075fa0f 2773{
129cf89e 2774 struct iavf_cloud_filter *cf, *cftmp;
2775
2776 spin_lock_bh(&adapter->cloud_filter_list_lock);
2777 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
2778 list) {
2779 list_del(&cf->list);
2780 kfree(cf);
2781 adapter->num_cloud_filters--;
2782 }
2783 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2784}
2785
d5b33d02 2786/**
129cf89e 2787 * __iavf_setup_tc - configure multiple traffic classes
d5b33d02 2788 * @netdev: network interface device structure
b50f7bca 2789 * @type_data: tc offload data
2790 *
2791 * This function processes the config information provided by the
2792 * user to configure traffic classes/queue channels and packages the
2793 * information to request the PF to setup traffic classes.
2794 *
2795 * Returns 0 on success.
2796 **/
129cf89e 2797static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
2798{
2799 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
129cf89e 2800 struct iavf_adapter *adapter = netdev_priv(netdev);
2801 struct virtchnl_vf_resource *vfres = adapter->vf_res;
2802 u8 num_tc = 0, total_qps = 0;
2803 int ret = 0, netdev_tc = 0;
591532d6 2804 u64 max_tx_rate;
2805 u16 mode;
2806 int i;
2807
2808 num_tc = mqprio_qopt->qopt.num_tc;
2809 mode = mqprio_qopt->mode;
2810
2811 /* delete queue_channel */
2812 if (!mqprio_qopt->qopt.hw) {
129cf89e 2813 if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
2814 /* reset the tc configuration */
2815 netdev_reset_tc(netdev);
2816 adapter->num_tc = 0;
2817 netif_tx_stop_all_queues(netdev);
2818 netif_tx_disable(netdev);
2819 iavf_del_all_cloud_filters(adapter);
2820 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
2821 goto exit;
2822 } else {
2823 return -EINVAL;
2824 }
2825 }
2826
2827 /* add queue channel */
2828 if (mode == TC_MQPRIO_MODE_CHANNEL) {
2829 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
2830 dev_err(&adapter->pdev->dev, "ADq not supported\n");
2831 return -EOPNOTSUPP;
2832 }
129cf89e 2833 if (adapter->ch_config.state != __IAVF_TC_INVALID) {
2834 dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
2835 return -EINVAL;
2836 }
2837
129cf89e 2838 ret = iavf_validate_ch_config(adapter, mqprio_qopt);
2839 if (ret)
2840 return ret;
2841 /* Return if same TC config is requested */
2842 if (adapter->num_tc == num_tc)
2843 return 0;
2844 adapter->num_tc = num_tc;
2845
129cf89e 2846 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
2847 if (i < num_tc) {
2848 adapter->ch_config.ch_info[i].count =
2849 mqprio_qopt->qopt.count[i];
2850 adapter->ch_config.ch_info[i].offset =
2851 mqprio_qopt->qopt.offset[i];
2852 total_qps += mqprio_qopt->qopt.count[i];
2853 max_tx_rate = mqprio_qopt->max_rate[i];
2854 /* convert to Mbps */
2855 max_tx_rate = div_u64(max_tx_rate,
129cf89e 2856 IAVF_MBPS_DIVISOR);
2857 adapter->ch_config.ch_info[i].max_tx_rate =
2858 max_tx_rate;
2859 } else {
2860 adapter->ch_config.ch_info[i].count = 1;
2861 adapter->ch_config.ch_info[i].offset = 0;
2862 }
2863 }
2864 adapter->ch_config.total_qps = total_qps;
2865 netif_tx_stop_all_queues(netdev);
2866 netif_tx_disable(netdev);
129cf89e 2867 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
2868 netdev_reset_tc(netdev);
2869 /* Report the tc mapping up the stack */
2870 netdev_set_num_tc(adapter->netdev, num_tc);
129cf89e 2871 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
2872 u16 qcount = mqprio_qopt->qopt.count[i];
2873 u16 qoffset = mqprio_qopt->qopt.offset[i];
2874
2875 if (i < num_tc)
2876 netdev_set_tc_queue(netdev, netdev_tc++, qcount,
2877 qoffset);
2878 }
2879 }
2880exit:
2881 return ret;
2882}
2883
0075fa0f 2884/**
129cf89e 2885 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
0075fa0f 2886 * @adapter: board private structure
b50f7bca 2887 * @f: pointer to struct flow_cls_offload
2888 * @filter: pointer to cloud filter structure
2889 */
129cf89e 2890static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
f9e30088 2891 struct flow_cls_offload *f,
129cf89e 2892 struct iavf_cloud_filter *filter)
0075fa0f 2893{
f9e30088 2894 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
8f256622 2895 struct flow_dissector *dissector = rule->match.dissector;
2896 u16 n_proto_mask = 0;
2897 u16 n_proto_key = 0;
2898 u8 field_flags = 0;
2899 u16 addr_type = 0;
2900 u16 n_proto = 0;
2901 int i = 0;
deb9a9ad 2902 struct virtchnl_filter *vf = &filter->f;
0075fa0f 2903
8f256622 2904 if (dissector->used_keys &
2905 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2906 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2907 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2908 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2909 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2910 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2911 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2912 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
2913 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
8f256622 2914 dissector->used_keys);
2915 return -EOPNOTSUPP;
2916 }
2917
2918 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
2919 struct flow_match_enc_keyid match;
0075fa0f 2920
2921 flow_rule_match_enc_keyid(rule, &match);
2922 if (match.mask->keyid != 0)
129cf89e 2923 field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
2924 }
2925
2926 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2927 struct flow_match_basic match;
0075fa0f 2928
2929 flow_rule_match_basic(rule, &match);
2930 n_proto_key = ntohs(match.key->n_proto);
2931 n_proto_mask = ntohs(match.mask->n_proto);
2932
2933 if (n_proto_key == ETH_P_ALL) {
2934 n_proto_key = 0;
2935 n_proto_mask = 0;
2936 }
2937 n_proto = n_proto_key & n_proto_mask;
2938 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
2939 return -EINVAL;
2940 if (n_proto == ETH_P_IPV6) {
2941 /* specify flow type as TCP IPv6 */
deb9a9ad 2942 vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
2943 }
2944
8f256622 2945 if (match.key->ip_proto != IPPROTO_TCP) {
2946 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
2947 return -EINVAL;
2948 }
2949 }
2950
2951 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2952 struct flow_match_eth_addrs match;
2953
2954 flow_rule_match_eth_addrs(rule, &match);
0075fa0f 2955
0075fa0f 2956 /* use is_broadcast and is_zero to check for all 0xf or 0 */
2957 if (!is_zero_ether_addr(match.mask->dst)) {
2958 if (is_broadcast_ether_addr(match.mask->dst)) {
129cf89e 2959 field_flags |= IAVF_CLOUD_FIELD_OMAC;
2960 } else {
2961 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
8f256622 2962 match.mask->dst);
8821b3fa 2963 return IAVF_ERR_CONFIG;
2964 }
2965 }
2966
2967 if (!is_zero_ether_addr(match.mask->src)) {
2968 if (is_broadcast_ether_addr(match.mask->src)) {
129cf89e 2969 field_flags |= IAVF_CLOUD_FIELD_IMAC;
2970 } else {
2971 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
8f256622 2972 match.mask->src);
8821b3fa 2973 return IAVF_ERR_CONFIG;
2974 }
2975 }
2976
2977 if (!is_zero_ether_addr(match.key->dst))
2978 if (is_valid_ether_addr(match.key->dst) ||
2979 is_multicast_ether_addr(match.key->dst)) {
2980 /* set the mask if a valid dst_mac address */
2981 for (i = 0; i < ETH_ALEN; i++)
2982 vf->mask.tcp_spec.dst_mac[i] |= 0xff;
2983 ether_addr_copy(vf->data.tcp_spec.dst_mac,
8f256622 2984 match.key->dst);
2985 }
2986
2987 if (!is_zero_ether_addr(match.key->src))
2988 if (is_valid_ether_addr(match.key->src) ||
2989 is_multicast_ether_addr(match.key->src)) {
2990 /* set the mask if a valid src_mac address */
2991 for (i = 0; i < ETH_ALEN; i++)
2992 vf->mask.tcp_spec.src_mac[i] |= 0xff;
2993 ether_addr_copy(vf->data.tcp_spec.src_mac,
8f256622 2994 match.key->src);
2995 }
2996 }
2997
2998 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2999 struct flow_match_vlan match;
0075fa0f 3000
3001 flow_rule_match_vlan(rule, &match);
3002 if (match.mask->vlan_id) {
3003 if (match.mask->vlan_id == VLAN_VID_MASK) {
129cf89e 3004 field_flags |= IAVF_CLOUD_FIELD_IVLAN;
3005 } else {
3006 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
8f256622 3007 match.mask->vlan_id);
8821b3fa 3008 return IAVF_ERR_CONFIG;
3009 }
3010 }
deb9a9ad 3011 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
8f256622 3012 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
3013 }
3014
3015 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
3016 struct flow_match_control match;
0075fa0f 3017
3018 flow_rule_match_control(rule, &match);
3019 addr_type = match.key->addr_type;
3020 }
3021
3022 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
3023 struct flow_match_ipv4_addrs match;
3024
3025 flow_rule_match_ipv4_addrs(rule, &match);
3026 if (match.mask->dst) {
3027 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
129cf89e 3028 field_flags |= IAVF_CLOUD_FIELD_IIP;
3029 } else {
3030 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
8f256622 3031 be32_to_cpu(match.mask->dst));
8821b3fa 3032 return IAVF_ERR_CONFIG;
3033 }
3034 }
3035
3036 if (match.mask->src) {
3037 if (match.mask->src == cpu_to_be32(0xffffffff)) {
129cf89e 3038 field_flags |= IAVF_CLOUD_FIELD_IIP;
3039 } else {
3040 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
8f256622 3041 be32_to_cpu(match.mask->src));
8821b3fa 3042 return IAVF_ERR_CONFIG;
3043 }
3044 }
3045
129cf89e 3046 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
0075fa0f 3047 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
8821b3fa 3048 return IAVF_ERR_CONFIG;
0075fa0f 3049 }
8f256622 3050 if (match.key->dst) {
deb9a9ad 3051 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
8f256622 3052 vf->data.tcp_spec.dst_ip[0] = match.key->dst;
0075fa0f 3053 }
8f256622 3054 if (match.key->src) {
deb9a9ad 3055 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
8f256622 3056 vf->data.tcp_spec.src_ip[0] = match.key->src;
3057 }
3058 }
3059
3060 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
3061 struct flow_match_ipv6_addrs match;
3062
3063 flow_rule_match_ipv6_addrs(rule, &match);
3064
3065 /* validate mask, make sure it is not IPV6_ADDR_ANY */
8f256622 3066 if (ipv6_addr_any(&match.mask->dst)) {
3067 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
3068 IPV6_ADDR_ANY);
8821b3fa 3069 return IAVF_ERR_CONFIG;
3070 }
3071
3072 /* src and dest IPv6 address should not be LOOPBACK
3073 * (0:0:0:0:0:0:0:1) which can be represented as ::1
3074 */
3075 if (ipv6_addr_loopback(&match.key->dst) ||
3076 ipv6_addr_loopback(&match.key->src)) {
3077 dev_err(&adapter->pdev->dev,
3078 "ipv6 addr should not be loopback\n");
8821b3fa 3079 return IAVF_ERR_CONFIG;
0075fa0f 3080 }
3081 if (!ipv6_addr_any(&match.mask->dst) ||
3082 !ipv6_addr_any(&match.mask->src))
129cf89e 3083 field_flags |= IAVF_CLOUD_FIELD_IIP;
0075fa0f 3084
3085 for (i = 0; i < 4; i++)
3086 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
8f256622 3087 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
3088 sizeof(vf->data.tcp_spec.dst_ip));
3089 for (i = 0; i < 4; i++)
3090 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
8f256622 3091 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
deb9a9ad 3092 sizeof(vf->data.tcp_spec.src_ip));
0075fa0f 3093 }
3094 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3095 struct flow_match_ports match;
3096
3097 flow_rule_match_ports(rule, &match);
3098 if (match.mask->src) {
3099 if (match.mask->src == cpu_to_be16(0xffff)) {
129cf89e 3100 field_flags |= IAVF_CLOUD_FIELD_IIP;
3101 } else {
3102 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
8f256622 3103 be16_to_cpu(match.mask->src));
8821b3fa 3104 return IAVF_ERR_CONFIG;
3105 }
3106 }
3107
3108 if (match.mask->dst) {
3109 if (match.mask->dst == cpu_to_be16(0xffff)) {
129cf89e 3110 field_flags |= IAVF_CLOUD_FIELD_IIP;
3111 } else {
3112 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
8f256622 3113 be16_to_cpu(match.mask->dst));
8821b3fa 3114 return IAVF_ERR_CONFIG;
3115 }
3116 }
8f256622 3117 if (match.key->dst) {
deb9a9ad 3118 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
8f256622 3119 vf->data.tcp_spec.dst_port = match.key->dst;
3120 }
3121
8f256622 3122 if (match.key->src) {
deb9a9ad 3123 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
8f256622 3124 vf->data.tcp_spec.src_port = match.key->src;
3125 }
3126 }
deb9a9ad 3127 vf->field_flags = field_flags;
3128
3129 return 0;
3130}
3131
3132/**
129cf89e 3133 * iavf_handle_tclass - Forward to a traffic class on the device
3134 * @adapter: board private structure
3135 * @tc: traffic class index on the device
3136 * @filter: pointer to cloud filter structure
3137 */
3138static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
3139 struct iavf_cloud_filter *filter)
3140{
3141 if (tc == 0)
3142 return 0;
3143 if (tc < adapter->num_tc) {
3144 if (!filter->f.data.tcp_spec.dst_port) {
3145 dev_err(&adapter->pdev->dev,
3146 "Specify destination port to redirect to traffic class other than TC0\n");
3147 return -EINVAL;
3148 }
3149 }
3150 /* redirect to a traffic class on the same device */
3151 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
3152 filter->f.action_meta = tc;
3153 return 0;
3154}
3155
3156/**
129cf89e 3157 * iavf_configure_clsflower - Add tc flower filters
0075fa0f 3158 * @adapter: board private structure
f9e30088 3159 * @cls_flower: Pointer to struct flow_cls_offload
0075fa0f 3160 */
129cf89e 3161static int iavf_configure_clsflower(struct iavf_adapter *adapter,
f9e30088 3162 struct flow_cls_offload *cls_flower)
3163{
3164 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
129cf89e 3165 struct iavf_cloud_filter *filter = NULL;
640a8af5 3166 int err = -EINVAL, count = 50;
0075fa0f
HR
3167
3168 if (tc < 0) {
3169 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
3170 return -EINVAL;
3171 }
3172
3173 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
3174 if (!filter)
3175 return -ENOMEM;
3176
5ac49f3c 3177 while (!mutex_trylock(&adapter->crit_lock)) {
3178 if (--count == 0) {
3179 kfree(filter);
3180 return err;
3181 }
640a8af5 3182 udelay(1);
0075fa0f 3183 }
640a8af5 3184
3185 filter->cookie = cls_flower->cookie;
3186
3187 /* set the mask to all zeroes to begin with */
3188 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
3189 /* start out with flow type and eth type IPv4 to begin with */
3190 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
129cf89e 3191 err = iavf_parse_cls_flower(adapter, cls_flower, filter);
b267ce84 3192 if (err)
3193 goto err;
3194
129cf89e 3195 err = iavf_handle_tclass(adapter, tc, filter);
b267ce84 3196 if (err)
0075fa0f
HR
3197 goto err;
3198
3199 /* add filter to the list */
3200 spin_lock_bh(&adapter->cloud_filter_list_lock);
3201 list_add_tail(&filter->list, &adapter->cloud_filter_list);
3202 adapter->num_cloud_filters++;
3203 filter->add = true;
129cf89e 3204 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
0075fa0f
HR
3205 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3206err:
3207 if (err)
3208 kfree(filter);
640a8af5 3209
5ac49f3c 3210 mutex_unlock(&adapter->crit_lock);
0075fa0f
HR
3211 return err;
3212}
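#if 0
/* Minimal sketch, not built with the driver: the bounded
 * mutex_trylock()/udelay() loop used in iavf_configure_clsflower()
 * above, factored out for clarity. The helper name is hypothetical.
 */
static bool iavf_trylock_bounded(struct mutex *lock, int tries)
{
	while (!mutex_trylock(lock)) {
		if (--tries == 0)
			return false;
		udelay(1);
	}
	return true;
}
#endif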
3213
129cf89e 3214/* iavf_find_cf - Find the cloud filter in the list
0075fa0f
HR
3215 * @adapter: Board private structure
3216 * @cookie: filter specific cookie
3217 *
3218 * Returns ptr to the filter object or NULL. Must be called while holding the
3219 * cloud_filter_list_lock.
3220 */
129cf89e
JB
3221static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
3222 unsigned long *cookie)
0075fa0f 3223{
129cf89e 3224 struct iavf_cloud_filter *filter = NULL;
0075fa0f
HR
3225
3226 if (!cookie)
3227 return NULL;
3228
3229 list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
3230 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
3231 return filter;
3232 }
3233 return NULL;
3234}
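#if 0
/* Minimal sketch, not built with the driver: iavf_find_cf() must be
 * called with cloud_filter_list_lock held, as in this hypothetical
 * lookup wrapper.
 */
static bool iavf_cloud_filter_exists(struct iavf_adapter *adapter,
				     unsigned long *cookie)
{
	bool found;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	found = !!iavf_find_cf(adapter, cookie);
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	return found;
}
#endif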
3235
3236/**
129cf89e 3237 * iavf_delete_clsflower - Remove tc flower filters
0075fa0f 3238 * @adapter: board private structure
f9e30088 3239 * @cls_flower: Pointer to struct flow_cls_offload
0075fa0f 3240 */
129cf89e 3241static int iavf_delete_clsflower(struct iavf_adapter *adapter,
f9e30088 3242 struct flow_cls_offload *cls_flower)
0075fa0f 3243{
129cf89e 3244 struct iavf_cloud_filter *filter = NULL;
0075fa0f
HR
3245 int err = 0;
3246
3247 spin_lock_bh(&adapter->cloud_filter_list_lock);
129cf89e 3248 filter = iavf_find_cf(adapter, &cls_flower->cookie);
0075fa0f
HR
3249 if (filter) {
3250 filter->del = true;
129cf89e 3251 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
0075fa0f
HR
3252 } else {
3253 err = -EINVAL;
3254 }
3255 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3256
3257 return err;
3258}
3259
3260/**
129cf89e 3261 * iavf_setup_tc_cls_flower - flower classifier offloads
b50f7bca
JB
3262 * @adapter: board private structure
3263 * @cls_flower: pointer to flow_cls_offload struct with flow info
0075fa0f 3264 */
129cf89e 3265static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
f9e30088 3266 struct flow_cls_offload *cls_flower)
0075fa0f 3267{
0075fa0f 3268 switch (cls_flower->command) {
f9e30088 3269 case FLOW_CLS_REPLACE:
129cf89e 3270 return iavf_configure_clsflower(adapter, cls_flower);
f9e30088 3271 case FLOW_CLS_DESTROY:
129cf89e 3272 return iavf_delete_clsflower(adapter, cls_flower);
f9e30088 3273 case FLOW_CLS_STATS:
0075fa0f
HR
3274 return -EOPNOTSUPP;
3275 default:
246ab6f0 3276 return -EOPNOTSUPP;
0075fa0f
HR
3277 }
3278}
3279
3280/**
129cf89e 3281 * iavf_setup_tc_block_cb - block callback for tc
0075fa0f
HR
3282 * @type: type of offload
3283 * @type_data: offload data
3284 * @cb_priv: board private structure
3285 *
3286 * This function is the flow block callback used to offload tc flower filters
3287 **/
129cf89e
JB
3288static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3289 void *cb_priv)
0075fa0f 3290{
bb0858d8
JP
3291 struct iavf_adapter *adapter = cb_priv;
3292
3293 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
3294 return -EOPNOTSUPP;
3295
0075fa0f
HR
3296 switch (type) {
3297 case TC_SETUP_CLSFLOWER:
129cf89e 3298 return iavf_setup_tc_cls_flower(cb_priv, type_data);
0075fa0f
HR
3299 default:
3300 return -EOPNOTSUPP;
3301 }
3302}
3303
955bcb6e
PNA
3304static LIST_HEAD(iavf_block_cb_list);
3305
d5b33d02 3306/**
129cf89e 3307 * iavf_setup_tc - configure multiple traffic classes
d5b33d02
HR
3308 * @netdev: network interface device structure
3309 * @type: type of offload
b50f7bca 3310 * @type_data: tc offload data
d5b33d02
HR
3311 *
3312 * This function is the callback to ndo_setup_tc in the
3313 * netdev_ops.
3314 *
3315 * Returns 0 on success
3316 **/
129cf89e
JB
3317static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
3318 void *type_data)
d5b33d02 3319{
4e95bc26
PNA
3320 struct iavf_adapter *adapter = netdev_priv(netdev);
3321
0075fa0f
HR
3322 switch (type) {
3323 case TC_SETUP_QDISC_MQPRIO:
129cf89e 3324 return __iavf_setup_tc(netdev, type_data);
0075fa0f 3325 case TC_SETUP_BLOCK:
955bcb6e
PNA
3326 return flow_block_cb_setup_simple(type_data,
3327 &iavf_block_cb_list,
4e95bc26
PNA
3328 iavf_setup_tc_block_cb,
3329 adapter, adapter, true);
0075fa0f 3330 default:
d5b33d02 3331 return -EOPNOTSUPP;
0075fa0f 3332 }
d5b33d02
HR
3333}
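/* Illustrative usage only, not part of the driver: TC_SETUP_QDISC_MQPRIO
 * above is reached when traffic classes are configured from user space,
 * e.g. for a 4-queue VF split into two ADQ channels ("<vf-netdev>" is a
 * placeholder):
 *
 *   tc qdisc add dev <vf-netdev> root mqprio num_tc 2 \
 *           map 0 0 0 0 1 1 1 1 queues 2@0 2@2 hw 1 mode channel
 */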
3334
5eae00c5 3335/**
129cf89e 3336 * iavf_open - Called when a network interface is made active
5eae00c5
GR
3337 * @netdev: network interface device structure
3338 *
3339 * Returns 0 on success, negative value on failure
3340 *
3341 * The open entry point is called when a network interface is made
3342 * active by the system (IFF_UP). At this point all resources needed
3343 * for transmit and receive operations are allocated, the interrupt
fdd4044f 3344 * handler is registered with the OS, the watchdog is started,
5eae00c5
GR
3345 * and the stack is notified that the interface is ready.
3346 **/
129cf89e 3347static int iavf_open(struct net_device *netdev)
5eae00c5 3348{
129cf89e 3349 struct iavf_adapter *adapter = netdev_priv(netdev);
5eae00c5
GR
3350 int err;
3351
129cf89e 3352 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
ef8693eb
MW
3353 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
3354 return -EIO;
3355 }
209dc4da 3356
5ac49f3c 3357 while (!mutex_trylock(&adapter->crit_lock))
9b2aef12
JK
3358 usleep_range(500, 1000);
3359
129cf89e 3360 if (adapter->state != __IAVF_DOWN) {
9b2aef12
JK
3361 err = -EBUSY;
3362 goto err_unlock;
3363 }
5eae00c5 3364
502b210b
PP
3365 if (adapter->state == __IAVF_RUNNING &&
3366 !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
3367 dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
3368 err = 0;
3369 goto err_unlock;
3370 }
3371
5eae00c5 3372 /* allocate transmit descriptors */
129cf89e 3373 err = iavf_setup_all_tx_resources(adapter);
5eae00c5
GR
3374 if (err)
3375 goto err_setup_tx;
3376
3377 /* allocate receive descriptors */
129cf89e 3378 err = iavf_setup_all_rx_resources(adapter);
5eae00c5
GR
3379 if (err)
3380 goto err_setup_rx;
3381
3382 /* clear any pending interrupts, may auto mask */
129cf89e 3383 err = iavf_request_traffic_irqs(adapter, netdev->name);
5eae00c5
GR
3384 if (err)
3385 goto err_req_irq;
3386
8cd5fe62
PJ
3387 spin_lock_bh(&adapter->mac_vlan_list_lock);
3388
129cf89e 3389 iavf_add_filter(adapter, adapter->hw.mac.addr);
8cd5fe62
PJ
3390
3391 spin_unlock_bh(&adapter->mac_vlan_list_lock);
3392
1f36185d
AA
3393 /* Restore VLAN filters that were removed with IFF_DOWN */
3394 iavf_restore_filters(adapter);
3395
129cf89e 3396 iavf_configure(adapter);
5eae00c5 3397
129cf89e 3398 iavf_up_complete(adapter);
5eae00c5 3399
129cf89e 3400 iavf_irq_enable(adapter, true);
5eae00c5 3401
5ac49f3c 3402 mutex_unlock(&adapter->crit_lock);
9b2aef12 3403
5eae00c5
GR
3404 return 0;
3405
3406err_req_irq:
129cf89e
JB
3407 iavf_down(adapter);
3408 iavf_free_traffic_irqs(adapter);
5eae00c5 3409err_setup_rx:
129cf89e 3410 iavf_free_all_rx_resources(adapter);
5eae00c5 3411err_setup_tx:
129cf89e 3412 iavf_free_all_tx_resources(adapter);
9b2aef12 3413err_unlock:
5ac49f3c 3414 mutex_unlock(&adapter->crit_lock);
5eae00c5
GR
3415
3416 return err;
3417}
3418
3419/**
129cf89e 3420 * iavf_close - Disables a network interface
5eae00c5
GR
3421 * @netdev: network interface device structure
3422 *
3423 * Returns 0, this is not allowed to fail
3424 *
3425 * The close entry point is called when an interface is de-activated
3426 * by the OS. The hardware is still under the driver's control, but
3427 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
3428 * are freed, along with all transmit and receive resources.
3429 **/
129cf89e 3430static int iavf_close(struct net_device *netdev)
5eae00c5 3431{
129cf89e 3432 struct iavf_adapter *adapter = netdev_priv(netdev);
fe2647ab 3433 int status;
5eae00c5 3434
3986c698 3435 mutex_lock(&adapter->crit_lock);
ef8693eb 3436
3986c698
SL
3437 if (adapter->state <= __IAVF_DOWN_PENDING) {
3438 mutex_unlock(&adapter->crit_lock);
3439 return 0;
3440 }
ef8693eb 3441
56184e01 3442 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
ed0e894d 3443 if (CLIENT_ENABLED(adapter))
129cf89e 3444 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
5eae00c5 3445
129cf89e 3446 iavf_down(adapter);
a646d474 3447 iavf_change_state(adapter, __IAVF_DOWN_PENDING);
129cf89e 3448 iavf_free_traffic_irqs(adapter);
5eae00c5 3449
5ac49f3c 3450 mutex_unlock(&adapter->crit_lock);
9b2aef12 3451
51f38262
MW
3452 /* We explicitly don't free resources here because the hardware is
3453 * still active and can DMA into memory. Resources are cleared in
129cf89e 3454 * iavf_virtchnl_completion() after we get confirmation from the PF
51f38262 3455 * driver that the rings have been stopped.
fe2647ab 3456 *
129cf89e
JB
3457 * Also, we wait for state to transition to __IAVF_DOWN before
3458 * returning. State change occurs in iavf_virtchnl_completion() after
fe2647ab
SM
3459 * VF resources are released (which occurs after PF driver processes and
3460 * responds to admin queue commands).
51f38262 3461 */
fe2647ab
SM
3462
3463 status = wait_event_timeout(adapter->down_waitqueue,
129cf89e 3464 adapter->state == __IAVF_DOWN,
88ec7308 3465 msecs_to_jiffies(500));
fe2647ab
SM
3466 if (!status)
3467 netdev_warn(netdev, "Device resources not yet released\n");
5eae00c5
GR
3468 return 0;
3469}
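#if 0
/* Minimal sketch, not built with the driver: the waker side of
 * down_waitqueue. iavf_virtchnl_completion() performs the equivalent of
 * this once the PF confirms the queues are disabled; the helper name is
 * hypothetical.
 */
static void iavf_signal_down(struct iavf_adapter *adapter)
{
	iavf_change_state(adapter, __IAVF_DOWN);
	wake_up(&adapter->down_waitqueue);
}
#endif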
3470
5eae00c5 3471/**
129cf89e 3472 * iavf_change_mtu - Change the Maximum Transfer Unit
5eae00c5
GR
3473 * @netdev: network interface device structure
3474 * @new_mtu: new value for maximum frame size
3475 *
3476 * Returns 0 on success, negative on failure
3477 **/
129cf89e 3478static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
5eae00c5 3479{
129cf89e 3480 struct iavf_adapter *adapter = netdev_priv(netdev);
5eae00c5 3481
5eae00c5 3482 netdev->mtu = new_mtu;
ed0e894d 3483 if (CLIENT_ENABLED(adapter)) {
129cf89e
JB
3484 iavf_notify_client_l2_params(&adapter->vsi);
3485 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
ed0e894d 3486 }
e15f243a
SL
3487
3488 if (netif_running(netdev)) {
3489 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
3490 queue_work(iavf_wq, &adapter->reset_task);
3491 }
67c818a1 3492
5eae00c5
GR
3493 return 0;
3494}
3495
8774370d 3496/**
56184e01 3497 * iavf_set_features - set the netdev feature flags
8774370d
MS
3498 * @netdev: ptr to the netdev being adjusted
3499 * @features: the feature set that the stack is suggesting
3500 * Note: expects to be called while under rtnl_lock()
3501 **/
129cf89e
JB
3502static int iavf_set_features(struct net_device *netdev,
3503 netdev_features_t features)
8774370d 3504{
129cf89e 3505 struct iavf_adapter *adapter = netdev_priv(netdev);
8774370d 3506
08772382
BC
3507 /* Don't allow enabling VLAN features when the adapter is not capable
3508 * of VLAN offload/filtering
e0f60a81 3509 */
3bd77e2a 3510 if (!VLAN_ALLOWED(adapter)) {
08772382
BC
3511 netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
3512 NETIF_F_HW_VLAN_CTAG_TX |
3513 NETIF_F_HW_VLAN_CTAG_FILTER);
3514 if (features & (NETIF_F_HW_VLAN_CTAG_RX |
3515 NETIF_F_HW_VLAN_CTAG_TX |
3516 NETIF_F_HW_VLAN_CTAG_FILTER))
3bd77e2a
PM
3517 return -EINVAL;
3518 } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
e0f60a81
PJ
3519 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3520 adapter->aq_required |=
129cf89e 3521 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
e0f60a81
PJ
3522 else
3523 adapter->aq_required |=
129cf89e 3524 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
e0f60a81 3525 }
8774370d
MS
3526
3527 return 0;
3528}
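/* Illustrative usage only, not part of the driver: VLAN stripping
 * toggles from user space land in iavf_set_features() via ethtool,
 * e.g.:
 *
 *   ethtool -K <vf-netdev> rxvlan off
 *
 * which queues IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING for the watchdog
 * task to forward to the PF.
 */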
3529
06fc016c 3530/**
129cf89e 3531 * iavf_features_check - Validate encapsulated packet conforms to limits
06fc016c 3532 * @skb: skb buff
f5254429 3533 * @dev: This physical port's netdev
06fc016c
AD
3534 * @features: Offload features that the stack believes apply
3535 **/
129cf89e
JB
3536static netdev_features_t iavf_features_check(struct sk_buff *skb,
3537 struct net_device *dev,
3538 netdev_features_t features)
06fc016c
AD
3539{
3540 size_t len;
3541
3542 /* No point in doing any of this if neither checksum nor GSO are
3543 * being requested for this frame. We can rule out both by just
3544 * checking for CHECKSUM_PARTIAL
3545 */
3546 if (skb->ip_summed != CHECKSUM_PARTIAL)
3547 return features;
3548
3549 /* We cannot support GSO if the MSS is going to be less than
3550 * 64 bytes. If it is then we need to drop support for GSO.
3551 */
3552 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
3553 features &= ~NETIF_F_GSO_MASK;
3554
3555 /* MACLEN can support at most 63 words */
3556 len = skb_network_header(skb) - skb->data;
3557 if (len & ~(63 * 2))
3558 goto out_err;
3559
3560 /* IPLEN and EIPLEN can support at most 127 dwords */
3561 len = skb_transport_header(skb) - skb_network_header(skb);
3562 if (len & ~(127 * 4))
3563 goto out_err;
3564
3565 if (skb->encapsulation) {
3566 /* L4TUNLEN can support 127 words */
3567 len = skb_inner_network_header(skb) - skb_transport_header(skb);
3568 if (len & ~(127 * 2))
3569 goto out_err;
3570
3571 /* IPLEN can support at most 127 dwords */
3572 len = skb_inner_transport_header(skb) -
3573 skb_inner_network_header(skb);
3574 if (len & ~(127 * 4))
3575 goto out_err;
3576 }
3577
3578 /* No need to validate L4LEN as TCP is the only protocol with a
3579 * flexible value, and we support all possible values supported
3580 * by TCP, which is at most 15 dwords
3581 */
3582
3583 return features;
3584out_err:
3585 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3586}
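/* Worked example for the MACLEN check above: with a plain Ethernet
 * header, skb_network_header(skb) - skb->data == 14 and
 * 14 & ~(63 * 2) == 0, so the frame passes. A hypothetical 130-byte or
 * odd-length L2 header fails, since the hardware field holds at most
 * 63 two-byte words (an even length up to 126 bytes).
 */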
3587
c4445aed 3588/**
129cf89e 3589 * iavf_fix_features - fix up the netdev feature bits
c4445aed
MW
3590 * @netdev: our net device
3591 * @features: desired feature bits
3592 *
3593 * Returns fixed-up features bits
3594 **/
129cf89e
JB
3595static netdev_features_t iavf_fix_features(struct net_device *netdev,
3596 netdev_features_t features)
c4445aed 3597{
129cf89e 3598 struct iavf_adapter *adapter = netdev_priv(netdev);
c4445aed 3599
cba429c6
NN
3600 if (adapter->vf_res &&
3601 !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
0a3b4f70
JK
3602 features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3603 NETIF_F_HW_VLAN_CTAG_RX |
3604 NETIF_F_HW_VLAN_CTAG_FILTER);
3605
c4445aed
MW
3606 return features;
3607}
3608
129cf89e
JB
3609static const struct net_device_ops iavf_netdev_ops = {
3610 .ndo_open = iavf_open,
3611 .ndo_stop = iavf_close,
3612 .ndo_start_xmit = iavf_xmit_frame,
3613 .ndo_set_rx_mode = iavf_set_rx_mode,
5eae00c5 3614 .ndo_validate_addr = eth_validate_addr,
129cf89e
JB
3615 .ndo_set_mac_address = iavf_set_mac,
3616 .ndo_change_mtu = iavf_change_mtu,
3617 .ndo_tx_timeout = iavf_tx_timeout,
3618 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid,
3619 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid,
3620 .ndo_features_check = iavf_features_check,
3621 .ndo_fix_features = iavf_fix_features,
3622 .ndo_set_features = iavf_set_features,
129cf89e 3623 .ndo_setup_tc = iavf_setup_tc,
5eae00c5
GR
3624};
3625
3626/**
129cf89e 3627 * iavf_check_reset_complete - check that VF reset is complete
5eae00c5
GR
3628 * @hw: pointer to hw struct
3629 *
3630 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
3631 **/
f349daa5 3632static int iavf_check_reset_complete(struct iavf_hw *hw)
5eae00c5
GR
3633{
3634 u32 rstat;
3635 int i;
3636
8e3e4b9d 3637 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
f1cad2ce
JB
3638 rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
3639 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
310a2ad9
JB
3640 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
3641 (rstat == VIRTCHNL_VFR_COMPLETED))
5eae00c5 3642 return 0;
f98a2006 3643 usleep_range(10, 20);
5eae00c5
GR
3644 }
3645 return -EBUSY;
3646}
3647
e6d038de 3648/**
129cf89e 3649 * iavf_process_config - Process the config information we got from the PF
e6d038de
MW
3650 * @adapter: board private structure
3651 *
3652 * Verify that we have a valid config struct, and set up our netdev features
3653 * and our VSI struct.
3654 **/
129cf89e 3655int iavf_process_config(struct iavf_adapter *adapter)
e6d038de 3656{
310a2ad9 3657 struct virtchnl_vf_resource *vfres = adapter->vf_res;
5b36e8d0 3658 int i, num_req_queues = adapter->num_req_queues;
e6d038de 3659 struct net_device *netdev = adapter->netdev;
56184e01 3660 struct iavf_vsi *vsi = &adapter->vsi;
bacd75cf
PB
3661 netdev_features_t hw_enc_features;
3662 netdev_features_t hw_features;
e6d038de
MW
3663
3664 /* got VF config message back from PF, now we can parse it */
ba6cc7f6 3665 for (i = 0; i < vfres->num_vsis; i++) {
ff3f4cc2 3666 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
ba6cc7f6 3667 adapter->vsi_res = &vfres->vsi_res[i];
e6d038de
MW
3668 }
3669 if (!adapter->vsi_res) {
3670 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
3671 return -ENODEV;
3672 }
3673
5b36e8d0 3674 if (num_req_queues &&
5520deb1 3675 num_req_queues > adapter->vsi_res->num_queue_pairs) {
5b36e8d0
AB
3676 /* Problem. The PF gave us fewer queues than what we had
3677 * negotiated in our request. Need a reset to see if we can
3678 * get back to a working state.
3679 */
3680 dev_err(&adapter->pdev->dev,
3681 "Requested %d queues, but PF only gave us %d.\n",
3682 num_req_queues,
3683 adapter->vsi_res->num_queue_pairs);
129cf89e 3684 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
5b36e8d0 3685 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
129cf89e 3686 iavf_schedule_reset(adapter);
5b36e8d0
AB
3687 return -ENODEV;
3688 }
3689 adapter->num_req_queues = 0;
3690
bacd75cf
PB
3691 hw_enc_features = NETIF_F_SG |
3692 NETIF_F_IP_CSUM |
3693 NETIF_F_IPV6_CSUM |
3694 NETIF_F_HIGHDMA |
3695 NETIF_F_SOFT_FEATURES |
3696 NETIF_F_TSO |
3697 NETIF_F_TSO_ECN |
3698 NETIF_F_TSO6 |
3699 NETIF_F_SCTP_CRC |
3700 NETIF_F_RXHASH |
3701 NETIF_F_RXCSUM |
3702 0;
3703
3704 /* advertise to stack only if offloads for encapsulated packets is
3705 * supported
3706 */
fbb113f7 3707 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
bacd75cf 3708 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
b0fe3306 3709 NETIF_F_GSO_GRE |
1c7b4a23 3710 NETIF_F_GSO_GRE_CSUM |
7e13318d 3711 NETIF_F_GSO_IPXIP4 |
bf2d1df3 3712 NETIF_F_GSO_IPXIP6 |
b0fe3306 3713 NETIF_F_GSO_UDP_TUNNEL_CSUM |
1c7b4a23 3714 NETIF_F_GSO_PARTIAL |
b0fe3306
AD
3715 0;
3716
fbb113f7 3717 if (!(vfres->vf_cap_flags &
310a2ad9 3718 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
bacd75cf
PB
3719 netdev->gso_partial_features |=
3720 NETIF_F_GSO_UDP_TUNNEL_CSUM;
b0fe3306 3721
bacd75cf
PB
3722 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
3723 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
3724 netdev->hw_enc_features |= hw_enc_features;
3725 }
b0fe3306 3726 /* record features VLANs can make use of */
bacd75cf 3727 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
b0fe3306
AD
3728
3729 /* Write features and hw_features separately to avoid polluting
bacd75cf 3730 * with, or dropping, features that are set when we registered.
b0fe3306 3731 */
bacd75cf 3732 hw_features = hw_enc_features;
b0fe3306 3733
0a3b4f70
JK
3734 /* Enable VLAN features if supported */
3735 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3736 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
3737 NETIF_F_HW_VLAN_CTAG_RX);
0075fa0f
HR
3738 /* Enable cloud filter if ADQ is supported */
3739 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
3740 hw_features |= NETIF_F_HW_TC;
c91a4f9f
BC
3741 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
3742 hw_features |= NETIF_F_GSO_UDP_L4;
0a3b4f70 3743
bacd75cf 3744 netdev->hw_features |= hw_features;
b0fe3306 3745
0a3b4f70
JK
3746 netdev->features |= hw_features;
3747
3748 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3749 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
e6d038de 3750
e65aae08
LY
3751 netdev->priv_flags |= IFF_UNICAST_FLT;
3752
e4062894
PJ
3753 /* Do not turn on offloads when they are requested to be turned off.
3754 * TSO needs minimum 576 bytes to work correctly.
3755 */
3756 if (netdev->wanted_features) {
3757 if (!(netdev->wanted_features & NETIF_F_TSO) ||
3758 netdev->mtu < 576)
3759 netdev->features &= ~NETIF_F_TSO;
3760 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
3761 netdev->mtu < 576)
3762 netdev->features &= ~NETIF_F_TSO6;
3763 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
3764 netdev->features &= ~NETIF_F_TSO_ECN;
3765 if (!(netdev->wanted_features & NETIF_F_GRO))
3766 netdev->features &= ~NETIF_F_GRO;
3767 if (!(netdev->wanted_features & NETIF_F_GSO))
3768 netdev->features &= ~NETIF_F_GSO;
3769 }
3770
e6d038de
MW
3771 adapter->vsi.id = adapter->vsi_res->vsi_id;
3772
3773 adapter->vsi.back = adapter;
3774 adapter->vsi.base_vector = 1;
56184e01 3775 adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
43a3d9ba
MW
3776 vsi->netdev = adapter->netdev;
3777 vsi->qs_handle = adapter->vsi_res->qset_handle;
fbb113f7 3778 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
43a3d9ba
MW
3779 adapter->rss_key_size = vfres->rss_key_size;
3780 adapter->rss_lut_size = vfres->rss_lut_size;
3781 } else {
129cf89e
JB
3782 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
3783 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
43a3d9ba
MW
3784 }
3785
e6d038de
MW
3786 return 0;
3787}
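#if 0
/* Minimal sketch, not built with the driver: the capability-gating
 * pattern used throughout iavf_process_config(), factored into a
 * hypothetical helper for illustration.
 */
static bool iavf_vf_has_cap(const struct virtchnl_vf_resource *vfres,
			    u32 cap)
{
	return !!(vfres->vf_cap_flags & cap);
}
#endif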
3788
5eae00c5 3789/**
129cf89e 3790 * iavf_shutdown - Shutdown the device in preparation for a reboot
5eae00c5
GR
3791 * @pdev: pci device structure
3792 **/
129cf89e 3793static void iavf_shutdown(struct pci_dev *pdev)
5eae00c5 3794{
d4dded4a
KS
3795 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
3796 struct net_device *netdev = adapter->netdev;
5eae00c5
GR
3797
3798 netif_device_detach(netdev);
3799
3800 if (netif_running(netdev))
129cf89e 3801 iavf_close(netdev);
5eae00c5 3802
5ac49f3c
SA
3803 if (iavf_lock_timeout(&adapter->crit_lock, 5000))
3804 dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);
00293fdc 3805 /* Prevent the watchdog from running. */
a646d474 3806 iavf_change_state(adapter, __IAVF_REMOVE);
00293fdc 3807 adapter->aq_required = 0;
5ac49f3c 3808 mutex_unlock(&adapter->crit_lock);
00293fdc 3809
5eae00c5
GR
3810#ifdef CONFIG_PM
3811 pci_save_state(pdev);
3812
3813#endif
3814 pci_disable_device(pdev);
3815}
3816
3817/**
129cf89e 3818 * iavf_probe - Device Initialization Routine
5eae00c5 3819 * @pdev: PCI device information struct
129cf89e 3820 * @ent: entry in iavf_pci_tbl
5eae00c5
GR
3821 *
3822 * Returns 0 on success, negative on failure
3823 *
129cf89e 3824 * iavf_probe initializes an adapter identified by a pci_dev structure.
5eae00c5
GR
3825 * The OS initialization, configuring of the adapter private structure,
3826 * and a hardware reset occur.
3827 **/
129cf89e 3828static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5eae00c5
GR
3829{
3830 struct net_device *netdev;
129cf89e 3831 struct iavf_adapter *adapter = NULL;
f349daa5 3832 struct iavf_hw *hw = NULL;
dbbd8111 3833 int err;
5eae00c5
GR
3834
3835 err = pci_enable_device(pdev);
3836 if (err)
3837 return err;
3838
6494294f 3839 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6494294f 3840 if (err) {
e3e3bfdd
JS
3841 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3842 if (err) {
3843 dev_err(&pdev->dev,
3844 "DMA configuration failed: 0x%x\n", err);
3845 goto err_dma;
3846 }
5eae00c5
GR
3847 }
3848
129cf89e 3849 err = pci_request_regions(pdev, iavf_driver_name);
5eae00c5
GR
3850 if (err) {
3851 dev_err(&pdev->dev,
3852 "pci_request_regions failed 0x%x\n", err);
3853 goto err_pci_reg;
3854 }
3855
3856 pci_enable_pcie_error_reporting(pdev);
3857
3858 pci_set_master(pdev);
3859
129cf89e
JB
3860 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
3861 IAVF_MAX_REQ_QUEUES);
5eae00c5
GR
3862 if (!netdev) {
3863 err = -ENOMEM;
3864 goto err_alloc_etherdev;
3865 }
3866
3867 SET_NETDEV_DEV(netdev, &pdev->dev);
3868
3869 pci_set_drvdata(pdev, netdev);
3870 adapter = netdev_priv(netdev);
5eae00c5
GR
3871
3872 adapter->netdev = netdev;
3873 adapter->pdev = pdev;
3874
3875 hw = &adapter->hw;
3876 hw->back = adapter;
3877
41a1d04b 3878 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
a646d474 3879 iavf_change_state(adapter, __IAVF_STARTUP);
5eae00c5
GR
3880
3881 /* Call save state here because it relies on the adapter struct. */
3882 pci_save_state(pdev);
3883
3884 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3885 pci_resource_len(pdev, 0));
3886 if (!hw->hw_addr) {
3887 err = -EIO;
3888 goto err_ioremap;
3889 }
3890 hw->vendor_id = pdev->vendor;
3891 hw->device_id = pdev->device;
3892 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
3893 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3894 hw->subsystem_device_id = pdev->subsystem_device;
3895 hw->bus.device = PCI_SLOT(pdev->devfn);
3896 hw->bus.func = PCI_FUNC(pdev->devfn);
b3f028fc 3897 hw->bus.bus_id = pdev->bus->number;
5eae00c5 3898
8ddb3326
JB
3899 /* set up the locks for the AQ, do this only once in probe
3900 * and destroy them only once in remove
3901 */
5ac49f3c
SA
3902 mutex_init(&adapter->crit_lock);
3903 mutex_init(&adapter->client_lock);
8ddb3326
JB
3904 mutex_init(&hw->aq.asq_mutex);
3905 mutex_init(&hw->aq.arq_mutex);
3906
504398f0 3907 spin_lock_init(&adapter->mac_vlan_list_lock);
0075fa0f 3908 spin_lock_init(&adapter->cloud_filter_list_lock);
0dbfbabb 3909 spin_lock_init(&adapter->fdir_fltr_lock);
0aaeb4fb 3910 spin_lock_init(&adapter->adv_rss_lock);
504398f0 3911
8bb1a540
SK
3912 INIT_LIST_HEAD(&adapter->mac_filter_list);
3913 INIT_LIST_HEAD(&adapter->vlan_filter_list);
0075fa0f 3914 INIT_LIST_HEAD(&adapter->cloud_filter_list);
0dbfbabb 3915 INIT_LIST_HEAD(&adapter->fdir_list_head);
0aaeb4fb 3916 INIT_LIST_HEAD(&adapter->adv_rss_list_head);
8bb1a540 3917
129cf89e
JB
3918 INIT_WORK(&adapter->reset_task, iavf_reset_task);
3919 INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
fdd4044f 3920 INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
129cf89e 3921 INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
ce42e26c 3922 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
fdd4044f 3923 msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
5eae00c5 3924
fe2647ab
SM
3925 /* Setup the wait queue for indicating transition to down status */
3926 init_waitqueue_head(&adapter->down_waitqueue);
3927
5eae00c5
GR
3928 return 0;
3929
3930err_ioremap:
3931 free_netdev(netdev);
3932err_alloc_etherdev:
af30cbd2 3933 pci_disable_pcie_error_reporting(pdev);
5eae00c5
GR
3934 pci_release_regions(pdev);
3935err_pci_reg:
3936err_dma:
3937 pci_disable_device(pdev);
3938 return err;
3939}
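/* Note: unlike many PCI drivers, iavf_probe() does not register the
 * netdev itself. It queues watchdog_task, which drives the adapter
 * through __IAVF_STARTUP and the subsequent init states and registers
 * the netdev once the PF has supplied the VF configuration.
 */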
3940
5eae00c5 3941/**
129cf89e 3942 * iavf_suspend - Power management suspend routine
b50f7bca 3943 * @dev_d: device info pointer
5eae00c5
GR
3944 *
3945 * Called when the system (VM) is entering sleep/suspend.
3946 **/
bc5cbd73 3947static int __maybe_unused iavf_suspend(struct device *dev_d)
5eae00c5 3948{
bc5cbd73 3949 struct net_device *netdev = dev_get_drvdata(dev_d);
129cf89e 3950 struct iavf_adapter *adapter = netdev_priv(netdev);
5eae00c5
GR
3951
3952 netif_device_detach(netdev);
3953
5ac49f3c 3954 while (!mutex_trylock(&adapter->crit_lock))
9b2aef12
JK
3955 usleep_range(500, 1000);
3956
5eae00c5
GR
3957 if (netif_running(netdev)) {
3958 rtnl_lock();
129cf89e 3959 iavf_down(adapter);
5eae00c5
GR
3960 rtnl_unlock();
3961 }
129cf89e
JB
3962 iavf_free_misc_irq(adapter);
3963 iavf_reset_interrupt_capability(adapter);
5eae00c5 3964
5ac49f3c 3965 mutex_unlock(&adapter->crit_lock);
9b2aef12 3966
5eae00c5
GR
3967 return 0;
3968}
3969
3970/**
129cf89e 3971 * iavf_resume - Power management resume routine
b50f7bca 3972 * @dev_d: device info pointer
5eae00c5
GR
3973 *
3974 * Called when the system (VM) is resumed from sleep/suspend.
3975 **/
bc5cbd73 3976static int __maybe_unused iavf_resume(struct device *dev_d)
5eae00c5 3977{
bc5cbd73 3978 struct pci_dev *pdev = to_pci_dev(dev_d);
d4dded4a 3979 struct iavf_adapter *adapter;
5eae00c5
GR
3980 u32 err;
3981
d4dded4a
KS
3982 adapter = iavf_pdev_to_adapter(pdev);
3983
5eae00c5
GR
3984 pci_set_master(pdev);
3985
3986 rtnl_lock();
129cf89e 3987 err = iavf_set_interrupt_capability(adapter);
5eae00c5 3988 if (err) {
f2a1c368 3989 rtnl_unlock();
5eae00c5
GR
3990 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
3991 return err;
3992 }
129cf89e 3993 err = iavf_request_misc_irq(adapter);
5eae00c5
GR
3994 rtnl_unlock();
3995 if (err) {
3996 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
3997 return err;
3998 }
3999
fdd4044f 4000 queue_work(iavf_wq, &adapter->reset_task);
5eae00c5 4001
d4dded4a 4002 netif_device_attach(adapter->netdev);
5eae00c5
GR
4003
4004 return err;
4005}
4006
5eae00c5 4007/**
129cf89e 4008 * iavf_remove - Device Removal Routine
5eae00c5
GR
4009 * @pdev: PCI device information struct
4010 *
129cf89e 4011 * iavf_remove is called by the PCI subsystem to alert the driver
5eae00c5
GR
4012 * that it should release a PCI device. This could be caused by a
4013 * Hot-Plug event, or because the driver is going to be removed from
4014 * memory.
4015 **/
129cf89e 4016static void iavf_remove(struct pci_dev *pdev)
5eae00c5 4017{
d4dded4a
KS
4018 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
4019 struct net_device *netdev = adapter->netdev;
0dbfbabb 4020 struct iavf_fdir_fltr *fdir, *fdirtmp;
129cf89e 4021 struct iavf_vlan_filter *vlf, *vlftmp;
0aaeb4fb 4022 struct iavf_adv_rss *rss, *rsstmp;
129cf89e
JB
4023 struct iavf_mac_filter *f, *ftmp;
4024 struct iavf_cloud_filter *cf, *cftmp;
f349daa5 4025 struct iavf_hw *hw = &adapter->hw;
ed0e894d 4026 int err;
3986c698 4027
cf848d66
IV
4028 /* When a reboot/shutdown is in progress there is no need to do
4029 * anything, as the adapter is already in the REMOVE state, set
4030 * during the iavf_shutdown() callback.
4031 */
4032 if (adapter->state == __IAVF_REMOVE)
4033 return;
4034
5d49e683 4035 set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
c2733289
SL
4036 /* Wait until port initialization is complete.
4037 * There are flows where register/unregister netdev may race.
4038 */
4039 while (1) {
4040 mutex_lock(&adapter->crit_lock);
4041 if (adapter->state == __IAVF_RUNNING ||
5d49e683
SL
4042 adapter->state == __IAVF_DOWN ||
4043 adapter->state == __IAVF_INIT_FAILED) {
c2733289
SL
4044 mutex_unlock(&adapter->crit_lock);
4045 break;
4046 }
4047
4048 mutex_unlock(&adapter->crit_lock);
4049 usleep_range(500, 1000);
4050 }
4051 cancel_delayed_work_sync(&adapter->watchdog_task);
4052
5eae00c5 4053 if (adapter->netdev_registered) {
ec764b75
SL
4054 rtnl_lock();
4055 unregister_netdevice(netdev);
5eae00c5 4056 adapter->netdev_registered = false;
ec764b75 4057 rtnl_unlock();
5eae00c5 4058 }
ed0e894d 4059 if (CLIENT_ALLOWED(adapter)) {
129cf89e 4060 err = iavf_lan_del_device(adapter);
ed0e894d
MW
4061 if (err)
4062 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
4063 err);
4064 }
53d0b3ae 4065
3986c698
SL
4066 mutex_lock(&adapter->crit_lock);
4067 dev_info(&adapter->pdev->dev, "Remove device\n");
4068 iavf_change_state(adapter, __IAVF_REMOVE);
4069
129cf89e 4070 iavf_request_reset(adapter);
22ead37f 4071 msleep(50);
f4a71881 4072 /* If the FW isn't responding, kick it once, but only once. */
129cf89e
JB
4073 if (!iavf_asq_done(hw)) {
4074 iavf_request_reset(adapter);
22ead37f 4075 msleep(50);
f4a71881 4076 }
226d5285 4077
3986c698 4078 iavf_misc_irq_disable(adapter);
226d5285 4079 /* Shut down all the garbage mashers on the detention level */
3986c698
SL
4080 cancel_work_sync(&adapter->reset_task);
4081 cancel_delayed_work_sync(&adapter->watchdog_task);
4082 cancel_work_sync(&adapter->adminq_task);
4083 cancel_delayed_work_sync(&adapter->client_task);
4084
226d5285
SA
4085 adapter->aq_required = 0;
4086 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
502b210b 4087
129cf89e
JB
4088 iavf_free_all_tx_resources(adapter);
4089 iavf_free_all_rx_resources(adapter);
129cf89e 4090 iavf_free_misc_irq(adapter);
502b210b 4091
129cf89e
JB
4092 iavf_reset_interrupt_capability(adapter);
4093 iavf_free_q_vectors(adapter);
5eae00c5 4094
129cf89e 4095 iavf_free_rss(adapter);
66f9af85 4096
5eae00c5 4097 if (hw->aq.asq.count)
129cf89e 4098 iavf_shutdown_adminq(hw);
5eae00c5 4099
8ddb3326
JB
4100 /* destroy the locks only once, here */
4101 mutex_destroy(&hw->aq.arq_mutex);
4102 mutex_destroy(&hw->aq.asq_mutex);
5ac49f3c
SA
4103 mutex_destroy(&adapter->client_lock);
4104 mutex_unlock(&adapter->crit_lock);
4105 mutex_destroy(&adapter->crit_lock);
8ddb3326 4106
5eae00c5
GR
4107 iounmap(hw->hw_addr);
4108 pci_release_regions(pdev);
129cf89e 4109 iavf_free_queues(adapter);
5eae00c5 4110 kfree(adapter->vf_res);
504398f0 4111 spin_lock_bh(&adapter->mac_vlan_list_lock);
6ba36a24
MW
4112 /* If we got removed before an up/down sequence, we've got a filter
4113 * hanging out there that we need to get rid of.
4114 */
4115 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
4116 list_del(&f->list);
4117 kfree(f);
4118 }
fbd5eb54
HR
4119 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
4120 list) {
4121 list_del(&vlf->list);
4122 kfree(vlf);
37dfdf37 4123 }
5eae00c5 4124
504398f0
JK
4125 spin_unlock_bh(&adapter->mac_vlan_list_lock);
4126
0075fa0f
HR
4127 spin_lock_bh(&adapter->cloud_filter_list_lock);
4128 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
4129 list_del(&cf->list);
4130 kfree(cf);
4131 }
4132 spin_unlock_bh(&adapter->cloud_filter_list_lock);
4133
0dbfbabb
HW
4134 spin_lock_bh(&adapter->fdir_fltr_lock);
4135 list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
4136 list_del(&fdir->list);
4137 kfree(fdir);
4138 }
4139 spin_unlock_bh(&adapter->fdir_fltr_lock);
4140
0aaeb4fb
HW
4141 spin_lock_bh(&adapter->adv_rss_lock);
4142 list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
4143 list) {
4144 list_del(&rss->list);
4145 kfree(rss);
4146 }
4147 spin_unlock_bh(&adapter->adv_rss_lock);
4148
5eae00c5
GR
4149 free_netdev(netdev);
4150
4151 pci_disable_pcie_error_reporting(pdev);
4152
4153 pci_disable_device(pdev);
4154}
4155
bc5cbd73
VG
4156static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
4157
129cf89e 4158static struct pci_driver iavf_driver = {
bc5cbd73
VG
4159 .name = iavf_driver_name,
4160 .id_table = iavf_pci_tbl,
4161 .probe = iavf_probe,
4162 .remove = iavf_remove,
4163 .driver.pm = &iavf_pm_ops,
4164 .shutdown = iavf_shutdown,
5eae00c5
GR
4165};
4166
4167/**
56184e01 4168 * iavf_init_module - Driver Registration Routine
5eae00c5 4169 *
56184e01 4170 * iavf_init_module is the first routine called when the driver is
5eae00c5
GR
4171 * loaded. All it does is register with the PCI subsystem.
4172 **/
129cf89e 4173static int __init iavf_init_module(void)
5eae00c5
GR
4174{
4175 int ret;
75a64435 4176
34a2a3b8 4177 pr_info("iavf: %s\n", iavf_driver_string);
5eae00c5 4178
129cf89e 4179 pr_info("%s\n", iavf_copyright);
5eae00c5 4180
129cf89e
JB
4181 iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
4182 iavf_driver_name);
4183 if (!iavf_wq) {
4184 pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
2803b16c
JB
4185 return -ENOMEM;
4186 }
129cf89e 4187 ret = pci_register_driver(&iavf_driver);
5eae00c5
GR
4188 return ret;
4189}
4190
129cf89e 4191module_init(iavf_init_module);
5eae00c5
GR
4192
4193/**
56184e01 4194 * iavf_exit_module - Driver Exit Cleanup Routine
5eae00c5 4195 *
56184e01 4196 * iavf_exit_module is called just before the driver is removed
5eae00c5
GR
4197 * from memory.
4198 **/
129cf89e 4199static void __exit iavf_exit_module(void)
5eae00c5 4200{
129cf89e
JB
4201 pci_unregister_driver(&iavf_driver);
4202 destroy_workqueue(iavf_wq);
5eae00c5
GR
4203}
4204
129cf89e 4205module_exit(iavf_exit_module);
5eae00c5 4206
129cf89e 4207/* iavf_main.c */