// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static int iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 3
#define DRV_VERSION_MINOR 2
#define DRV_VERSION_BUILD 3
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD) \
	     DRV_KERN
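/* with the values above, DRV_VERSION expands to the string "3.2.3-k" */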
const char iavf_driver_version[] = DRV_VERSION;
static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
				     struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
					  struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
				      struct iavf_virt_mem *mem)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		queue_work(iavf_wq, &adapter->reset_task);
	}
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

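	/* MSI-X vector 0 is reserved for the misc/AdminQ interrupt, so
	 * queue vector i corresponds to bit (i - 1) in the caller's mask
	 */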
	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		iavf_flush(hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* schedule work on the private workqueue */
	queue_work(iavf_wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
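	/* the ITRN registers are programmed in units of 2 usec, hence the
	 * usec value is halved (>> 1) before being written to hardware
	 */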
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}

/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}

/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%d", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%d", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_set_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}

/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
	struct iavf_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = IAVF_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu.  On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}

/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (vlan == f->vlan)
			return f;
	}
	return NULL;
}

/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
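		/* atomic allocation: mac_vlan_list_lock (a BH spinlock)
		 * is held here, so we must not sleep
		 */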
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		list_add_tail(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_ALLOWED(adapter))
		return -EIO;
	if (iavf_add_vlan(adapter, vid) == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (VLAN_ALLOWED(adapter)) {
		iavf_del_vlan(adapter, vid);
		return 0;
	}
	return -EIO;
}

/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
				  const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}

/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
					const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}

/**
 * iavf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = iavf_add_filter(adapter, addr->sa_data);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (f) {
		ether_addr_copy(hw->mac.addr, addr->sa_data);
	}

	return (f == NULL) ? -ENOMEM : 0;
}

/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (iavf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = iavf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}

/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & IAVF_FLAG_PROMISC_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}

/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}

/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	iavf_set_rx_mode(netdev);

	iavf_configure_tx(adapter);
	iavf_configure_rx(adapter);
	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *ring = &adapter->rx_rings[i];

		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
	}
}

/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
	adapter->state = __IAVF_RUNNING;
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct iavf_vlan_filter *vlf;
	struct iavf_mac_filter *f;
	struct iavf_cloud_filter *cf;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}

	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->remove = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		cf->del = true;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __IAVF_RESETTING) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}

/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}

/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did. Once basic reset is finished we'll confirm once we
	 * start negotiating config with PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct iavf_ring *tx_ring;
		struct iavf_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	return 0;

err_out:
	iavf_free_queues(adapter);
	return -ENOMEM;
}

/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);
	return err;
}

/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
	struct iavf_aqc_get_set_rss_key_data *rss_key =
		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct iavf_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				  adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;
}

/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

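	/* the VFQF_HKEY/HLUT registers are 32 bits wide, so the key and
	 * lut are written to hardware one dword at a time
	 */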
	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

	iavf_flush(hw);

	return 0;
}

/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_config_rss(struct iavf_adapter *adapter)
{
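	/* Three paths, depending on device capability: ask the PF to
	 * program RSS on our behalf, use admin queue commands, or write
	 * the VF registers directly.
	 */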
	if (RSS_PF(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
					IAVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return iavf_config_rss_aq(adapter);
	} else {
		return iavf_config_rss_reg(adapter);
	}
}

/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
	u16 i;

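	/* spread queue indices round-robin through the table; e.g. with
	 * four active queues the lut reads 0, 1, 2, 3, 0, 1, ...
	 */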
	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}

/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_init_rss(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int ret;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = IAVF_DEFAULT_RSS_HENA;

		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	iavf_fill_rss_lut(adapter);
	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
	ret = iavf_config_rss(adapter);

	return ret;
}

/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct iavf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       iavf_napi_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}

/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
	int err;

	err = iavf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = iavf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* If we've made it so far while ADq flag being ON, then we haven't
	 * bailed out anywhere in middle. And ADq isn't just enabled but actual
	 * resources have been allocated in the reset path.
	 * Now we can truly claim that ADq is enabled.
	 */
	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
	iavf_free_queues(adapter);
err_alloc_queues:
	return err;
}

/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void iavf_free_rss(struct iavf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}

/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev))
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);

	if (RSS_AQ(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
	else
		err = iavf_init_rss(adapter);
err:
	return err;
}

/**
 * iavf_process_aq_command - process aq_required flags
 * and send the corresponding aq command
 * @adapter: pointer to iavf adapter structure
 *
 * Returns 0 on success
 * Returns error code if no command was sent
 * or error code if the command failed.
 **/
static int iavf_process_aq_command(struct iavf_adapter *adapter)
{
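	/* Only one admin queue operation is dispatched per call: each
	 * branch below sends its command and returns 0, so the caller
	 * keeps invoking this until -EAGAIN signals nothing is left to do.
	 */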
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
		return iavf_send_vf_config_msg(adapter);
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
		iavf_disable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
		iavf_map_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
		iavf_add_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
		iavf_add_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
		iavf_del_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
		iavf_del_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
		iavf_configure_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
		iavf_enable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				     FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		return 0;
	}
	return -EAGAIN;
}
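
/* Illustrative sketch (not a verbatim excerpt from elsewhere in this file):
 * other code paths hand work to iavf_process_aq_command() by setting a bit
 * in adapter->aq_required and kicking the watchdog, e.g.:
 *
 *	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
 *	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
 *
 * Only one command is sent per invocation, so the order of the checks above
 * is the priority in which pending operations reach the PF.
 */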

/**
 * iavf_startup - first step of driver startup
 * @adapter: board private structure
 *
 * Processes the __IAVF_STARTUP driver state. On success the state is
 * changed to __IAVF_INIT_VERSION_CHECK; on failure it returns -EAGAIN.
 **/
static int iavf_startup(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_STARTUP);

	/* driver loaded, probe complete */
	adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	err = iavf_set_mac_type(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
		goto err;
	}

	err = iavf_check_reset_complete(hw);
	if (err) {
		dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
			 err);
		goto err;
	}
	hw->aq.num_arq_entries = IAVF_AQ_LEN;
	hw->aq.num_asq_entries = IAVF_AQ_LEN;
	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;

	err = iavf_init_adminq(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
		goto err;
	}
	err = iavf_send_api_ver(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
		iavf_shutdown_adminq(hw);
		goto err;
	}
	adapter->state = __IAVF_INIT_VERSION_CHECK;
err:
	return err;
}
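
/* Summary of the init sequence implemented by the three steps in this file:
 * the init task advances __IAVF_STARTUP -> __IAVF_INIT_VERSION_CHECK ->
 * __IAVF_INIT_GET_RESOURCES, and each step reports -EAGAIN on a recoverable
 * failure so the whole sequence can simply be retried.
 */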

/**
 * iavf_init_version_check - second step of driver startup
 * @adapter: board private structure
 *
 * Processes the __IAVF_INIT_VERSION_CHECK driver state. On success the
 * state is changed to __IAVF_INIT_GET_RESOURCES; on failure it returns
 * -EAGAIN.
 **/
static int iavf_init_version_check(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err = -EAGAIN;

	WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);

	if (!iavf_asq_done(hw)) {
		dev_err(&pdev->dev, "Admin queue command never completed\n");
		iavf_shutdown_adminq(hw);
		adapter->state = __IAVF_STARTUP;
		goto err;
	}

	/* aq msg sent, awaiting reply */
	err = iavf_verify_api_ver(adapter);
	if (err) {
		if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
			err = iavf_send_api_ver(adapter);
		else
			dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
				adapter->pf_version.major,
				adapter->pf_version.minor,
				VIRTCHNL_VERSION_MAJOR,
				VIRTCHNL_VERSION_MINOR);
		goto err;
	}
	err = iavf_send_vf_config_msg(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send config request (%d)\n",
			err);
		goto err;
	}
	adapter->state = __IAVF_INIT_GET_RESOURCES;

err:
	return err;
}
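
/* Version negotiation detail: iavf_send_api_ver() offers
 * VIRTCHNL_VERSION_MAJOR/VIRTCHNL_VERSION_MINOR to the PF, and
 * iavf_verify_api_ver() stores the PF's reply in adapter->pf_version and
 * checks it for compatibility before any further virtchnl traffic is
 * attempted.
 */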

/**
 * iavf_init_get_resources - third step of driver startup
 * @adapter: board private structure
 *
 * Processes the __IAVF_INIT_GET_RESOURCES driver state and finishes the
 * driver initialization procedure. On success the state is changed to
 * __IAVF_DOWN; on failure it returns -EAGAIN.
 **/
static int iavf_init_get_resources(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
	/* aq msg sent, awaiting reply */
	if (!adapter->vf_res) {
		adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
					  GFP_KERNEL);
		if (!adapter->vf_res) {
			err = -ENOMEM;
			goto err;
		}
	}
	err = iavf_get_vf_config(adapter);
	if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
		err = iavf_send_vf_config_msg(adapter);
		goto err;
	} else if (err == IAVF_ERR_PARAM) {
		/* We only get ERR_PARAM if the device is in a very bad
		 * state or if we've been disabled for previous bad
		 * behavior. Either way, we're done now.
		 */
		iavf_shutdown_adminq(hw);
		dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
		return 0;
	}
	if (err) {
		dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
		goto err_alloc;
	}

	if (iavf_process_config(adapter))
		goto err_alloc;
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;

	adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;

	netdev->netdev_ops = &iavf_netdev_ops;
	iavf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	/* MTU range: 68 - 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;

	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
			 adapter->hw.mac.addr);
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
	} else {
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	adapter->tx_desc_count = IAVF_DEFAULT_TXD;
	adapter->rx_desc_count = IAVF_DEFAULT_RXD;
	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;
	iavf_map_rings_to_vectors(adapter);
	if (adapter->vf_res->vf_cap_flags &
	    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err_sw_init;

	netif_carrier_off(netdev);
	adapter->link_up = false;

	/* set the semaphore to prevent any callbacks after device registration
	 * up to time when state of driver will be set to __IAVF_DOWN
	 */
	rtnl_lock();
	if (!adapter->netdev_registered) {
		err = register_netdevice(netdev);
		if (err) {
			rtnl_unlock();
			goto err_register;
		}
	}

	adapter->netdev_registered = true;

	netif_tx_stop_all_queues(netdev);
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_add_device(adapter);
		if (err) {
			rtnl_unlock();
			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
				 err);
		}
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
	if (netdev->features & NETIF_F_GRO)
		dev_info(&pdev->dev, "GRO is enabled\n");

	adapter->state = __IAVF_DOWN;
	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	rtnl_unlock();

	iavf_misc_irq_enable(adapter);
	wake_up(&adapter->down_waitqueue);

	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
	if (!adapter->rss_key || !adapter->rss_lut) {
		err = -ENOMEM;
		goto err_mem;
	}
	if (RSS_AQ(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
	else
		iavf_init_rss(adapter);

	return err;
err_mem:
	iavf_free_rss(adapter);
err_register:
	iavf_free_misc_irq(adapter);
err_sw_init:
	iavf_reset_interrupt_capability(adapter);
err_alloc:
	kfree(adapter->vf_res);
	adapter->vf_res = NULL;
err:
	return err;
}
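
/* Note: when the PF supports RSS configuration over the admin queue
 * (RSS_AQ()), programming is deferred to the watchdog through
 * IAVF_FLAG_AQ_CONFIGURE_RSS; otherwise iavf_init_rss() programs the RSS
 * key and LUT directly.
 */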

/**
 * iavf_watchdog_task - Periodic call-back task
 * @work: pointer to work_struct
 **/
static void iavf_watchdog_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    watchdog_task.work);
	struct iavf_hw *hw = &adapter->hw;
	u32 reg_val;

	if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
		goto restart_watchdog;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		adapter->state = __IAVF_COMM_FAILED;

	switch (adapter->state) {
	case __IAVF_COMM_FAILED:
		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
		    reg_val == VIRTCHNL_VFR_COMPLETED) {
			/* A chance for redemption! */
			dev_err(&adapter->pdev->dev,
				"Hardware came out of reset. Attempting reinit.\n");
			adapter->state = __IAVF_STARTUP;
			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
			queue_delayed_work(iavf_wq, &adapter->init_task, 10);
			clear_bit(__IAVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			/* Don't reschedule the watchdog, since we've restarted
			 * the init task. When init_task contacts the PF and
			 * gets everything set up again, it'll restart the
			 * watchdog for us. Down, boy. Sit. Stay. Woof.
			 */
			return;
		}
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		clear_bit(__IAVF_IN_CRITICAL_TASK,
			  &adapter->crit_section);
		queue_delayed_work(iavf_wq,
				   &adapter->watchdog_task,
				   msecs_to_jiffies(10));
		goto watchdog_done;
	case __IAVF_RESETTING:
		clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
		return;
	case __IAVF_DOWN:
	case __IAVF_DOWN_PENDING:
	case __IAVF_TESTING:
	case __IAVF_RUNNING:
		if (adapter->current_op) {
			if (!iavf_asq_done(hw)) {
				dev_dbg(&adapter->pdev->dev,
					"Admin queue timeout\n");
				iavf_send_api_ver(adapter);
			}
		} else {
			if (!iavf_process_aq_command(adapter) &&
			    adapter->state == __IAVF_RUNNING)
				iavf_request_stats(adapter);
		}
		break;
	case __IAVF_REMOVE:
		clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
		return;
	default:
		goto restart_watchdog;
	}

	/* check for hw reset */
	reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
	if (!reg_val) {
		adapter->state = __IAVF_RESETTING;
		adapter->flags |= IAVF_FLAG_RESET_PENDING;
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
		queue_work(iavf_wq, &adapter->reset_task);
		goto watchdog_done;
	}

	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
watchdog_done:
	if (adapter->state == __IAVF_RUNNING ||
	    adapter->state == __IAVF_COMM_FAILED)
		iavf_detect_recover_hung(&adapter->vsi);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
	if (adapter->aq_required)
		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
				   msecs_to_jiffies(20));
	else
		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
	queue_work(iavf_wq, &adapter->adminq_task);
}
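
/* Re-arm cadence (as implemented above): the watchdog reschedules itself
 * every 20 ms while adapter->aq_required has work pending and every two
 * seconds otherwise, and it kicks the adminq cleanup task on every pass.
 */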

static void iavf_disable_vf(struct iavf_adapter *adapter)
{
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_vlan_filter *fv, *fvtmp;
	struct iavf_cloud_filter *cf, *cftmp;

	adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;

	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	if (adapter->state == __IAVF_RUNNING) {
		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
		netif_carrier_off(adapter->netdev);
		netif_tx_disable(adapter->netdev);
		adapter->link_up = false;
		iavf_napi_disable_all(adapter);
		iavf_irq_disable(adapter);
		iavf_free_traffic_irqs(adapter);
		iavf_free_all_tx_resources(adapter);
		iavf_free_all_rx_resources(adapter);
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Delete all of the filters */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
		list_del(&fv->list);
		kfree(fv);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_queues(adapter);
	iavf_free_q_vectors(adapter);
	memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
	iavf_shutdown_adminq(&adapter->hw);
	adapter->netdev->flags &= ~IFF_UP;
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	adapter->state = __IAVF_DOWN;
	wake_up(&adapter->down_waitqueue);
	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}

#define IAVF_RESET_WAIT_MS 10
#define IAVF_RESET_WAIT_COUNT 500
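/* Worst case, each polling loop below waits roughly
 * IAVF_RESET_WAIT_COUNT * IAVF_RESET_WAIT_MS = 500 * 10 ms = 5 seconds
 * before the reset is declared failed and the VF is disabled.
 */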
/**
 * iavf_reset_task - Call-back task to handle hardware reset
 * @work: pointer to work_struct
 *
 * During reset we need to shut down and reinitialize the admin queue
 * before we can use it to communicate with the PF again. We also clear
 * and reinit the rings because that context is lost as well.
 **/
static void iavf_reset_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    reset_task);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	struct net_device *netdev = adapter->netdev;
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_vlan_filter *vlf;
	struct iavf_cloud_filter *cf;
	u32 reg_val;
	int i = 0, err;
	bool running;

	/* When device is being removed it doesn't make sense to run the reset
	 * task, just return in such a case.
	 */
	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
		return;

	while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);
	if (CLIENT_ENABLED(adapter)) {
		adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
				    IAVF_FLAG_CLIENT_NEEDS_CLOSE |
				    IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
				    IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
		cancel_delayed_work_sync(&adapter->client_task);
		iavf_notify_client_close(&adapter->vsi, true);
	}
	iavf_misc_irq_disable(adapter);
	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
		adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
		/* Restart the AQ here. If we have been reset but didn't
		 * detect it, or if the PF had to reinit, our AQ will be hosed.
		 */
		iavf_shutdown_adminq(hw);
		iavf_init_adminq(hw);
		iavf_request_reset(adapter);
	}
	adapter->flags |= IAVF_FLAG_RESET_PENDING;

	/* poll until we see the reset actually happen */
	for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
		reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
			  IAVF_VF_ARQLEN1_ARQENABLE_MASK;
		if (!reg_val)
			break;
		usleep_range(5000, 10000);
	}
	if (i == IAVF_RESET_WAIT_COUNT) {
		dev_info(&adapter->pdev->dev, "Never saw reset\n");
		goto continue_reset; /* act like the reset happened */
	}

	/* wait until the reset is complete and the PF is responding to us */
	for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
		/* sleep first to make sure a minimum wait time is met */
		msleep(IAVF_RESET_WAIT_MS);

		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
			break;
	}

	pci_set_master(adapter->pdev);

	if (i == IAVF_RESET_WAIT_COUNT) {
		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
			reg_val);
		iavf_disable_vf(adapter);
		clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
		return; /* Do not attempt to reinit. It's dead, Jim. */
	}

continue_reset:
	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	running = ((adapter->state == __IAVF_RUNNING) ||
		   (adapter->state == __IAVF_RESETTING));

	if (running) {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		adapter->link_up = false;
		iavf_napi_disable_all(adapter);
	}
	iavf_irq_disable(adapter);

	adapter->state = __IAVF_RESETTING;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;

	/* free the Tx/Rx rings and descriptors, might be better to just
	 * re-use them sometime in the future
	 */
	iavf_free_all_rx_resources(adapter);
	iavf_free_all_tx_resources(adapter);

	adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
	/* kill and reinit the admin queue */
	iavf_shutdown_adminq(hw);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
	err = iavf_init_adminq(hw);
	if (err)
		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
			 err);
	adapter->aq_required = 0;

	if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
		err = iavf_reinit_interrupt_scheme(adapter);
		if (err)
			goto reset_err;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Delete filter for the current MAC address, it could have
	 * been changed by the PF via administratively set MAC.
	 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
			list_del(&f->list);
			kfree(f);
		}
	}
	/* re-add all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->add = true;
	}
	/* re-add all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->add = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* check if TCs are running and re-add all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc) {
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			cf->add = true;
		}
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
	iavf_misc_irq_enable(adapter);

	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);

	/* We were running when the reset started, so we need to restore some
	 * state here.
	 */
	if (running) {
		/* allocate transmit descriptors */
		err = iavf_setup_all_tx_resources(adapter);
		if (err)
			goto reset_err;

		/* allocate receive descriptors */
		err = iavf_setup_all_rx_resources(adapter);
		if (err)
			goto reset_err;

		if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
			err = iavf_request_traffic_irqs(adapter, netdev->name);
			if (err)
				goto reset_err;

			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
		}

		iavf_configure(adapter);

		iavf_up_complete(adapter);

		iavf_irq_enable(adapter, true);
	} else {
		adapter->state = __IAVF_DOWN;
		wake_up(&adapter->down_waitqueue);
	}
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return;
reset_err:
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
	iavf_close(netdev);
}
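
/* After a successful reset the filters are not re-sent from here; they are
 * only marked ->add = true and the matching IAVF_FLAG_AQ_ADD_* bits are
 * set, so the watchdog's next call into iavf_process_aq_command() replays
 * them to the PF one command at a time.
 */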

/**
 * iavf_adminq_task - worker thread to clean the admin queue
 * @work: pointer to work_struct containing our data
 **/
static void iavf_adminq_task(struct work_struct *work)
{
	struct iavf_adapter *adapter =
		container_of(work, struct iavf_adapter, adminq_task);
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_arq_event_info event;
	enum virtchnl_ops v_op;
	enum iavf_status ret, v_ret;
	u32 val, oldval;
	u16 pending;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		goto out;

	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		goto out;

	do {
		ret = iavf_clean_arq_element(hw, &event, &pending);
		v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);

		if (ret || !v_op)
			break; /* No event to process or error cleaning ARQ */

		iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
					 event.msg_len);
		if (pending != 0)
			memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
	} while (pending);

	if ((adapter->flags &
	     (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
	    adapter->state == __IAVF_RESETTING)
		goto freedom;

	/* check for error indications */
	val = rd32(hw, hw->aq.arq.len);
	if (val == 0xdeadbeef) /* indicates device in reset */
		goto freedom;
	oldval = val;
	if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.arq.len, val);

	val = rd32(hw, hw->aq.asq.len);
	oldval = val;
	if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.asq.len, val);

freedom:
	kfree(event.msg_buf);
out:
	/* re-enable Admin queue interrupt cause */
	iavf_misc_irq_enable(adapter);
}

/**
 * iavf_client_task - worker thread to perform client work
 * @work: pointer to work_struct containing our data
 *
 * This task handles client interactions. Because client calls can be
 * reentrant, we can't handle them in the watchdog.
 **/
static void iavf_client_task(struct work_struct *work)
{
	struct iavf_adapter *adapter =
		container_of(work, struct iavf_adapter, client_task.work);

	/* If we can't get the client bit, just give up. We'll be rescheduled
	 * later.
	 */

	if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
		return;

	if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
		iavf_client_subtask(adapter);
		adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
		iavf_notify_client_l2_params(&adapter->vsi);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
		iavf_notify_client_close(&adapter->vsi, false);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
		iavf_notify_client_open(&adapter->vsi);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
	}
out:
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
}

/**
 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
{
	int i;

	if (!adapter->tx_rings)
		return;

	for (i = 0; i < adapter->num_active_queues; i++)
		if (adapter->tx_rings[i].desc)
			iavf_free_tx_resources(&adapter->tx_rings[i]);
}

/**
 * iavf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->tx_rings[i].count = adapter->tx_desc_count;
		err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
		if (!err)
			continue;
		dev_err(&adapter->pdev->dev,
			"Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * iavf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].count = adapter->rx_desc_count;
		err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
		if (!err)
			continue;
		dev_err(&adapter->pdev->dev,
			"Allocation for Rx Queue %u failed\n", i);
		break;
	}
	return err;
}

/**
 * iavf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
{
	int i;

	if (!adapter->rx_rings)
		return;

	for (i = 0; i < adapter->num_active_queues; i++)
		if (adapter->rx_rings[i].desc)
			iavf_free_rx_resources(&adapter->rx_rings[i]);
}

/**
 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
 * @adapter: board private structure
 * @max_tx_rate: max Tx bandwidth for a tc
 **/
static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
				      u64 max_tx_rate)
{
	int speed = 0, ret = 0;

	if (ADV_LINK_SUPPORT(adapter)) {
		if (adapter->link_speed_mbps < U32_MAX) {
			speed = adapter->link_speed_mbps;
			goto validate_bw;
		} else {
			dev_err(&adapter->pdev->dev, "Unknown link speed\n");
			return -EINVAL;
		}
	}

	switch (adapter->link_speed) {
	case VIRTCHNL_LINK_SPEED_40GB:
		speed = SPEED_40000;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		speed = SPEED_25000;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		speed = SPEED_20000;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		speed = SPEED_10000;
		break;
	case VIRTCHNL_LINK_SPEED_5GB:
		speed = SPEED_5000;
		break;
	case VIRTCHNL_LINK_SPEED_2_5GB:
		speed = SPEED_2500;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		speed = SPEED_1000;
		break;
	case VIRTCHNL_LINK_SPEED_100MB:
		speed = SPEED_100;
		break;
	default:
		break;
	}

validate_bw:
	if (max_tx_rate > speed) {
		dev_err(&adapter->pdev->dev,
			"Invalid tx rate specified\n");
		ret = -EINVAL;
	}

	return ret;
}
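
/* Example: with a negotiated link speed of VIRTCHNL_LINK_SPEED_10GB
 * (speed = SPEED_10000, i.e. 10000 Mbps), a requested aggregate max_tx_rate
 * of 12000 Mbps fails the check above and returns -EINVAL.
 */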

/**
 * iavf_validate_ch_config - validate queue mapping info
 * @adapter: board private structure
 * @mqprio_qopt: queue parameters
 *
 * This function validates if the config provided by the user to
 * configure queue channels is valid or not. Returns 0 on a valid
 * config.
 **/
static int iavf_validate_ch_config(struct iavf_adapter *adapter,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 total_max_rate = 0;
	int i, num_qps = 0;
	u64 tx_rate = 0;
	int ret = 0;

	if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
	    mqprio_qopt->qopt.num_tc < 1)
		return -EINVAL;

	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
		if (!mqprio_qopt->qopt.count[i] ||
		    mqprio_qopt->qopt.offset[i] != num_qps)
			return -EINVAL;
		if (mqprio_qopt->min_rate[i]) {
			dev_err(&adapter->pdev->dev,
				"Invalid min tx rate (greater than 0) specified\n");
			return -EINVAL;
		}
		/* convert to Mbps */
		tx_rate = div_u64(mqprio_qopt->max_rate[i],
				  IAVF_MBPS_DIVISOR);
		total_max_rate += tx_rate;
		num_qps += mqprio_qopt->qopt.count[i];
	}
	if (num_qps > IAVF_MAX_REQ_QUEUES)
		return -EINVAL;

	ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
	return ret;
}
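
/* Illustrative only (interface name and rates are made up): a user config
 * that passes this validation could be created with
 *
 *   tc qdisc add dev <vf-netdev> root mqprio num_tc 2 \
 *	map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel \
 *	shaper bw_rlimit max_rate 4Gbit 5Gbit
 *
 * i.e. contiguous queue ranges starting at offset 0, no min_rate, and max
 * rates whose Mbps sum does not exceed the negotiated link speed.
 */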

/**
 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
 * @adapter: board private structure
 **/
static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
{
	struct iavf_cloud_filter *cf, *cftmp;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
				 list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
}

/**
 * __iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type_data: tc offload data
 *
 * This function processes the config information provided by the
 * user to configure traffic classes/queue channels and packages the
 * information to request the PF to setup traffic classes.
 *
 * Returns 0 on success.
 **/
static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	u8 num_tc = 0, total_qps = 0;
	int ret = 0, netdev_tc = 0;
	u64 max_tx_rate;
	u16 mode;
	int i;

	num_tc = mqprio_qopt->qopt.num_tc;
	mode = mqprio_qopt->mode;

	/* delete queue_channel */
	if (!mqprio_qopt->qopt.hw) {
		if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
			/* reset the tc configuration */
			netdev_reset_tc(netdev);
			adapter->num_tc = 0;
			netif_tx_stop_all_queues(netdev);
			netif_tx_disable(netdev);
			iavf_del_all_cloud_filters(adapter);
			adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
			goto exit;
		} else {
			return -EINVAL;
		}
	}

	/* add queue channel */
	if (mode == TC_MQPRIO_MODE_CHANNEL) {
		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
			dev_err(&adapter->pdev->dev, "ADq not supported\n");
			return -EOPNOTSUPP;
		}
		if (adapter->ch_config.state != __IAVF_TC_INVALID) {
			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
			return -EINVAL;
		}

		ret = iavf_validate_ch_config(adapter, mqprio_qopt);
		if (ret)
			return ret;
		/* Return if same TC config is requested */
		if (adapter->num_tc == num_tc)
			return 0;
		adapter->num_tc = num_tc;

		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			if (i < num_tc) {
				adapter->ch_config.ch_info[i].count =
					mqprio_qopt->qopt.count[i];
				adapter->ch_config.ch_info[i].offset =
					mqprio_qopt->qopt.offset[i];
				total_qps += mqprio_qopt->qopt.count[i];
				max_tx_rate = mqprio_qopt->max_rate[i];
				/* convert to Mbps */
				max_tx_rate = div_u64(max_tx_rate,
						      IAVF_MBPS_DIVISOR);
				adapter->ch_config.ch_info[i].max_tx_rate =
					max_tx_rate;
			} else {
				adapter->ch_config.ch_info[i].count = 1;
				adapter->ch_config.ch_info[i].offset = 0;
			}
		}
		adapter->ch_config.total_qps = total_qps;
		netif_tx_stop_all_queues(netdev);
		netif_tx_disable(netdev);
		adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
		netdev_reset_tc(netdev);
		/* Report the tc mapping up the stack */
		netdev_set_num_tc(adapter->netdev, num_tc);
		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			u16 qcount = mqprio_qopt->qopt.count[i];
			u16 qoffset = mqprio_qopt->qopt.offset[i];

			if (i < num_tc)
				netdev_set_tc_queue(netdev, netdev_tc++, qcount,
						    qoffset);
		}
	}
exit:
	return ret;
}

/**
 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
 * @adapter: board private structure
 * @f: pointer to struct flow_cls_offload
 * @filter: pointer to cloud filter structure
 */
static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
				 struct flow_cls_offload *f,
				 struct iavf_cloud_filter *filter)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u8 field_flags = 0;
	u16 addr_type = 0;
	u16 n_proto = 0;
	int i = 0;
	struct virtchnl_filter *vf = &filter->f;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
		dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
			dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		if (match.mask->keyid != 0)
			field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		n_proto = n_proto_key & n_proto_mask;
		if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
			return -EINVAL;
		if (n_proto == ETH_P_IPV6) {
			/* specify flow type as TCP IPv6 */
			vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
		}

		if (match.key->ip_proto != IPPROTO_TCP) {
			dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
			return -EINVAL;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		/* use is_broadcast and is_zero to check for all 0xf or 0 */
		if (!is_zero_ether_addr(match.mask->dst)) {
			if (is_broadcast_ether_addr(match.mask->dst)) {
				field_flags |= IAVF_CLOUD_FIELD_OMAC;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
					match.mask->dst);
				return IAVF_ERR_CONFIG;
			}
		}

		if (!is_zero_ether_addr(match.mask->src)) {
			if (is_broadcast_ether_addr(match.mask->src)) {
				field_flags |= IAVF_CLOUD_FIELD_IMAC;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
					match.mask->src);
				return IAVF_ERR_CONFIG;
			}
		}

		if (!is_zero_ether_addr(match.key->dst))
			if (is_valid_ether_addr(match.key->dst) ||
			    is_multicast_ether_addr(match.key->dst)) {
				/* set the mask if a valid dst_mac address */
				for (i = 0; i < ETH_ALEN; i++)
					vf->mask.tcp_spec.dst_mac[i] |= 0xff;
				ether_addr_copy(vf->data.tcp_spec.dst_mac,
						match.key->dst);
			}

		if (!is_zero_ether_addr(match.key->src))
			if (is_valid_ether_addr(match.key->src) ||
			    is_multicast_ether_addr(match.key->src)) {
				/* set the mask if a valid src_mac address */
				for (i = 0; i < ETH_ALEN; i++)
					vf->mask.tcp_spec.src_mac[i] |= 0xff;
				ether_addr_copy(vf->data.tcp_spec.src_mac,
						match.key->src);
			}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				field_flags |= IAVF_CLOUD_FIELD_IVLAN;
			} else {
				dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
					match.mask->vlan_id);
				return IAVF_ERR_CONFIG;
			}
		}
		vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
		vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (match.mask->dst) {
			if (match.mask->dst == cpu_to_be32(0xffffffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
					be32_to_cpu(match.mask->dst));
				return IAVF_ERR_CONFIG;
			}
		}

		if (match.mask->src) {
			if (match.mask->src == cpu_to_be32(0xffffffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
					be32_to_cpu(match.mask->src));
				return IAVF_ERR_CONFIG;
			}
		}

		if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
			dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
			return IAVF_ERR_CONFIG;
		}
		if (match.key->dst) {
			vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
			vf->data.tcp_spec.dst_ip[0] = match.key->dst;
		}
		if (match.key->src) {
			vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
			vf->data.tcp_spec.src_ip[0] = match.key->src;
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		/* validate mask, make sure it is not IPV6_ADDR_ANY */
		if (ipv6_addr_any(&match.mask->dst)) {
			dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
				IPV6_ADDR_ANY);
			return IAVF_ERR_CONFIG;
		}

		/* src and dest IPv6 address should not be LOOPBACK
		 * (0:0:0:0:0:0:0:1) which can be represented as ::1
		 */
		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			dev_err(&adapter->pdev->dev,
				"ipv6 addr should not be loopback\n");
			return IAVF_ERR_CONFIG;
		}
		if (!ipv6_addr_any(&match.mask->dst) ||
		    !ipv6_addr_any(&match.mask->src))
			field_flags |= IAVF_CLOUD_FIELD_IIP;

		for (i = 0; i < 4; i++)
			vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
		memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
		       sizeof(vf->data.tcp_spec.dst_ip));
		for (i = 0; i < 4; i++)
			vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
		memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
		       sizeof(vf->data.tcp_spec.src_ip));
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (match.mask->src) {
			if (match.mask->src == cpu_to_be16(0xffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
					be16_to_cpu(match.mask->src));
				return IAVF_ERR_CONFIG;
			}
		}

		if (match.mask->dst) {
			if (match.mask->dst == cpu_to_be16(0xffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
					be16_to_cpu(match.mask->dst));
				return IAVF_ERR_CONFIG;
			}
		}
		if (match.key->dst) {
			vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
			vf->data.tcp_spec.dst_port = match.key->dst;
		}

		if (match.key->src) {
			vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
			vf->data.tcp_spec.src_port = match.key->src;
		}
	}
	vf->field_flags = field_flags;

	return 0;
}
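
/* Illustrative only: a flower filter this parser accepts could be added
 * from userspace with something like
 *
 *   tc filter add dev <vf-netdev> protocol ip ingress flower \
 *	dst_ip 192.168.1.10 ip_proto tcp dst_port 80 skip_sw hw_tc 1
 *
 * Only TCP over IPv4/IPv6 is supported, and masks must be either all-ones
 * or all-zeroes as enforced above.
 */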

/**
 * iavf_handle_tclass - Forward to a traffic class on the device
 * @adapter: board private structure
 * @tc: traffic class index on the device
 * @filter: pointer to cloud filter structure
 */
static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
			      struct iavf_cloud_filter *filter)
{
	if (tc == 0)
		return 0;
	if (tc < adapter->num_tc) {
		if (!filter->f.data.tcp_spec.dst_port) {
			dev_err(&adapter->pdev->dev,
				"Specify destination port to redirect to traffic class other than TC0\n");
			return -EINVAL;
		}
	}
	/* redirect to a traffic class on the same device */
	filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
	filter->f.action_meta = tc;
	return 0;
}

/**
 * iavf_configure_clsflower - Add tc flower filters
 * @adapter: board private structure
 * @cls_flower: Pointer to struct flow_cls_offload
 */
static int iavf_configure_clsflower(struct iavf_adapter *adapter,
				    struct flow_cls_offload *cls_flower)
{
	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
	struct iavf_cloud_filter *filter = NULL;
	int err = -EINVAL, count = 50;

	if (tc < 0) {
		dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		if (--count == 0)
			goto err;
		udelay(1);
	}

	filter->cookie = cls_flower->cookie;

	/* set the mask to all zeroes to begin with */
	memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
	/* start out with flow type and eth type IPv4 to begin with */
	filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
	err = iavf_parse_cls_flower(adapter, cls_flower, filter);
	if (err < 0)
		goto err;

	err = iavf_handle_tclass(adapter, tc, filter);
	if (err < 0)
		goto err;

	/* add filter to the list */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_add_tail(&filter->list, &adapter->cloud_filter_list);
	adapter->num_cloud_filters++;
	filter->add = true;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
err:
	if (err)
		kfree(filter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	return err;
}

/* iavf_find_cf - Find the cloud filter in the list
 * @adapter: Board private structure
 * @cookie: filter specific cookie
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * cloud_filter_list_lock.
 */
static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
					      unsigned long *cookie)
{
	struct iavf_cloud_filter *filter = NULL;

	if (!cookie)
		return NULL;

	list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
			return filter;
	}
	return NULL;
}

/**
 * iavf_delete_clsflower - Remove tc flower filters
 * @adapter: board private structure
 * @cls_flower: Pointer to struct flow_cls_offload
 */
static int iavf_delete_clsflower(struct iavf_adapter *adapter,
				 struct flow_cls_offload *cls_flower)
{
	struct iavf_cloud_filter *filter = NULL;
	int err = 0;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	filter = iavf_find_cf(adapter, &cls_flower->cookie);
	if (filter) {
		filter->del = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
	} else {
		err = -EINVAL;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	return err;
}

/**
 * iavf_setup_tc_cls_flower - flower classifier offloads
 * @adapter: board private structure
 * @cls_flower: pointer to struct flow_cls_offload
 */
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
				    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return iavf_configure_clsflower(adapter, cls_flower);
	case FLOW_CLS_DESTROY:
		return iavf_delete_clsflower(adapter, cls_flower);
	case FLOW_CLS_STATS:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * iavf_setup_tc_block_cb - block callback for tc
 * @type: type of offload
 * @type_data: offload data
 * @cb_priv: driver-private data supplied when the block was bound (the
 *	     adapter)
 *
 * This function is the block callback for traffic classes
 **/
static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct iavf_adapter *adapter = cb_priv;

	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return iavf_setup_tc_cls_flower(cb_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(iavf_block_cb_list);

/**
 * iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type: type of offload
 * @type_data: tc offload data
 *
 * This function is the callback to ndo_setup_tc in the
 * netdev_ops.
 *
 * Returns 0 on success
 **/
static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			 void *type_data)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return __iavf_setup_tc(netdev, type_data);
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &iavf_block_cb_list,
						  iavf_setup_tc_block_cb,
						  adapter, adapter, true);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * iavf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog is started,
 * and the stack is notified that the interface is ready.
 **/
static int iavf_open(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int err;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
		return -EIO;
	}

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	if (adapter->state != __IAVF_DOWN) {
		err = -EBUSY;
		goto err_unlock;
	}

	/* allocate transmit descriptors */
	err = iavf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = iavf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* clear any pending interrupts, may auto mask */
	err = iavf_request_traffic_irqs(adapter, netdev->name);
	if (err)
		goto err_req_irq;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	iavf_add_filter(adapter, adapter->hw.mac.addr);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_configure(adapter);

	iavf_up_complete(adapter);

	iavf_irq_enable(adapter, true);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return 0;

err_req_irq:
	iavf_down(adapter);
	iavf_free_traffic_irqs(adapter);
err_setup_rx:
	iavf_free_all_rx_resources(adapter);
err_setup_tx:
	iavf_free_all_tx_resources(adapter);
err_unlock:
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return err;
}

/**
 * iavf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
 * are freed, along with all transmit and receive resources.
 **/
static int iavf_close(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int status;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return 0;

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;

	iavf_down(adapter);
	adapter->state = __IAVF_DOWN_PENDING;
	iavf_free_traffic_irqs(adapter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	/* We explicitly don't free resources here because the hardware is
	 * still active and can DMA into memory. Resources are cleared in
	 * iavf_virtchnl_completion() after we get confirmation from the PF
	 * driver that the rings have been stopped.
	 *
	 * Also, we wait for state to transition to __IAVF_DOWN before
	 * returning. State change occurs in iavf_virtchnl_completion() after
	 * VF resources are released (which occurs after PF driver processes
	 * and responds to admin queue commands).
	 */

	status = wait_event_timeout(adapter->down_waitqueue,
				    adapter->state == __IAVF_DOWN,
				    msecs_to_jiffies(500));
	if (!status)
		netdev_warn(netdev, "Device resources not yet released\n");
	return 0;
}

/**
 * iavf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netdev->mtu = new_mtu;
	if (CLIENT_ENABLED(adapter)) {
		iavf_notify_client_l2_params(&adapter->vsi);
		adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
	}
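	/* Changing the MTU requires the rings to be rebuilt, so flag a
	 * full VF reset rather than reprogramming in place.
	 */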
	adapter->flags |= IAVF_FLAG_RESET_NEEDED;
	queue_work(iavf_wq, &adapter->reset_task);

	return 0;
}

/**
 * iavf_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 * Note: expects to be called while under rtnl_lock()
 **/
static int iavf_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

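	/* VLAN Rx stripping cannot be changed locally; the request is
	 * forwarded to the PF by setting aq_required flags that the
	 * admin-queue handling picks up.
	 */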
	/* Don't allow changing VLAN_RX flag when adapter is not capable
	 * of VLAN offload
	 */
	if (!VLAN_ALLOWED(adapter)) {
		if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
			return -EINVAL;
	} else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			adapter->aq_required |=
				IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
		else
			adapter->aq_required |=
				IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
	}

	return 0;
}

/**
 * iavf_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
static netdev_features_t iavf_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	size_t len;

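	/* The header-length checks below track the width of the length
	 * fields in the hardware Tx descriptors (an assumption based on
	 * the field names MACLEN/IPLEN/L4TUNLEN), which count 2-byte
	 * words or 4-byte dwords; hence the 63 * 2 and 127 * 4 masks.
	 */
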
	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	/* MACLEN can support at most 63 words */
	len = skb_network_header(skb) - skb->data;
	if (len & ~(63 * 2))
		goto out_err;

	/* IPLEN and EIPLEN can support at most 127 dwords */
	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(127 * 4))
		goto out_err;

	if (skb->encapsulation) {
		/* L4TUNLEN can support 127 words */
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(127 * 2))
			goto out_err;

		/* IPLEN can support at most 127 dwords */
		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(127 * 4))
			goto out_err;
	}

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value, and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;
out_err:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

/**
 * iavf_fix_features - fix up the netdev feature bits
 * @netdev: our net device
 * @features: desired feature bits
 *
 * Returns fixed-up features bits
 **/
static netdev_features_t iavf_fix_features(struct net_device *netdev,
					   netdev_features_t features)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

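	/* Strip VLAN offload bits the PF has not granted us */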
	if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_FILTER);

	return features;
}

static const struct net_device_ops iavf_netdev_ops = {
	.ndo_open		= iavf_open,
	.ndo_stop		= iavf_close,
	.ndo_start_xmit		= iavf_xmit_frame,
	.ndo_set_rx_mode	= iavf_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= iavf_set_mac,
	.ndo_change_mtu		= iavf_change_mtu,
	.ndo_tx_timeout		= iavf_tx_timeout,
	.ndo_vlan_rx_add_vid	= iavf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= iavf_vlan_rx_kill_vid,
	.ndo_features_check	= iavf_features_check,
	.ndo_fix_features	= iavf_fix_features,
	.ndo_set_features	= iavf_set_features,
	.ndo_setup_tc		= iavf_setup_tc,
};

/**
 * iavf_check_reset_complete - check that VF reset is complete
 * @hw: pointer to hw struct
 *
 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
 **/
static int iavf_check_reset_complete(struct iavf_hw *hw)
{
	u32 rstat;
	int i;

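	/* Poll VFGEN_RSTAT until the PF reports this VF as active or
	 * reset-complete; 100 tries at 10-20 us apart caps the wait at
	 * roughly 2 ms.
	 */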
	for (i = 0; i < 100; i++) {
		rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
			IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
		    (rstat == VIRTCHNL_VFR_COMPLETED))
			return 0;
		usleep_range(10, 20);
	}
	return -EBUSY;
}

/**
 * iavf_process_config - Process the config information we got from the PF
 * @adapter: board private structure
 *
 * Verify that we have a valid config struct, and set up our netdev features
 * and our VSI struct.
 **/
int iavf_process_config(struct iavf_adapter *adapter)
{
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	int i, num_req_queues = adapter->num_req_queues;
	struct net_device *netdev = adapter->netdev;
	struct iavf_vsi *vsi = &adapter->vsi;
	netdev_features_t hw_enc_features;
	netdev_features_t hw_features;

	/* got VF config message back from PF, now we can parse it */
	for (i = 0; i < vfres->num_vsis; i++) {
		if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			adapter->vsi_res = &vfres->vsi_res[i];
	}
	if (!adapter->vsi_res) {
		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
		return -ENODEV;
	}

	if (num_req_queues &&
	    num_req_queues > adapter->vsi_res->num_queue_pairs) {
		/* Problem. The PF gave us fewer queues than we had
		 * negotiated in our request. Need a reset to see if we can
		 * get back to a working state.
		 */
		dev_err(&adapter->pdev->dev,
			"Requested %d queues, but PF only gave us %d.\n",
			num_req_queues,
			adapter->vsi_res->num_queue_pairs);
		adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
		adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
		iavf_schedule_reset(adapter);
		return -ENODEV;
	}
	adapter->num_req_queues = 0;

	hw_enc_features = NETIF_F_SG |
			  NETIF_F_IP_CSUM |
			  NETIF_F_IPV6_CSUM |
			  NETIF_F_HIGHDMA |
			  NETIF_F_SOFT_FEATURES |
			  NETIF_F_TSO |
			  NETIF_F_TSO_ECN |
			  NETIF_F_TSO6 |
			  NETIF_F_SCTP_CRC |
			  NETIF_F_RXHASH |
			  NETIF_F_RXCSUM |
			  0;

	/* advertise to stack only if offloads for encapsulated packets are
	 * supported
	 */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
		hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
				   NETIF_F_GSO_GRE |
				   NETIF_F_GSO_GRE_CSUM |
				   NETIF_F_GSO_IPXIP4 |
				   NETIF_F_GSO_IPXIP6 |
				   NETIF_F_GSO_UDP_TUNNEL_CSUM |
				   NETIF_F_GSO_PARTIAL |
				   0;

		if (!(vfres->vf_cap_flags &
		      VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
			netdev->gso_partial_features |=
				NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
		netdev->hw_enc_features |= hw_enc_features;
	}
	/* record features VLANs can make use of */
	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

	/* Write features and hw_features separately to avoid polluting
	 * with, or dropping, features that are set when we registered.
	 */
	hw_features = hw_enc_features;

	/* Enable VLAN features if supported */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX);
	/* Enable cloud filter if ADQ is supported */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
		hw_features |= NETIF_F_HW_TC;

	netdev->hw_features |= hw_features;

	netdev->features |= hw_features;

	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Do not turn on offloads when they are requested to be turned off.
	 * TSO needs a minimum MTU of 576 bytes to work correctly.
	 */
	if (netdev->wanted_features) {
		if (!(netdev->wanted_features & NETIF_F_TSO) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO;
		if (!(netdev->wanted_features & NETIF_F_TSO6) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO6;
		if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
			netdev->features &= ~NETIF_F_TSO_ECN;
		if (!(netdev->wanted_features & NETIF_F_GRO))
			netdev->features &= ~NETIF_F_GRO;
		if (!(netdev->wanted_features & NETIF_F_GSO))
			netdev->features &= ~NETIF_F_GSO;
	}

	adapter->vsi.id = adapter->vsi_res->vsi_id;

	adapter->vsi.back = adapter;
	adapter->vsi.base_vector = 1;
	adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
	vsi->netdev = adapter->netdev;
	vsi->qs_handle = adapter->vsi_res->qset_handle;
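	/* Prefer the PF-advertised RSS key/LUT sizes when RSS offload to
	 * the PF was negotiated; otherwise fall back to driver defaults.
	 */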
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		adapter->rss_key_size = vfres->rss_key_size;
		adapter->rss_lut_size = vfres->rss_lut_size;
	} else {
		adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
		adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
	}

	return 0;
}

/**
 * iavf_init_task - worker thread to perform delayed initialization
 * @work: pointer to work_struct containing our data
 *
 * This task completes the work that was begun in probe. Due to the nature
 * of VF-PF communications, we may need to wait tens of milliseconds to get
 * responses back from the PF. Rather than busy-wait in probe and bog down the
 * whole system, we'll do it in a task so we can sleep.
 * This task only runs during driver init. Once we've established
 * communications with the PF driver and set up our netdev, the watchdog
 * takes over.
 **/
static void iavf_init_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    init_task.work);
	struct iavf_hw *hw = &adapter->hw;

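	/* Walk the init state machine: each handler advances
	 * adapter->state on success, while any failure funnels into
	 * init_failed below for a delayed retry.
	 */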
	switch (adapter->state) {
	case __IAVF_STARTUP:
		if (iavf_startup(adapter) < 0)
			goto init_failed;
		break;
	case __IAVF_INIT_VERSION_CHECK:
		if (iavf_init_version_check(adapter) < 0)
			goto init_failed;
		break;
	case __IAVF_INIT_GET_RESOURCES:
		if (iavf_init_get_resources(adapter) < 0)
			goto init_failed;
		return;
	default:
		goto init_failed;
	}

	queue_delayed_work(iavf_wq, &adapter->init_task,
			   msecs_to_jiffies(30));
	return;
init_failed:
	if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
		dev_err(&adapter->pdev->dev,
			"Failed to communicate with PF; waiting before retry\n");
		adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
		iavf_shutdown_adminq(hw);
		adapter->state = __IAVF_STARTUP;
		queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
		return;
	}
	queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
}

/**
 * iavf_shutdown - Shutdown the device in preparation for a reboot
 * @pdev: pci device structure
 **/
static void iavf_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		iavf_close(netdev);

	/* Prevent the watchdog from running. */
	adapter->state = __IAVF_REMOVE;
	adapter->aq_required = 0;

#ifdef CONFIG_PM
	pci_save_state(pdev);

#endif
	pci_disable_device(pdev);
}

/**
 * iavf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in iavf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * iavf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct iavf_adapter *adapter = NULL;
	struct iavf_hw *hw = NULL;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	err = pci_request_regions(pdev, iavf_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
				   IAVF_MAX_REQ_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;

	hw = &adapter->hw;
	hw->back = adapter;

	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
	adapter->state = __IAVF_STARTUP;

	/* Call save state here because it relies on the adapter struct. */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	spin_lock_init(&adapter->mac_vlan_list_lock);
	spin_lock_init(&adapter->cloud_filter_list_lock);

	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);
	INIT_LIST_HEAD(&adapter->cloud_filter_list);

	INIT_WORK(&adapter->reset_task, iavf_reset_task);
	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
	INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
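	/* Stagger the first run of init_task by the PCI function number
	 * (0-7); presumably this spreads out mailbox traffic when many
	 * VFs probe at the same time.
	 */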
	queue_delayed_work(iavf_wq, &adapter->init_task,
			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));

	/* Setup the wait queue for indicating transition to down status */
	init_waitqueue_head(&adapter->down_waitqueue);

	return 0;

err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

#ifdef CONFIG_PM
/**
 * iavf_suspend - Power management suspend routine
 * @pdev: PCI device information struct
 * @state: unused
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
static int iavf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int retval = 0;

	netif_device_detach(netdev);

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	if (netif_running(netdev)) {
		rtnl_lock();
		iavf_down(adapter);
		rtnl_unlock();
	}
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	pci_disable_device(pdev);

	return 0;
}

/**
 * iavf_resume - Power management resume routine
 * @pdev: PCI device information struct
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
static int iavf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
		return err;
	}
	pci_set_master(pdev);

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	if (err) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
		return err;
	}
	err = iavf_request_misc_irq(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
		return err;
	}

	queue_work(iavf_wq, &adapter->reset_task);

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_hw *hw = &adapter->hw;
	int err;
	/* Indicate we are in remove and not to run reset_task */
	set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
	cancel_delayed_work_sync(&adapter->init_task);
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->client_task);
	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

	/* Shut down all the garbage mashers on the detention level */
	adapter->state = __IAVF_REMOVE;
	adapter->aq_required = 0;
	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!iavf_asq_done(hw)) {
		iavf_request_reset(adapter);
		msleep(50);
	}
	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_misc_irq_disable(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	cancel_work_sync(&adapter->adminq_task);

	iavf_free_rss(adapter);

	if (hw->aq.asq.count)
		iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_free_queues(adapter);
	kfree(adapter->vf_res);
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		list_del(&vlf->list);
		kfree(vlf);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

static struct pci_driver iavf_driver = {
	.name     = iavf_driver_name,
	.id_table = iavf_pci_tbl,
	.probe    = iavf_probe,
	.remove   = iavf_remove,
#ifdef CONFIG_PM
	.suspend  = iavf_suspend,
	.resume   = iavf_resume,
#endif
	.shutdown = iavf_shutdown,
};

/**
 * iavf_init_module - Driver Registration Routine
 *
 * iavf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init iavf_init_module(void)
{
	int ret;

	pr_info("iavf: %s - version %s\n", iavf_driver_string,
		iavf_driver_version);

	pr_info("%s\n", iavf_copyright);

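	/* Single-threaded (max_active = 1) workqueue: the driver's work
	 * items (init, reset, watchdog, adminq) never run concurrently,
	 * and WQ_MEM_RECLAIM guarantees forward progress under memory
	 * pressure.
	 */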
	iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				  iavf_driver_name);
	if (!iavf_wq) {
		pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
		return -ENOMEM;
	}
	ret = pci_register_driver(&iavf_driver);
	return ret;
}

module_init(iavf_init_module);

/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit iavf_exit_module(void)
{
	pci_unregister_driver(&iavf_driver);
	destroy_workqueue(iavf_wq);
}

module_exit(iavf_exit_module);

/* iavf_main.c */