drivers/net/ethernet/intel/iavf/iavf_main.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "i40e_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 3
#define DRV_VERSION_MINOR 2
#define DRV_VERSION_BUILD 3
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) \
	DRV_KERN
const char iavf_driver_version[] = DRV_VERSION;
static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *iavf_wq;

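/* Editor's note, not in the original source: MODULE_ALIAS("i40evf") above
 * lets modprobe/udev resolve requests for the old "i40evf" module name to
 * this renamed "iavf" module, so existing configurations keep loading it.
 */
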
/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
iavf_status iavf_allocate_dma_mem_d(struct i40e_hw *hw,
				    struct i40e_dma_mem *mem,
				    u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}

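/* Editor's note, not in the original source: the *_mem_d helpers here are
 * the OS glue expected by the shared admin-queue code. hw->back points back
 * at the iavf_adapter, which is how a bare i40e_hw pointer reaches the
 * struct device needed for dma_alloc_coherent()/dma_free_coherent().
 */
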
/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
iavf_status iavf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return I40E_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
iavf_status iavf_allocate_virt_mem_d(struct i40e_hw *hw,
				     struct i40e_virt_mem *mem, u32 size)
{
	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
iavf_status iavf_free_virt_mem_d(struct i40e_hw *hw,
				 struct i40e_virt_mem *mem)
{
	if (!mem)
		return I40E_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}

/**
 * iavf_debug_d - OS dependent version of debug printing
 * @hw: pointer to the HW structure
 * @mask: debug level mask
 * @fmt_str: printf-type format description
 **/
void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
{
	char buf[512];
	va_list argptr;

	if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
		return;

	va_start(argptr, fmt_str);
	vsnprintf(buf, sizeof(buf), fmt_str, argptr);
	va_end(argptr);

	/* the debug string is already formatted with a newline */
	pr_info("%s", buf);
}

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		schedule_work(&adapter->reset_task);
	}
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void iavf_tx_timeout(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, I40E_VFINT_DYN_CTL01, 0);

	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;

	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
				       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);

	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct i40e_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
	struct i40e_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
			     I40E_VFINT_DYN_CTLN1_INTENA_MASK |
			     I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct i40e_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct i40e_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, I40E_VFINT_ICR01);
	rd32(hw, I40E_VFINT_ICR0_ENA1);

	/* schedule work on the private workqueue */
	schedule_work(&adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct i40e_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct i40e_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}

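/* Editor's note, not in the original source: a worked example of the
 * round-robin above. With 8 active queues and 4 queue vectors, (ridx, vidx)
 * advances as (0,0) (1,1) (2,2) (3,3) (4,0) (5,1) (6,2) (7,3), so queue
 * pairs {0,4} share vector 0, {1,5} share vector 1, and so on.
 */
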
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * iavf_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
static void iavf_netpoll(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
		return;

	for (i = 0; i < q_vectors; i++)
		iavf_msix_clean_rings(0, &adapter->q_vectors[i]);
}

#endif
/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct i40e_q_vector *q_vector =
		container_of(notify, struct i40e_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}

/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%d", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%d", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_set_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}

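/* Editor's note, not in the original source: for a VF whose netdev is named
 * eth1, the loop above yields IRQ names like "iavf-eth1-TxRx-0",
 * "iavf-eth1-TxRx-1", ... (visible in /proc/interrupts). With a node of -1,
 * cpumask_local_spread(v_idx, -1) simply picks the v_idx-th online CPU, so
 * vector 0 is hinted to the first online CPU, vector 1 to the second, etc.
 */
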
/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = I40E_RXBUFFER_2048;
	struct i40e_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = I40E_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu. On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}

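/* Editor's note, not in the original source: concretely, on a 4K-page
 * system without legacy-rx a standard MTU (<= 1500) ends up with a
 * 1536-byte buffer, while a larger (jumbo) MTU keeps the 3072-byte buffer
 * carved from an order-1 page; with IAVF_FLAG_LEGACY_RX set the buffer
 * always stays at 2048 bytes.
 */
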
/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (vlan == f->vlan)
			return f;
	}
	return NULL;
}

/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
		/* Editorial fix: the original used GFP_KERNEL here, which may
		 * sleep while mac_vlan_list_lock is held; GFP_ATOMIC is
		 * required under a spinlock.
		 */
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_ALLOWED(adapter))
		return -EIO;
	if (iavf_add_vlan(adapter, vid) == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (VLAN_ALLOWED(adapter)) {
		iavf_del_vlan(adapter, vid);
		return 0;
	}
	return -EIO;
}

/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
				  const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}

/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
				 const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}

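/* Editor's note, not in the original source: iavf_add_filter() allocates
 * with GFP_ATOMIC because its callers (iavf_set_mac(), iavf_addr_sync())
 * invoke it while holding mac_vlan_list_lock, a spinlock under which
 * sleeping allocations are not allowed.
 */
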
/**
 * iavf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct i40e_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	if (adapter->flags & IAVF_FLAG_ADDR_SET_BY_PF)
		return -EPERM;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = iavf_add_filter(adapter, addr->sa_data);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (f) {
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
	}

	return (f == NULL) ? -ENOMEM : 0;
}

/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (iavf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = iavf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}

/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & IAVF_FLAG_PROMISC_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}

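/* Editor's note, not in the original source: e.g. "ip link set eth1 promisc
 * on" sets IFF_PROMISC, so the next .ndo_set_rx_mode call queues
 * IAVF_FLAG_AQ_REQUEST_PROMISC here; the actual virtchnl request to the PF
 * is sent later by the watchdog, which drains aq_required bits one at a
 * time.
 */
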
/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct i40e_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

954
955/**
129cf89e 956 * iavf_napi_disable_all - disable NAPI on all queue vectors
5eae00c5
GR
957 * @adapter: board private structure
958 **/
129cf89e 959static void iavf_napi_disable_all(struct iavf_adapter *adapter)
5eae00c5
GR
960{
961 int q_idx;
962 struct i40e_q_vector *q_vector;
963 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
964
965 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
7d96ba1a 966 q_vector = &adapter->q_vectors[q_idx];
5eae00c5
GR
967 napi_disable(&q_vector->napi);
968 }
969}
970
/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	iavf_set_rx_mode(netdev);

	iavf_configure_tx(adapter);
	iavf_configure_rx(adapter);
	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct i40e_ring *ring = &adapter->rx_rings[i];

		iavf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
	}
}

/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
	adapter->state = __IAVF_RUNNING;
	clear_bit(__I40E_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}

/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct iavf_vlan_filter *vlf;
	struct iavf_mac_filter *f;
	struct iavf_cloud_filter *cf;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}

	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->remove = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		cf->del = true;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __IAVF_RESETTING) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}

/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}

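/* Editor's note, not in the original source: pci_enable_msix_range() returns
 * the number of vectors actually granted when it falls between the two
 * bounds, so asking for [MIN_MSIX_COUNT, vectors] on a VF that exposes fewer
 * vectors than requested still succeeds, and num_msix_vectors records the
 * smaller grant.
 */
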
/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}

/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did. Once basic reset is finished we'll confirm once we
	 * start negotiating config with PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct i40e_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct i40e_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct i40e_ring *tx_ring;
		struct i40e_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = I40E_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = I40E_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	return 0;

err_out:
	iavf_free_queues(adapter);
	return -ENOMEM;
}

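/* Editor's note, not in the original source: a worked example of the queue
 * count selection above. A VF given 16 queue pairs on an 8-CPU host, with no
 * ADq and no explicit queue request, gets num_active_queues = min(16, 8) = 8.
 */
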
/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);
	return err;
}

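/* Editor's note, not in the original source: continuing the example, 8 queue
 * pairs plus NONQ_VECS (the misc/admin-queue vector) gives a v_budget of 9,
 * which is then clamped by the PF-advertised vf_res->max_vectors (typically
 * 5 for these VFs, i.e. 4 queue vectors plus 1 misc vector).
 */
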
/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
	struct i40e_aqc_get_set_rss_key_data *rss_key =
		(struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct i40e_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				  adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;
}

/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, I40E_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, I40E_VFQF_HLUT(i), dw[i]);

	i40e_flush(hw);

	return 0;
}

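/* Editor's note, not in the original source: this register path works
 * because the VF exposes its own small bank of VFQF_HKEY/VFQF_HLUT CSRs;
 * the key and LUT buffers are written out one 32-bit word at a time, and
 * i40e_flush() forces the posted writes to complete.
 */
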
/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_config_rss(struct iavf_adapter *adapter)
{
	if (RSS_PF(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
					IAVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return iavf_config_rss_aq(adapter);
	} else {
		return iavf_config_rss_reg(adapter);
	}
}

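/* Editor's note, not in the original source: summary of the three RSS paths
 * above. RSS_PF: the PF owns RSS, so the driver only queues
 * IAVF_FLAG_AQ_SET_RSS_KEY/LUT virtchnl requests; RSS_AQ: the VF programs
 * firmware directly via admin-queue commands; otherwise the VF writes its
 * own VFQF_HKEY/HLUT registers.
 */
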
/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}

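/* Editor's note, not in the original source: a worked example of the default
 * LUT. With rss_lut_size = 64 and 4 active queues the table becomes
 * 0,1,2,3,0,1,2,3,... so hash results spread evenly across the queues.
 */
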
/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_init_rss(struct iavf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	int ret;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = I40E_DEFAULT_RSS_HENA;

		wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	iavf_fill_rss_lut(adapter);

	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
	ret = iavf_config_rss(adapter);

	return ret;
}

/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct i40e_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       iavf_napi_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}

/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
	int err;

	err = iavf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = iavf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* If we've made it so far while ADq flag being ON, then we haven't
	 * bailed out anywhere in middle. And ADq isn't just enabled but actual
	 * resources have been allocated in the reset path.
	 * Now we can truly claim that ADq is enabled.
	 */
	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
	iavf_free_queues(adapter);
err_alloc_queues:
	return err;
}

/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void iavf_free_rss(struct iavf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}

/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev))
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__I40E_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);

	if (RSS_AQ(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
	else
		err = iavf_init_rss(adapter);
err:
	return err;
}

/**
 * iavf_watchdog_timer - Periodic call-back timer
 * @t: pointer to the timer_list embedded in the adapter
 **/
static void iavf_watchdog_timer(struct timer_list *t)
{
	struct iavf_adapter *adapter = from_timer(adapter, t,
						  watchdog_timer);

	schedule_work(&adapter->watchdog_task);
	/* timer will be rescheduled in watchdog task */
}

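/* Editor's note, not in the original source: the watchdog below is the
 * driver's single serialization point for PF communication. Each pass sends
 * at most one admin-queue operation (note the "goto watchdog_done" after
 * every aq_required bit), so pending requests drain one per tick and never
 * race on the admin queue.
 */
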
/**
 * iavf_watchdog_task - Periodic call-back task
 * @work: pointer to work_struct
 **/
static void iavf_watchdog_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    watchdog_task);
	struct i40e_hw *hw = &adapter->hw;
	u32 reg_val;

	if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
		goto restart_watchdog;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
		    (reg_val == VIRTCHNL_VFR_COMPLETED)) {
			/* A chance for redemption! */
			dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
			adapter->state = __IAVF_STARTUP;
			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
			schedule_delayed_work(&adapter->init_task, 10);
			clear_bit(__IAVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			/* Don't reschedule the watchdog, since we've restarted
			 * the init task. When init_task contacts the PF and
			 * gets everything set up again, it'll restart the
			 * watchdog for us. Down, boy. Sit. Stay. Woof.
			 */
			return;
		}
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	if ((adapter->state < __IAVF_DOWN) ||
	    (adapter->flags & IAVF_FLAG_RESET_PENDING))
		goto watchdog_done;

	/* check for reset */
	reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
	if (!(adapter->flags & IAVF_FLAG_RESET_PENDING) && !reg_val) {
		adapter->state = __IAVF_RESETTING;
		adapter->flags |= IAVF_FLAG_RESET_PENDING;
		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
		schedule_work(&adapter->reset_task);
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	/* Process admin queue tasks. After init, everything gets done
	 * here so we don't race on the admin queue.
	 */
	if (adapter->current_op) {
		if (!iavf_asq_done(hw)) {
			dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
			iavf_send_api_ver(adapter);
		}
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG) {
		iavf_send_vf_config_msg(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
		iavf_disable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
		iavf_map_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
		iavf_add_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
		iavf_add_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
		iavf_del_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
		iavf_del_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
		iavf_configure_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
		iavf_enable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		iavf_init_rss(adapter);
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				     FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		goto watchdog_done;
	}

	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));

	if (adapter->state == __IAVF_RUNNING)
		iavf_request_stats(adapter);
watchdog_done:
	if (adapter->state == __IAVF_RUNNING)
		iavf_detect_recover_hung(&adapter->vsi);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
	if (adapter->state == __IAVF_REMOVE)
		return;
	if (adapter->aq_required)
		mod_timer(&adapter->watchdog_timer,
			  jiffies + msecs_to_jiffies(20));
	else
		mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
	schedule_work(&adapter->adminq_task);
}

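/* Illustrative sketch (not part of the driver): once init is done, other
 * contexts in this file never talk to the PF directly. A hypothetical
 * caller queues work by setting an IAVF_FLAG_AQ_* bit and lets the
 * watchdog task above issue the virtchnl message on its next pass:
 *
 *	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
 *	mod_timer(&adapter->watchdog_timer, jiffies + 1);
 *
 * Note the watchdog handles one AQ operation per pass and exits through
 * watchdog_done, so multiple pending flags drain over successive passes.
 */
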
static void iavf_disable_vf(struct iavf_adapter *adapter)
{
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_vlan_filter *fv, *fvtmp;
	struct iavf_cloud_filter *cf, *cftmp;

	adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;

	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	if (adapter->state == __IAVF_RUNNING) {
		set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
		netif_carrier_off(adapter->netdev);
		netif_tx_disable(adapter->netdev);
		adapter->link_up = false;
		iavf_napi_disable_all(adapter);
		iavf_irq_disable(adapter);
		iavf_free_traffic_irqs(adapter);
		iavf_free_all_tx_resources(adapter);
		iavf_free_all_rx_resources(adapter);
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Delete all of the filters */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
		list_del(&fv->list);
		kfree(fv);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_queues(adapter);
	iavf_free_q_vectors(adapter);
	kfree(adapter->vf_res);
	iavf_shutdown_adminq(&adapter->hw);
	adapter->netdev->flags &= ~IFF_UP;
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	adapter->state = __IAVF_DOWN;
	wake_up(&adapter->down_waitqueue);
	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}

#define IAVF_RESET_WAIT_MS 10
#define IAVF_RESET_WAIT_COUNT 500
/**
 * iavf_reset_task - Call-back task to handle hardware reset
 * @work: pointer to work_struct
 *
 * During reset we need to shut down and reinitialize the admin queue
 * before we can use it to communicate with the PF again. We also clear
 * and reinit the rings because that context is lost as well.
 **/
static void iavf_reset_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    reset_task);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	struct net_device *netdev = adapter->netdev;
	struct i40e_hw *hw = &adapter->hw;
	struct iavf_vlan_filter *vlf;
	struct iavf_cloud_filter *cf;
	struct iavf_mac_filter *f;
	u32 reg_val;
	int i = 0, err;
	bool running;

	/* When device is being removed it doesn't make sense to run the reset
	 * task, just return in such a case.
	 */
	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
		return;

	while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);
	if (CLIENT_ENABLED(adapter)) {
		adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
				    IAVF_FLAG_CLIENT_NEEDS_CLOSE |
				    IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
				    IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
		cancel_delayed_work_sync(&adapter->client_task);
		iavf_notify_client_close(&adapter->vsi, true);
	}
	iavf_misc_irq_disable(adapter);
	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
		adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
		/* Restart the AQ here. If we have been reset but didn't
		 * detect it, or if the PF had to reinit, our AQ will be hosed.
		 */
		iavf_shutdown_adminq(hw);
		iavf_init_adminq(hw);
		iavf_request_reset(adapter);
	}
	adapter->flags |= IAVF_FLAG_RESET_PENDING;

	/* poll until we see the reset actually happen */
	for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
		reg_val = rd32(hw, I40E_VF_ARQLEN1) &
			  I40E_VF_ARQLEN1_ARQENABLE_MASK;
		if (!reg_val)
			break;
		usleep_range(5000, 10000);
	}
	if (i == IAVF_RESET_WAIT_COUNT) {
		dev_info(&adapter->pdev->dev, "Never saw reset\n");
		goto continue_reset; /* act like the reset happened */
	}

	/* wait until the reset is complete and the PF is responding to us */
	for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
		/* sleep first to make sure a minimum wait time is met */
		msleep(IAVF_RESET_WAIT_MS);

		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
			break;
	}

	pci_set_master(adapter->pdev);

	if (i == IAVF_RESET_WAIT_COUNT) {
		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
			reg_val);
		iavf_disable_vf(adapter);
		clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
		return; /* Do not attempt to reinit. It's dead, Jim. */
	}

continue_reset:
	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	running = ((adapter->state == __IAVF_RUNNING) ||
		   (adapter->state == __IAVF_RESETTING));

	if (running) {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		adapter->link_up = false;
		iavf_napi_disable_all(adapter);
	}
	iavf_irq_disable(adapter);

	adapter->state = __IAVF_RESETTING;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;

	/* free the Tx/Rx rings and descriptors, might be better to just
	 * re-use them sometime in the future
	 */
	iavf_free_all_rx_resources(adapter);
	iavf_free_all_tx_resources(adapter);

	adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
	/* kill and reinit the admin queue */
	iavf_shutdown_adminq(hw);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
	err = iavf_init_adminq(hw);
	if (err)
		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
			 err);
	adapter->aq_required = 0;

	if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
		err = iavf_reinit_interrupt_scheme(adapter);
		if (err)
			goto reset_err;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* re-add all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->add = true;
	}
	/* re-add all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->add = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* check if TCs are running and re-add all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc) {
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			cf->add = true;
		}
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
	iavf_misc_irq_enable(adapter);

	mod_timer(&adapter->watchdog_timer, jiffies + 2);

	/* We were running when the reset started, so we need to restore some
	 * state here.
	 */
	if (running) {
		/* allocate transmit descriptors */
		err = iavf_setup_all_tx_resources(adapter);
		if (err)
			goto reset_err;

		/* allocate receive descriptors */
		err = iavf_setup_all_rx_resources(adapter);
		if (err)
			goto reset_err;

		if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
			err = iavf_request_traffic_irqs(adapter, netdev->name);
			if (err)
				goto reset_err;

			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
		}

		iavf_configure(adapter);

		iavf_up_complete(adapter);

		iavf_irq_enable(adapter, true);
	} else {
		adapter->state = __IAVF_DOWN;
		wake_up(&adapter->down_waitqueue);
	}
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return;
reset_err:
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
	iavf_close(netdev);
}

/**
 * iavf_adminq_task - worker thread to clean the admin queue
 * @work: pointer to work_struct containing our data
 **/
static void iavf_adminq_task(struct work_struct *work)
{
	struct iavf_adapter *adapter =
		container_of(work, struct iavf_adapter, adminq_task);
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum virtchnl_ops v_op;
	iavf_status ret, v_ret;
	u32 val, oldval;
	u16 pending;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		goto out;

	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		goto out;

	do {
		ret = iavf_clean_arq_element(hw, &event, &pending);
		v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		v_ret = (iavf_status)le32_to_cpu(event.desc.cookie_low);

		if (ret || !v_op)
			break; /* No event to process or error cleaning ARQ */

		iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
					 event.msg_len);
		if (pending != 0)
			memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
	} while (pending);

	if ((adapter->flags &
	     (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
	    adapter->state == __IAVF_RESETTING)
		goto freedom;

	/* check for error indications */
	val = rd32(hw, hw->aq.arq.len);
	if (val == 0xdeadbeef) /* indicates device in reset */
		goto freedom;
	oldval = val;
	if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.arq.len, val);

	val = rd32(hw, hw->aq.asq.len);
	oldval = val;
	if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.asq.len, val);

freedom:
	kfree(event.msg_buf);
out:
	/* re-enable Admin queue interrupt cause */
	iavf_misc_irq_enable(adapter);
}

/**
 * iavf_client_task - worker thread to perform client work
 * @work: pointer to work_struct containing our data
 *
 * This task handles client interactions. Because client calls can be
 * reentrant, we can't handle them in the watchdog.
 **/
static void iavf_client_task(struct work_struct *work)
{
	struct iavf_adapter *adapter =
		container_of(work, struct iavf_adapter, client_task.work);

	/* If we can't get the client bit, just give up. We'll be rescheduled
	 * later.
	 */

	if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
		return;

	if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
		iavf_client_subtask(adapter);
		adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
		iavf_notify_client_l2_params(&adapter->vsi);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
		iavf_notify_client_close(&adapter->vsi, false);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
		iavf_notify_client_open(&adapter->vsi);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
	}
out:
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
}

/**
 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
{
	int i;

	if (!adapter->tx_rings)
		return;

	for (i = 0; i < adapter->num_active_queues; i++)
		if (adapter->tx_rings[i].desc)
			iavf_free_tx_resources(&adapter->tx_rings[i]);
}

/**
 * iavf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->tx_rings[i].count = adapter->tx_desc_count;
		err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
		if (!err)
			continue;
		dev_err(&adapter->pdev->dev,
			"Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

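/* Illustrative sketch: a caller honoring the "orphaned rings" contract
 * above can simply free all rings on failure, because
 * iavf_free_all_tx_resources() skips rings whose descriptors were never
 * allocated (this mirrors the error path of iavf_open() further down):
 *
 *	err = iavf_setup_all_tx_resources(adapter);
 *	if (err)
 *		iavf_free_all_tx_resources(adapter);
 */
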
/**
 * iavf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].count = adapter->rx_desc_count;
		err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
		if (!err)
			continue;
		dev_err(&adapter->pdev->dev,
			"Allocation for Rx Queue %u failed\n", i);
		break;
	}
	return err;
}

/**
 * iavf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
{
	int i;

	if (!adapter->rx_rings)
		return;

	for (i = 0; i < adapter->num_active_queues; i++)
		if (adapter->rx_rings[i].desc)
			iavf_free_rx_resources(&adapter->rx_rings[i]);
}

/**
 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
 * @adapter: board private structure
 * @max_tx_rate: max Tx bw for a tc
 **/
static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
				      u64 max_tx_rate)
{
	int speed = 0, ret = 0;

	switch (adapter->link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = 40000;
		break;
	case I40E_LINK_SPEED_25GB:
		speed = 25000;
		break;
	case I40E_LINK_SPEED_20GB:
		speed = 20000;
		break;
	case I40E_LINK_SPEED_10GB:
		speed = 10000;
		break;
	case I40E_LINK_SPEED_1GB:
		speed = 1000;
		break;
	case I40E_LINK_SPEED_100MB:
		speed = 100;
		break;
	default:
		break;
	}

	if (max_tx_rate > speed) {
		dev_err(&adapter->pdev->dev,
			"Invalid tx rate specified\n");
		ret = -EINVAL;
	}

	return ret;
}

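/* Worked example for the check above: on a 10GB link, adapter->link_speed
 * maps to speed = 10000 Mbps, so a requested max_tx_rate of 12000 Mbps
 * fails with -EINVAL while 8000 Mbps passes. An unrecognized link speed
 * leaves speed = 0 and rejects any nonzero rate.
 */
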
/**
 * iavf_validate_ch_config - validate queue mapping info
 * @adapter: board private structure
 * @mqprio_qopt: queue parameters
 *
 * This function validates if the config provided by the user to
 * configure queue channels is valid or not. Returns 0 on a valid
 * config.
 **/
static int iavf_validate_ch_config(struct iavf_adapter *adapter,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 total_max_rate = 0;
	int i, num_qps = 0;
	u64 tx_rate = 0;
	int ret = 0;

	if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
	    mqprio_qopt->qopt.num_tc < 1)
		return -EINVAL;

	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
		if (!mqprio_qopt->qopt.count[i] ||
		    mqprio_qopt->qopt.offset[i] != num_qps)
			return -EINVAL;
		if (mqprio_qopt->min_rate[i]) {
			dev_err(&adapter->pdev->dev,
				"Invalid min tx rate (greater than 0) specified\n");
			return -EINVAL;
		}
		/* convert to Mbps */
		tx_rate = div_u64(mqprio_qopt->max_rate[i],
				  IAVF_MBPS_DIVISOR);
		total_max_rate += tx_rate;
		num_qps += mqprio_qopt->qopt.count[i];
	}
	if (num_qps > IAVF_MAX_REQ_QUEUES)
		return -EINVAL;

	ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
	return ret;
}

/**
 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
 * @adapter: board private structure
 **/
static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
{
	struct iavf_cloud_filter *cf, *cftmp;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
				 list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
}

/**
 * __iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type_data: tc offload data
 *
 * This function processes the config information provided by the
 * user to configure traffic classes/queue channels and packages the
 * information to request the PF to setup traffic classes.
 *
 * Returns 0 on success.
 **/
static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	u8 num_tc = 0, total_qps = 0;
	int ret = 0, netdev_tc = 0;
	u64 max_tx_rate;
	u16 mode;
	int i;

	num_tc = mqprio_qopt->qopt.num_tc;
	mode = mqprio_qopt->mode;

	/* delete queue_channel */
	if (!mqprio_qopt->qopt.hw) {
		if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
			/* reset the tc configuration */
			netdev_reset_tc(netdev);
			adapter->num_tc = 0;
			netif_tx_stop_all_queues(netdev);
			netif_tx_disable(netdev);
			iavf_del_all_cloud_filters(adapter);
			adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
			goto exit;
		} else {
			return -EINVAL;
		}
	}

	/* add queue channel */
	if (mode == TC_MQPRIO_MODE_CHANNEL) {
		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
			dev_err(&adapter->pdev->dev, "ADq not supported\n");
			return -EOPNOTSUPP;
		}
		if (adapter->ch_config.state != __IAVF_TC_INVALID) {
			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
			return -EINVAL;
		}

		ret = iavf_validate_ch_config(adapter, mqprio_qopt);
		if (ret)
			return ret;
		/* Return if same TC config is requested */
		if (adapter->num_tc == num_tc)
			return 0;
		adapter->num_tc = num_tc;

		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			if (i < num_tc) {
				adapter->ch_config.ch_info[i].count =
					mqprio_qopt->qopt.count[i];
				adapter->ch_config.ch_info[i].offset =
					mqprio_qopt->qopt.offset[i];
				total_qps += mqprio_qopt->qopt.count[i];
				max_tx_rate = mqprio_qopt->max_rate[i];
				/* convert to Mbps */
				max_tx_rate = div_u64(max_tx_rate,
						      IAVF_MBPS_DIVISOR);
				adapter->ch_config.ch_info[i].max_tx_rate =
					max_tx_rate;
			} else {
				adapter->ch_config.ch_info[i].count = 1;
				adapter->ch_config.ch_info[i].offset = 0;
			}
		}
		adapter->ch_config.total_qps = total_qps;
		netif_tx_stop_all_queues(netdev);
		netif_tx_disable(netdev);
		adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
		netdev_reset_tc(netdev);
		/* Report the tc mapping up the stack */
		netdev_set_num_tc(adapter->netdev, num_tc);
		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			u16 qcount = mqprio_qopt->qopt.count[i];
			u16 qoffset = mqprio_qopt->qopt.offset[i];

			if (i < num_tc)
				netdev_set_tc_queue(netdev, netdev_tc++, qcount,
						    qoffset);
		}
	}
exit:
	return ret;
}

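/* Illustrative usage (assumed tc syntax, not taken from this file): the
 * path above is exercised from user space via the mqprio qdisc in channel
 * mode, roughly along these lines for two traffic classes on a VF netdev:
 *
 *	tc qdisc add dev <vf-netdev> root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel
 *
 * qopt.count[] and qopt.offset[] then arrive here as {4, 4} and {0, 4},
 * and per-TC max rates, when given, are converted to Mbps via
 * IAVF_MBPS_DIVISOR.
 */
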
/**
 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
 * @adapter: board private structure
 * @f: pointer to struct tc_cls_flower_offload
 * @filter: pointer to cloud filter structure
 */
static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
				 struct tc_cls_flower_offload *f,
				 struct iavf_cloud_filter *filter)
{
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u8 field_flags = 0;
	u16 addr_type = 0;
	u16 n_proto = 0;
	int i = 0;
	struct virtchnl_filter *vf = &filter->f;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
		dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
			f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);

		if (mask->keyid != 0)
			field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);

		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		n_proto = n_proto_key & n_proto_mask;
		if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
			return -EINVAL;
		if (n_proto == ETH_P_IPV6) {
			/* specify flow type as TCP IPv6 */
			vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
		}

		if (key->ip_proto != IPPROTO_TCP) {
			dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
			return -EINVAL;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);

		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);
		/* use is_broadcast and is_zero to check for all 0xf or 0 */
		if (!is_zero_ether_addr(mask->dst)) {
			if (is_broadcast_ether_addr(mask->dst)) {
				field_flags |= IAVF_CLOUD_FIELD_OMAC;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
					mask->dst);
				return I40E_ERR_CONFIG;
			}
		}

		if (!is_zero_ether_addr(mask->src)) {
			if (is_broadcast_ether_addr(mask->src)) {
				field_flags |= IAVF_CLOUD_FIELD_IMAC;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
					mask->src);
				return I40E_ERR_CONFIG;
			}
		}

		if (!is_zero_ether_addr(key->dst))
			if (is_valid_ether_addr(key->dst) ||
			    is_multicast_ether_addr(key->dst)) {
				/* set the mask if a valid dst_mac address */
				for (i = 0; i < ETH_ALEN; i++)
					vf->mask.tcp_spec.dst_mac[i] |= 0xff;
				ether_addr_copy(vf->data.tcp_spec.dst_mac,
						key->dst);
			}

		if (!is_zero_ether_addr(key->src))
			if (is_valid_ether_addr(key->src) ||
			    is_multicast_ether_addr(key->src)) {
				/* set the mask if a valid src_mac address */
				for (i = 0; i < ETH_ALEN; i++)
					vf->mask.tcp_spec.src_mac[i] |= 0xff;
				ether_addr_copy(vf->data.tcp_spec.src_mac,
						key->src);
			}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);

		if (mask->vlan_id) {
			if (mask->vlan_id == VLAN_VID_MASK) {
				field_flags |= IAVF_CLOUD_FIELD_IVLAN;
			} else {
				dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
					mask->vlan_id);
				return I40E_ERR_CONFIG;
			}
		}
		vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
		vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		addr_type = key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		if (mask->dst) {
			if (mask->dst == cpu_to_be32(0xffffffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
					be32_to_cpu(mask->dst));
				return I40E_ERR_CONFIG;
			}
		}

		if (mask->src) {
			if (mask->src == cpu_to_be32(0xffffffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
					be32_to_cpu(mask->src));
				return I40E_ERR_CONFIG;
			}
		}

		if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
			dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
			return I40E_ERR_CONFIG;
		}
		if (key->dst) {
			vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
			vf->data.tcp_spec.dst_ip[0] = key->dst;
		}
		if (key->src) {
			vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
			vf->data.tcp_spec.src_ip[0] = key->src;
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		/* validate mask, make sure it is not IPV6_ADDR_ANY */
		if (ipv6_addr_any(&mask->dst)) {
			dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
				IPV6_ADDR_ANY);
			return I40E_ERR_CONFIG;
		}

		/* src and dest IPv6 address should not be LOOPBACK
		 * (0:0:0:0:0:0:0:1) which can be represented as ::1
		 */
		if (ipv6_addr_loopback(&key->dst) ||
		    ipv6_addr_loopback(&key->src)) {
			dev_err(&adapter->pdev->dev,
				"ipv6 addr should not be loopback\n");
			return I40E_ERR_CONFIG;
		}
		if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
			field_flags |= IAVF_CLOUD_FIELD_IIP;

		for (i = 0; i < 4; i++)
			vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
		memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32,
		       sizeof(vf->data.tcp_spec.dst_ip));
		for (i = 0; i < 4; i++)
			vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
		memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32,
		       sizeof(vf->data.tcp_spec.src_ip));
	}
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);

		if (mask->src) {
			if (mask->src == cpu_to_be16(0xffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
					be16_to_cpu(mask->src));
				return I40E_ERR_CONFIG;
			}
		}

		if (mask->dst) {
			if (mask->dst == cpu_to_be16(0xffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
					be16_to_cpu(mask->dst));
				return I40E_ERR_CONFIG;
			}
		}
		if (key->dst) {
			vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
			vf->data.tcp_spec.dst_port = key->dst;
		}

		if (key->src) {
			vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
			vf->data.tcp_spec.src_port = key->src;
		}
	}
	vf->field_flags = field_flags;

	return 0;
}

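/* Illustrative usage (assumed tc syntax): the parser above consumes flower
 * matches such as the following, which would leave the default
 * VIRTCHNL_TCP_V4_FLOW flow type and fill dst_ip/dst_port in the tcp_spec
 * with full (all-ones) masks:
 *
 *	tc filter add dev <vf-netdev> parent ffff: protocol ip flower \
 *		ip_proto tcp dst_ip 192.168.1.10 dst_port 80 hw_tc 1
 *
 * Partial masks are rejected with I40E_ERR_CONFIG, since only exact-match
 * fields can be expressed in the virtchnl cloud filter.
 */
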
/**
 * iavf_handle_tclass - Forward to a traffic class on the device
 * @adapter: board private structure
 * @tc: traffic class index on the device
 * @filter: pointer to cloud filter structure
 */
static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
			      struct iavf_cloud_filter *filter)
{
	if (tc == 0)
		return 0;
	if (tc < adapter->num_tc) {
		if (!filter->f.data.tcp_spec.dst_port) {
			dev_err(&adapter->pdev->dev,
				"Specify destination port to redirect to traffic class other than TC0\n");
			return -EINVAL;
		}
	}
	/* redirect to a traffic class on the same device */
	filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
	filter->f.action_meta = tc;
	return 0;
}

/**
 * iavf_configure_clsflower - Add tc flower filters
 * @adapter: board private structure
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 */
static int iavf_configure_clsflower(struct iavf_adapter *adapter,
				    struct tc_cls_flower_offload *cls_flower)
{
	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
	struct iavf_cloud_filter *filter = NULL;
	int err = -EINVAL, count = 50;

	if (tc < 0) {
		dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		if (--count == 0)
			goto err;
		udelay(1);
	}

	filter->cookie = cls_flower->cookie;

	/* set the mask to all zeroes to begin with */
	memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
	/* start out with flow type and eth type IPv4 to begin with */
	filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
	err = iavf_parse_cls_flower(adapter, cls_flower, filter);
	if (err < 0)
		goto err;

	err = iavf_handle_tclass(adapter, tc, filter);
	if (err < 0)
		goto err;

	/* add filter to the list */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_add_tail(&filter->list, &adapter->cloud_filter_list);
	adapter->num_cloud_filters++;
	filter->add = true;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
err:
	if (err)
		kfree(filter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	return err;
}

/* iavf_find_cf - Find the cloud filter in the list
 * @adapter: Board private structure
 * @cookie: filter specific cookie
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * cloud_filter_list_lock.
 */
static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
					      unsigned long *cookie)
{
	struct iavf_cloud_filter *filter = NULL;

	if (!cookie)
		return NULL;

	list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
			return filter;
	}
	return NULL;
}

/**
 * iavf_delete_clsflower - Remove tc flower filters
 * @adapter: board private structure
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 */
static int iavf_delete_clsflower(struct iavf_adapter *adapter,
				 struct tc_cls_flower_offload *cls_flower)
{
	struct iavf_cloud_filter *filter = NULL;
	int err = 0;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	filter = iavf_find_cf(adapter, &cls_flower->cookie);
	if (filter) {
		filter->del = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
	} else {
		err = -EINVAL;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	return err;
}

/**
 * iavf_setup_tc_cls_flower - flower classifier offloads
 * @adapter: board private structure
 * @cls_flower: pointer to struct tc_cls_flower_offload
 */
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
				    struct tc_cls_flower_offload *cls_flower)
{
	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return iavf_configure_clsflower(adapter, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return iavf_delete_clsflower(adapter, cls_flower);
	case TC_CLSFLOWER_STATS:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * iavf_setup_tc_block_cb - block callback for tc
 * @type: type of offload
 * @type_data: offload data
 * @cb_priv: adapter private data passed at callback registration
 *
 * This function is the block callback for traffic classes
 **/
static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return iavf_setup_tc_cls_flower(cb_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * iavf_setup_tc_block - register callbacks for tc
 * @dev: network interface device structure
 * @f: tc offload data
 *
 * This function registers block callbacks for tc
 * offloads
 **/
static int iavf_setup_tc_block(struct net_device *dev,
			       struct tc_block_offload *f)
{
	struct iavf_adapter *adapter = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, iavf_setup_tc_block_cb,
					     adapter, adapter, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, iavf_setup_tc_block_cb,
					adapter);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type: type of offload
 * @type_data: tc offload data
 *
 * This function is the callback to ndo_setup_tc in the
 * netdev_ops.
 *
 * Returns 0 on success
 **/
static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			 void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return __iavf_setup_tc(netdev, type_data);
	case TC_SETUP_BLOCK:
		return iavf_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * iavf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int iavf_open(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int err;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
		return -EIO;
	}

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	if (adapter->state != __IAVF_DOWN) {
		err = -EBUSY;
		goto err_unlock;
	}

	/* allocate transmit descriptors */
	err = iavf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = iavf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* clear any pending interrupts, may auto mask */
	err = iavf_request_traffic_irqs(adapter, netdev->name);
	if (err)
		goto err_req_irq;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	iavf_add_filter(adapter, adapter->hw.mac.addr);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_configure(adapter);

	iavf_up_complete(adapter);

	iavf_irq_enable(adapter, true);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return 0;

err_req_irq:
	iavf_down(adapter);
	iavf_free_traffic_irqs(adapter);
err_setup_rx:
	iavf_free_all_rx_resources(adapter);
err_setup_tx:
	iavf_free_all_tx_resources(adapter);
err_unlock:
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return err;
}

/**
 * iavf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
 * are freed, along with all transmit and receive resources.
 **/
static int iavf_close(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int status;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return 0;

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;

	iavf_down(adapter);
	adapter->state = __IAVF_DOWN_PENDING;
	iavf_free_traffic_irqs(adapter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	/* We explicitly don't free resources here because the hardware is
	 * still active and can DMA into memory. Resources are cleared in
	 * iavf_virtchnl_completion() after we get confirmation from the PF
	 * driver that the rings have been stopped.
	 *
	 * Also, we wait for state to transition to __IAVF_DOWN before
	 * returning. State change occurs in iavf_virtchnl_completion() after
	 * VF resources are released (which occurs after PF driver processes and
	 * responds to admin queue commands).
	 */

	status = wait_event_timeout(adapter->down_waitqueue,
				    adapter->state == __IAVF_DOWN,
				    msecs_to_jiffies(200));
	if (!status)
		netdev_warn(netdev, "Device resources not yet released\n");
	return 0;
}

/**
 * iavf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netdev->mtu = new_mtu;
	if (CLIENT_ENABLED(adapter)) {
		iavf_notify_client_l2_params(&adapter->vsi);
		adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
	}
	adapter->flags |= IAVF_FLAG_RESET_NEEDED;
	schedule_work(&adapter->reset_task);

	return 0;
}

/**
 * iavf_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 * Note: expects to be called while under rtnl_lock()
 **/
static int iavf_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* Don't allow changing VLAN_RX flag when adapter is not capable
	 * of VLAN offload
	 */
	if (!VLAN_ALLOWED(adapter)) {
		if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
			return -EINVAL;
	} else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			adapter->aq_required |=
				IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
		else
			adapter->aq_required |=
				IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
	}

	return 0;
}

/**
 * iavf_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
static netdev_features_t iavf_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	/* MACLEN can support at most 63 words */
	len = skb_network_header(skb) - skb->data;
	if (len & ~(63 * 2))
		goto out_err;

	/* IPLEN and EIPLEN can support at most 127 dwords */
	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(127 * 4))
		goto out_err;

	if (skb->encapsulation) {
		/* L4TUNLEN can support 127 words */
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(127 * 2))
			goto out_err;

		/* IPLEN can support at most 127 dwords */
		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(127 * 4))
			goto out_err;
	}

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;
out_err:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

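/* Worked example for the masks above: "len & ~(63 * 2)" is nonzero when
 * the L2 header length is odd or exceeds 126 bytes (63 two-byte words),
 * so a standard 14-byte Ethernet header passes while a 128-byte header
 * falls through to out_err. The "len & ~(127 * 4)" checks bound the IP
 * headers at 508 bytes (127 dwords) and require dword alignment on the
 * same principle.
 */
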
/**
 * iavf_fix_features - fix up the netdev feature bits
 * @netdev: our net device
 * @features: desired feature bits
 *
 * Returns fixed-up features bits
 **/
static netdev_features_t iavf_fix_features(struct net_device *netdev,
					   netdev_features_t features)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_FILTER);

	return features;
}

129cf89e
JB
3221static const struct net_device_ops iavf_netdev_ops = {
3222 .ndo_open = iavf_open,
3223 .ndo_stop = iavf_close,
3224 .ndo_start_xmit = iavf_xmit_frame,
3225 .ndo_set_rx_mode = iavf_set_rx_mode,
5eae00c5 3226 .ndo_validate_addr = eth_validate_addr,
129cf89e
JB
3227 .ndo_set_mac_address = iavf_set_mac,
3228 .ndo_change_mtu = iavf_change_mtu,
3229 .ndo_tx_timeout = iavf_tx_timeout,
3230 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid,
3231 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid,
3232 .ndo_features_check = iavf_features_check,
3233 .ndo_fix_features = iavf_fix_features,
3234 .ndo_set_features = iavf_set_features,
7709b4c1 3235#ifdef CONFIG_NET_POLL_CONTROLLER
129cf89e 3236 .ndo_poll_controller = iavf_netpoll,
7709b4c1 3237#endif
129cf89e 3238 .ndo_setup_tc = iavf_setup_tc,
5eae00c5
GR
3239};
3240
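/* Note: this ops table is not attached at probe time; it is assigned to
 * netdev->netdev_ops in iavf_init_task() below, once the PF handshake
 * has completed and the VF's capabilities are known.
 */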
3241/**
129cf89e 3242 * iavf_check_reset_complete - check that VF reset is complete
5eae00c5
GR
3243 * @hw: pointer to hw struct
3244 *
3245 * Returns 0 if the device is ready to use, or -EBUSY if it's still in reset.
3246 **/
129cf89e 3247static int iavf_check_reset_complete(struct i40e_hw *hw)
5eae00c5
GR
3248{
3249 u32 rstat;
3250 int i;
3251
3252 for (i = 0; i < 100; i++) {
fd35886a
AS
3253 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
3254 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
310a2ad9
JB
3255 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
3256 (rstat == VIRTCHNL_VFR_COMPLETED))
5eae00c5 3257 return 0;
f98a2006 3258 usleep_range(10, 20);
5eae00c5
GR
3259 }
3260 return -EBUSY;
3261}
3262
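/* Illustrative alternative, assuming <linux/iopoll.h> is available: the
 * loop above sleeps 10-20us per try, so it waits at most ~2ms overall.
 * readx_poll_timeout() can express the same poll; iavf_rd_rstat() and
 * iavf_check_reset_complete_poll() are hypothetical names.
 */
static u32 iavf_rd_rstat(struct i40e_hw *hw)
{
	return rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK;
}

static int iavf_check_reset_complete_poll(struct i40e_hw *hw)
{
	u32 rstat;

	return readx_poll_timeout(iavf_rd_rstat, hw, rstat,
				  rstat == VIRTCHNL_VFR_VFACTIVE ||
				  rstat == VIRTCHNL_VFR_COMPLETED,
				  10, 2000) ? -EBUSY : 0;
}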
e6d038de 3263/**
129cf89e 3264 * iavf_process_config - Process the config information we got from the PF
e6d038de
MW
3265 * @adapter: board private structure
3266 *
3267 * Verify that we have a valid config struct, and set up our netdev features
3268 * and our VSI struct.
3269 **/
129cf89e 3270int iavf_process_config(struct iavf_adapter *adapter)
e6d038de 3271{
310a2ad9 3272 struct virtchnl_vf_resource *vfres = adapter->vf_res;
5b36e8d0 3273 int i, num_req_queues = adapter->num_req_queues;
e6d038de 3274 struct net_device *netdev = adapter->netdev;
43a3d9ba 3275 struct i40e_vsi *vsi = &adapter->vsi;
bacd75cf
PB
3276 netdev_features_t hw_enc_features;
3277 netdev_features_t hw_features;
e6d038de
MW
3278
3279 /* got VF config message back from PF, now we can parse it */
ba6cc7f6 3280 for (i = 0; i < vfres->num_vsis; i++) {
ff3f4cc2 3281 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
ba6cc7f6 3282 adapter->vsi_res = &vfres->vsi_res[i];
e6d038de
MW
3283 }
3284 if (!adapter->vsi_res) {
3285 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
3286 return -ENODEV;
3287 }
3288
5b36e8d0
AB
3289 if (num_req_queues &&
3290 num_req_queues != adapter->vsi_res->num_queue_pairs) {
3291		/* Problem. The PF gave us fewer queues than we had
3292		 * negotiated in our request. We need a reset to see if we
3293		 * can get back to a working state.
3294 */
3295 dev_err(&adapter->pdev->dev,
3296 "Requested %d queues, but PF only gave us %d.\n",
3297 num_req_queues,
3298 adapter->vsi_res->num_queue_pairs);
129cf89e 3299 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
5b36e8d0 3300 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
129cf89e 3301 iavf_schedule_reset(adapter);
5b36e8d0
AB
3302 return -ENODEV;
3303 }
3304 adapter->num_req_queues = 0;
3305
bacd75cf
PB
3306 hw_enc_features = NETIF_F_SG |
3307 NETIF_F_IP_CSUM |
3308 NETIF_F_IPV6_CSUM |
3309 NETIF_F_HIGHDMA |
3310 NETIF_F_SOFT_FEATURES |
3311 NETIF_F_TSO |
3312 NETIF_F_TSO_ECN |
3313 NETIF_F_TSO6 |
3314 NETIF_F_SCTP_CRC |
3315 NETIF_F_RXHASH |
3316 NETIF_F_RXCSUM |
3317 0;
3318
3319	/* advertise to the stack only if offloads for encapsulated packets
3320	 * are supported
3321 */
fbb113f7 3322 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
bacd75cf 3323 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
b0fe3306 3324 NETIF_F_GSO_GRE |
1c7b4a23 3325 NETIF_F_GSO_GRE_CSUM |
7e13318d 3326 NETIF_F_GSO_IPXIP4 |
bf2d1df3 3327 NETIF_F_GSO_IPXIP6 |
b0fe3306 3328 NETIF_F_GSO_UDP_TUNNEL_CSUM |
1c7b4a23 3329 NETIF_F_GSO_PARTIAL |
b0fe3306
AD
3330 0;
3331
fbb113f7 3332 if (!(vfres->vf_cap_flags &
310a2ad9 3333 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
bacd75cf
PB
3334 netdev->gso_partial_features |=
3335 NETIF_F_GSO_UDP_TUNNEL_CSUM;
b0fe3306 3336
bacd75cf
PB
3337 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
3338 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
3339 netdev->hw_enc_features |= hw_enc_features;
3340 }
b0fe3306 3341 /* record features VLANs can make use of */
bacd75cf 3342 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
b0fe3306
AD
3343
3344	/* Write features and hw_features separately to avoid polluting
bacd75cf 3345	 * them with, or dropping, features that were set when we registered.
b0fe3306 3346 */
bacd75cf 3347 hw_features = hw_enc_features;
b0fe3306 3348
0a3b4f70
JK
3349 /* Enable VLAN features if supported */
3350 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3351 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
3352 NETIF_F_HW_VLAN_CTAG_RX);
0075fa0f
HR
3353 /* Enable cloud filter if ADQ is supported */
3354 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
3355 hw_features |= NETIF_F_HW_TC;
0a3b4f70 3356
bacd75cf 3357 netdev->hw_features |= hw_features;
b0fe3306 3358
0a3b4f70
JK
3359 netdev->features |= hw_features;
3360
3361 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3362 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
e6d038de 3363
e65aae08
LY
3364 netdev->priv_flags |= IFF_UNICAST_FLT;
3365
e4062894
PJ
3366 /* Do not turn on offloads when they are requested to be turned off.
3367	 * TSO needs an MTU of at least 576 bytes to work correctly.
3368 */
3369 if (netdev->wanted_features) {
3370 if (!(netdev->wanted_features & NETIF_F_TSO) ||
3371 netdev->mtu < 576)
3372 netdev->features &= ~NETIF_F_TSO;
3373 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
3374 netdev->mtu < 576)
3375 netdev->features &= ~NETIF_F_TSO6;
3376 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
3377 netdev->features &= ~NETIF_F_TSO_ECN;
3378 if (!(netdev->wanted_features & NETIF_F_GRO))
3379 netdev->features &= ~NETIF_F_GRO;
3380 if (!(netdev->wanted_features & NETIF_F_GSO))
3381 netdev->features &= ~NETIF_F_GSO;
3382 }
3383
e6d038de
MW
3384 adapter->vsi.id = adapter->vsi_res->vsi_id;
3385
3386 adapter->vsi.back = adapter;
3387 adapter->vsi.base_vector = 1;
3388 adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
43a3d9ba
MW
3389 vsi->netdev = adapter->netdev;
3390 vsi->qs_handle = adapter->vsi_res->qset_handle;
fbb113f7 3391 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
43a3d9ba
MW
3392 adapter->rss_key_size = vfres->rss_key_size;
3393 adapter->rss_lut_size = vfres->rss_lut_size;
3394 } else {
129cf89e
JB
3395 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
3396 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
43a3d9ba
MW
3397 }
3398
e6d038de
MW
3399 return 0;
3400}
3401
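/* Illustrative summary, not from the driver, of iavf_process_config()
 * above:
 *
 *  1. locate the SRIOV VSI in the PF's resource reply;
 *  2. if the PF granted a different queue count than requested, flag
 *     IAVF_FLAG_REINIT_ITR_NEEDED, schedule a reset, and retry with the
 *     PF-granted count;
 *  3. build hw_enc_features, gated on VIRTCHNL_VF_OFFLOAD_ENCAP;
 *  4. gate VLAN offloads and cloud filters (ADQ) on their cap flags;
 *  5. honour wanted_features and the 576-byte TSO MTU floor;
 *  6. size the RSS key/LUT from the PF reply when RSS_PF is offered.
 */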
5eae00c5 3402/**
129cf89e 3403 * iavf_init_task - worker thread to perform delayed initialization
5eae00c5
GR
3404 * @work: pointer to work_struct containing our data
3405 *
3406 * This task completes the work that was begun in probe. Due to the nature
3407 * of VF-PF communications, we may need to wait tens of milliseconds to get
dbedd44e 3408 * responses back from the PF. Rather than busy-wait in probe and bog down the
5eae00c5
GR
3409 * whole system, we'll do it in a task so we can sleep.
3410 * This task only runs during driver init. Once we've established
3411 * communications with the PF driver and set up our netdev, the watchdog
3412 * takes over.
3413 **/
129cf89e 3414static void iavf_init_task(struct work_struct *work)
5eae00c5 3415{
129cf89e
JB
3416 struct iavf_adapter *adapter = container_of(work,
3417 struct iavf_adapter,
5eae00c5
GR
3418 init_task.work);
3419 struct net_device *netdev = adapter->netdev;
5eae00c5
GR
3420 struct i40e_hw *hw = &adapter->hw;
3421 struct pci_dev *pdev = adapter->pdev;
e6d038de 3422 int err, bufsz;
5eae00c5
GR
3423
3424 switch (adapter->state) {
129cf89e 3425 case __IAVF_STARTUP:
5eae00c5 3426 /* driver loaded, probe complete */
129cf89e
JB
3427 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
3428 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
5eae00c5
GR
3429 err = i40e_set_mac_type(hw);
3430 if (err) {
c2a137cb
MW
3431 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
3432 err);
2619ef47 3433 goto err;
5eae00c5 3434 }
129cf89e 3435 err = iavf_check_reset_complete(hw);
5eae00c5 3436 if (err) {
0d9c7ea8 3437 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
75a64435 3438 err);
5eae00c5
GR
3439 goto err;
3440 }
129cf89e
JB
3441 hw->aq.num_arq_entries = IAVF_AQ_LEN;
3442 hw->aq.num_asq_entries = IAVF_AQ_LEN;
3443 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
3444 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
5eae00c5 3445
129cf89e 3446 err = iavf_init_adminq(hw);
5eae00c5 3447 if (err) {
c2a137cb
MW
3448 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
3449 err);
5eae00c5
GR
3450 goto err;
3451 }
129cf89e 3452 err = iavf_send_api_ver(adapter);
5eae00c5 3453 if (err) {
10bdd67b 3454 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
129cf89e 3455 iavf_shutdown_adminq(hw);
5eae00c5
GR
3456 goto err;
3457 }
129cf89e 3458 adapter->state = __IAVF_INIT_VERSION_CHECK;
5eae00c5 3459 goto restart;
129cf89e
JB
3460 case __IAVF_INIT_VERSION_CHECK:
3461 if (!iavf_asq_done(hw)) {
80e72893 3462 dev_err(&pdev->dev, "Admin queue command never completed\n");
129cf89e
JB
3463 iavf_shutdown_adminq(hw);
3464 adapter->state = __IAVF_STARTUP;
5eae00c5 3465 goto err;
10bdd67b 3466 }
5eae00c5
GR
3467
3468 /* aq msg sent, awaiting reply */
129cf89e 3469 err = iavf_verify_api_ver(adapter);
5eae00c5 3470 if (err) {
d4f82fd3 3471 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
129cf89e 3472 err = iavf_send_api_ver(adapter);
ee1693e5
MW
3473 else
3474 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
3475 adapter->pf_version.major,
3476 adapter->pf_version.minor,
310a2ad9
JB
3477 VIRTCHNL_VERSION_MAJOR,
3478 VIRTCHNL_VERSION_MINOR);
5eae00c5
GR
3479 goto err;
3480 }
129cf89e 3481 err = iavf_send_vf_config_msg(adapter);
5eae00c5 3482 if (err) {
3f2ab172 3483 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
5eae00c5
GR
3484 err);
3485 goto err;
3486 }
129cf89e 3487 adapter->state = __IAVF_INIT_GET_RESOURCES;
5eae00c5 3488 goto restart;
129cf89e 3489 case __IAVF_INIT_GET_RESOURCES:
5eae00c5
GR
3490 /* aq msg sent, awaiting reply */
3491 if (!adapter->vf_res) {
310a2ad9 3492 bufsz = sizeof(struct virtchnl_vf_resource) +
5eae00c5 3493 (I40E_MAX_VF_VSI *
310a2ad9 3494 sizeof(struct virtchnl_vsi_resource));
5eae00c5 3495 adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
c2a137cb 3496 if (!adapter->vf_res)
5eae00c5 3497 goto err;
5eae00c5 3498 }
129cf89e 3499 err = iavf_get_vf_config(adapter);
906a6937 3500 if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
129cf89e 3501 err = iavf_send_vf_config_msg(adapter);
906a6937 3502 goto err;
e743072f
MW
3503 } else if (err == I40E_ERR_PARAM) {
3504 /* We only get ERR_PARAM if the device is in a very bad
3505 * state or if we've been disabled for previous bad
3506 * behavior. Either way, we're done now.
3507 */
129cf89e 3508 iavf_shutdown_adminq(hw);
e743072f
MW
3509 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
3510 return;
906a6937 3511 }
5eae00c5 3512 if (err) {
c2a137cb
MW
3513 dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
3514 err);
5eae00c5
GR
3515 goto err_alloc;
3516 }
129cf89e 3517 adapter->state = __IAVF_INIT_SW;
5eae00c5
GR
3518 break;
3519 default:
3520 goto err_alloc;
3521 }
f608e6a6 3522
129cf89e 3523 if (iavf_process_config(adapter))
5eae00c5 3524 goto err_alloc;
310a2ad9 3525 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
5eae00c5 3526
129cf89e 3527 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
5eae00c5 3528
129cf89e
JB
3529 netdev->netdev_ops = &iavf_netdev_ops;
3530 iavf_set_ethtool_ops(netdev);
5eae00c5 3531 netdev->watchdog_timeo = 5 * HZ;
3415e8ce 3532
91c527a5
JW
3533 /* MTU range: 68 - 9710 */
3534 netdev->min_mtu = ETH_MIN_MTU;
1e3a5fd5 3535 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
91c527a5 3536
5eae00c5 3537 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
b34f90e7 3538 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
c2a137cb 3539 adapter->hw.mac.addr);
14e52ee2
MW
3540 eth_hw_addr_random(netdev);
3541 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
3542 } else {
129cf89e 3543 adapter->flags |= IAVF_FLAG_ADDR_SET_BY_PF;
14e52ee2
MW
3544 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
3545 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
5eae00c5 3546 }
5eae00c5 3547
129cf89e 3548 timer_setup(&adapter->watchdog_timer, iavf_watchdog_timer, 0);
5eae00c5
GR
3549 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3550
129cf89e
JB
3551 adapter->tx_desc_count = IAVF_DEFAULT_TXD;
3552 adapter->rx_desc_count = IAVF_DEFAULT_RXD;
3553 err = iavf_init_interrupt_scheme(adapter);
5eae00c5
GR
3554 if (err)
3555 goto err_sw_init;
129cf89e 3556 iavf_map_rings_to_vectors(adapter);
fbb113f7 3557 if (adapter->vf_res->vf_cap_flags &
310a2ad9 3558 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
129cf89e 3559 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
f6d83d13 3560
129cf89e 3561 err = iavf_request_misc_irq(adapter);
5eae00c5
GR
3562 if (err)
3563 goto err_sw_init;
3564
3565 netif_carrier_off(netdev);
3f341acc 3566 adapter->link_up = false;
5eae00c5 3567
ef8693eb
MW
3568 if (!adapter->netdev_registered) {
3569 err = register_netdev(netdev);
3570 if (err)
3571 goto err_register;
3572 }
5eae00c5
GR
3573
3574 adapter->netdev_registered = true;
3575
3576 netif_tx_stop_all_queues(netdev);
ed0e894d 3577 if (CLIENT_ALLOWED(adapter)) {
129cf89e 3578 err = iavf_lan_add_device(adapter);
ed0e894d
MW
3579 if (err)
3580 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
3581 err);
3582 }
5eae00c5 3583
b34f90e7 3584 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
5eae00c5
GR
3585 if (netdev->features & NETIF_F_GRO)
3586 dev_info(&pdev->dev, "GRO is enabled\n");
3587
129cf89e 3588 adapter->state = __IAVF_DOWN;
0da36b97 3589 set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
129cf89e 3590 iavf_misc_irq_enable(adapter);
fe2647ab 3591 wake_up(&adapter->down_waitqueue);
e25d00b8 3592
43a3d9ba
MW
3593 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
3594 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
3595 if (!adapter->rss_key || !adapter->rss_lut)
3596 goto err_mem;
3597
e25d00b8 3598 if (RSS_AQ(adapter)) {
129cf89e 3599 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
e25d00b8
ASJ
3600 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
3601 } else {
129cf89e 3602 iavf_init_rss(adapter);
e25d00b8 3603 }
5eae00c5
GR
3604 return;
3605restart:
3f7e5c33 3606 schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
5eae00c5 3607 return;
43a3d9ba 3608err_mem:
129cf89e 3609 iavf_free_rss(adapter);
5eae00c5 3610err_register:
129cf89e 3611 iavf_free_misc_irq(adapter);
5eae00c5 3612err_sw_init:
129cf89e 3613 iavf_reset_interrupt_capability(adapter);
5eae00c5
GR
3614err_alloc:
3615 kfree(adapter->vf_res);
3616 adapter->vf_res = NULL;
3617err:
3618 /* Things went into the weeds, so try again later */
129cf89e 3619 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
b9029e94 3620 dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
129cf89e
JB
3621 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
3622 iavf_shutdown_adminq(hw);
3623 adapter->state = __IAVF_STARTUP;
b9029e94
MW
3624 schedule_delayed_work(&adapter->init_task, HZ * 5);
3625 return;
5eae00c5 3626 }
3f7e5c33 3627 schedule_delayed_work(&adapter->init_task, HZ);
5eae00c5
GR
3628}
3629
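/* Illustrative summary, not from the driver, of the init states walked
 * by iavf_init_task() above:
 *
 *   __IAVF_STARTUP            -> reset complete, adminq up, send API ver
 *   __IAVF_INIT_VERSION_CHECK -> verify PF API version, request config
 *   __IAVF_INIT_GET_RESOURCES -> parse VF resources from the PF
 *   __IAVF_INIT_SW            -> netdev/IRQ/RSS setup, then __IAVF_DOWN
 *
 * On failure the task reschedules itself after 1 second; after
 * IAVF_AQ_MAX_ERR consecutive failures it flags PF_COMMS_FAILED and
 * backs off to a 5 second retry from __IAVF_STARTUP.
 */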
3630/**
129cf89e 3631 * iavf_shutdown - Shutdown the device in preparation for a reboot
5eae00c5
GR
3632 * @pdev: pci device structure
3633 **/
129cf89e 3634static void iavf_shutdown(struct pci_dev *pdev)
5eae00c5
GR
3635{
3636 struct net_device *netdev = pci_get_drvdata(pdev);
129cf89e 3637 struct iavf_adapter *adapter = netdev_priv(netdev);
5eae00c5
GR
3638
3639 netif_device_detach(netdev);
3640
3641 if (netif_running(netdev))
129cf89e 3642 iavf_close(netdev);
5eae00c5 3643
00293fdc 3644 /* Prevent the watchdog from running. */
129cf89e 3645 adapter->state = __IAVF_REMOVE;
00293fdc 3646 adapter->aq_required = 0;
00293fdc 3647
5eae00c5
GR
3648#ifdef CONFIG_PM
3649 pci_save_state(pdev);
3650
3651#endif
3652 pci_disable_device(pdev);
3653}
3654
3655/**
129cf89e 3656 * iavf_probe - Device Initialization Routine
5eae00c5 3657 * @pdev: PCI device information struct
129cf89e 3658 * @ent: entry in iavf_pci_tbl
5eae00c5
GR
3659 *
3660 * Returns 0 on success, negative on failure
3661 *
129cf89e 3662 * iavf_probe initializes an adapter identified by a pci_dev structure.
5eae00c5
GR
3663 * The OS initialization, configuring of the adapter private structure,
3664 * and a hardware reset occur.
3665 **/
129cf89e 3666static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5eae00c5
GR
3667{
3668 struct net_device *netdev;
129cf89e 3669 struct iavf_adapter *adapter = NULL;
5eae00c5 3670 struct i40e_hw *hw = NULL;
dbbd8111 3671 int err;
5eae00c5
GR
3672
3673 err = pci_enable_device(pdev);
3674 if (err)
3675 return err;
3676
6494294f 3677 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6494294f 3678 if (err) {
e3e3bfdd
JS
3679 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3680 if (err) {
3681 dev_err(&pdev->dev,
3682 "DMA configuration failed: 0x%x\n", err);
3683 goto err_dma;
3684 }
5eae00c5
GR
3685 }
3686
129cf89e 3687 err = pci_request_regions(pdev, iavf_driver_name);
5eae00c5
GR
3688 if (err) {
3689 dev_err(&pdev->dev,
3690 "pci_request_regions failed 0x%x\n", err);
3691 goto err_pci_reg;
3692 }
3693
3694 pci_enable_pcie_error_reporting(pdev);
3695
3696 pci_set_master(pdev);
3697
129cf89e
JB
3698 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
3699 IAVF_MAX_REQ_QUEUES);
5eae00c5
GR
3700 if (!netdev) {
3701 err = -ENOMEM;
3702 goto err_alloc_etherdev;
3703 }
3704
3705 SET_NETDEV_DEV(netdev, &pdev->dev);
3706
3707 pci_set_drvdata(pdev, netdev);
3708 adapter = netdev_priv(netdev);
5eae00c5
GR
3709
3710 adapter->netdev = netdev;
3711 adapter->pdev = pdev;
3712
3713 hw = &adapter->hw;
3714 hw->back = adapter;
3715
41a1d04b 3716 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
129cf89e 3717 adapter->state = __IAVF_STARTUP;
5eae00c5
GR
3718
3719 /* Call save state here because it relies on the adapter struct. */
3720 pci_save_state(pdev);
3721
3722 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3723 pci_resource_len(pdev, 0));
3724 if (!hw->hw_addr) {
3725 err = -EIO;
3726 goto err_ioremap;
3727 }
3728 hw->vendor_id = pdev->vendor;
3729 hw->device_id = pdev->device;
3730 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
3731 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3732 hw->subsystem_device_id = pdev->subsystem_device;
3733 hw->bus.device = PCI_SLOT(pdev->devfn);
3734 hw->bus.func = PCI_FUNC(pdev->devfn);
b3f028fc 3735 hw->bus.bus_id = pdev->bus->number;
5eae00c5 3736
8ddb3326
JB
3737 /* set up the locks for the AQ, do this only once in probe
3738 * and destroy them only once in remove
3739 */
3740 mutex_init(&hw->aq.asq_mutex);
3741 mutex_init(&hw->aq.arq_mutex);
3742
504398f0 3743 spin_lock_init(&adapter->mac_vlan_list_lock);
0075fa0f 3744 spin_lock_init(&adapter->cloud_filter_list_lock);
504398f0 3745
8bb1a540
SK
3746 INIT_LIST_HEAD(&adapter->mac_filter_list);
3747 INIT_LIST_HEAD(&adapter->vlan_filter_list);
0075fa0f 3748 INIT_LIST_HEAD(&adapter->cloud_filter_list);
8bb1a540 3749
129cf89e
JB
3750 INIT_WORK(&adapter->reset_task, iavf_reset_task);
3751 INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
3752 INIT_WORK(&adapter->watchdog_task, iavf_watchdog_task);
3753 INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
3754 INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
9b32b0b5
MW
3755 schedule_delayed_work(&adapter->init_task,
3756 msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
5eae00c5 3757
fe2647ab
SM
3758	/* Set up the wait queue for indicating transition to down status */
3759 init_waitqueue_head(&adapter->down_waitqueue);
3760
5eae00c5
GR
3761 return 0;
3762
3763err_ioremap:
3764 free_netdev(netdev);
3765err_alloc_etherdev:
3766 pci_release_regions(pdev);
3767err_pci_reg:
3768err_dma:
3769 pci_disable_device(pdev);
3770 return err;
3771}
3772
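/* Illustrative note: the error labels in iavf_probe() above unwind in
 * strict reverse order of acquisition (ioremap -> etherdev -> PCI
 * regions -> PCI device), the usual kernel goto-ladder shape:
 *
 *	err = step_a();
 *	if (err)
 *		goto err_a;
 *	err = step_b();
 *	if (err)
 *		goto err_b;
 *	return 0;
 * err_b:
 *	undo_a();
 * err_a:
 *	return err;
 */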
3773#ifdef CONFIG_PM
3774/**
129cf89e 3775 * iavf_suspend - Power management suspend routine
5eae00c5
GR
3776 * @pdev: PCI device information struct
3777 * @state: unused
3778 *
3779 * Called when the system (VM) is entering sleep/suspend.
3780 **/
129cf89e 3781static int iavf_suspend(struct pci_dev *pdev, pm_message_t state)
5eae00c5
GR
3782{
3783 struct net_device *netdev = pci_get_drvdata(pdev);
129cf89e 3784 struct iavf_adapter *adapter = netdev_priv(netdev);
5eae00c5
GR
3785 int retval = 0;
3786
3787 netif_device_detach(netdev);
3788
129cf89e 3789 while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
9b2aef12
JK
3790 &adapter->crit_section))
3791 usleep_range(500, 1000);
3792
5eae00c5
GR
3793 if (netif_running(netdev)) {
3794 rtnl_lock();
129cf89e 3795 iavf_down(adapter);
5eae00c5
GR
3796 rtnl_unlock();
3797 }
129cf89e
JB
3798 iavf_free_misc_irq(adapter);
3799 iavf_reset_interrupt_capability(adapter);
5eae00c5 3800
129cf89e 3801 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
9b2aef12 3802
5eae00c5
GR
3803 retval = pci_save_state(pdev);
3804 if (retval)
3805 return retval;
3806
3807 pci_disable_device(pdev);
3808
3809 return 0;
3810}
3811
3812/**
129cf89e 3813 * iavf_resume - Power management resume routine
5eae00c5
GR
3814 * @pdev: PCI device information struct
3815 *
3816 * Called when the system (VM) is resumed from sleep/suspend.
3817 **/
129cf89e 3818static int iavf_resume(struct pci_dev *pdev)
5eae00c5 3819{
129cf89e 3820 struct iavf_adapter *adapter = pci_get_drvdata(pdev);
5eae00c5
GR
3821 struct net_device *netdev = adapter->netdev;
3822 u32 err;
3823
3824 pci_set_power_state(pdev, PCI_D0);
3825 pci_restore_state(pdev);
3826 /* pci_restore_state clears dev->state_saved so call
3827 * pci_save_state to restore it.
3828 */
3829 pci_save_state(pdev);
3830
3831 err = pci_enable_device_mem(pdev);
3832 if (err) {
3833 dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
3834 return err;
3835 }
3836 pci_set_master(pdev);
3837
3838 rtnl_lock();
129cf89e 3839 err = iavf_set_interrupt_capability(adapter);
5eae00c5 3840 if (err) {
f2a1c368 3841 rtnl_unlock();
5eae00c5
GR
3842 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
3843 return err;
3844 }
129cf89e 3845 err = iavf_request_misc_irq(adapter);
5eae00c5
GR
3846 rtnl_unlock();
3847 if (err) {
3848 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
3849 return err;
3850 }
3851
3852 schedule_work(&adapter->reset_task);
3853
3854 netif_device_attach(netdev);
3855
3856 return err;
3857}
3858
3859#endif /* CONFIG_PM */
3860/**
129cf89e 3861 * iavf_remove - Device Removal Routine
5eae00c5
GR
3862 * @pdev: PCI device information struct
3863 *
129cf89e 3864 * iavf_remove is called by the PCI subsystem to alert the driver
5eae00c5
GR
3865 * that it should release a PCI device. This could be caused by a
3866 * Hot-Plug event, or because the driver is going to be removed from
3867 * memory.
3868 **/
129cf89e 3869static void iavf_remove(struct pci_dev *pdev)
5eae00c5
GR
3870{
3871 struct net_device *netdev = pci_get_drvdata(pdev);
129cf89e
JB
3872 struct iavf_adapter *adapter = netdev_priv(netdev);
3873 struct iavf_vlan_filter *vlf, *vlftmp;
3874 struct iavf_mac_filter *f, *ftmp;
3875 struct iavf_cloud_filter *cf, *cftmp;
5eae00c5 3876 struct i40e_hw *hw = &adapter->hw;
ed0e894d 3877 int err;
06aa040f 3878 /* Indicate we are in remove and not to run reset_task */
129cf89e 3879 set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
5eae00c5 3880 cancel_delayed_work_sync(&adapter->init_task);
ef8693eb 3881 cancel_work_sync(&adapter->reset_task);
ed0e894d 3882 cancel_delayed_work_sync(&adapter->client_task);
5eae00c5
GR
3883 if (adapter->netdev_registered) {
3884 unregister_netdev(netdev);
3885 adapter->netdev_registered = false;
3886 }
ed0e894d 3887 if (CLIENT_ALLOWED(adapter)) {
129cf89e 3888 err = iavf_lan_del_device(adapter);
ed0e894d
MW
3889 if (err)
3890 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
3891 err);
3892 }
53d0b3ae 3893
f4a71881 3894 /* Shut down all the garbage mashers on the detention level */
129cf89e 3895 adapter->state = __IAVF_REMOVE;
f4a71881 3896 adapter->aq_required = 0;
129cf89e
JB
3897 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3898 iavf_request_reset(adapter);
22ead37f 3899 msleep(50);
f4a71881 3900 /* If the FW isn't responding, kick it once, but only once. */
129cf89e
JB
3901 if (!iavf_asq_done(hw)) {
3902 iavf_request_reset(adapter);
22ead37f 3903 msleep(50);
f4a71881 3904 }
129cf89e
JB
3905 iavf_free_all_tx_resources(adapter);
3906 iavf_free_all_rx_resources(adapter);
3907 iavf_misc_irq_disable(adapter);
3908 iavf_free_misc_irq(adapter);
3909 iavf_reset_interrupt_capability(adapter);
3910 iavf_free_q_vectors(adapter);
5eae00c5 3911
e5d17c3e
MW
3912 if (adapter->watchdog_timer.function)
3913 del_timer_sync(&adapter->watchdog_timer);
3914
babbcc60
LY
3915 cancel_work_sync(&adapter->adminq_task);
3916
129cf89e 3917 iavf_free_rss(adapter);
66f9af85 3918
5eae00c5 3919 if (hw->aq.asq.count)
129cf89e 3920 iavf_shutdown_adminq(hw);
5eae00c5 3921
8ddb3326
JB
3922 /* destroy the locks only once, here */
3923 mutex_destroy(&hw->aq.arq_mutex);
3924 mutex_destroy(&hw->aq.asq_mutex);
3925
5eae00c5
GR
3926 iounmap(hw->hw_addr);
3927 pci_release_regions(pdev);
129cf89e
JB
3928 iavf_free_all_tx_resources(adapter);
3929 iavf_free_all_rx_resources(adapter);
3930 iavf_free_queues(adapter);
5eae00c5 3931 kfree(adapter->vf_res);
504398f0 3932 spin_lock_bh(&adapter->mac_vlan_list_lock);
6ba36a24
MW
3933 /* If we got removed before an up/down sequence, we've got a filter
3934 * hanging out there that we need to get rid of.
3935 */
3936 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3937 list_del(&f->list);
3938 kfree(f);
3939 }
fbd5eb54
HR
3940 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
3941 list) {
3942 list_del(&vlf->list);
3943 kfree(vlf);
37dfdf37 3944 }
5eae00c5 3945
504398f0
JK
3946 spin_unlock_bh(&adapter->mac_vlan_list_lock);
3947
0075fa0f
HR
3948 spin_lock_bh(&adapter->cloud_filter_list_lock);
3949 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
3950 list_del(&cf->list);
3951 kfree(cf);
3952 }
3953 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3954
5eae00c5
GR
3955 free_netdev(netdev);
3956
3957 pci_disable_pcie_error_reporting(pdev);
3958
3959 pci_disable_device(pdev);
3960}
3961
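/* Note on the teardown above: list_for_each_entry_safe() is required
 * because each entry is freed while the list is being walked; the
 * "_safe" variant caches the next pointer (ftmp/vlftmp/cftmp) so that
 * list_del() plus kfree() cannot invalidate the iterator.
 */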
129cf89e
JB
3962static struct pci_driver iavf_driver = {
3963 .name = iavf_driver_name,
3964 .id_table = iavf_pci_tbl,
3965 .probe = iavf_probe,
3966 .remove = iavf_remove,
5eae00c5 3967#ifdef CONFIG_PM
129cf89e
JB
3968 .suspend = iavf_suspend,
3969 .resume = iavf_resume,
5eae00c5 3970#endif
129cf89e 3971 .shutdown = iavf_shutdown,
5eae00c5
GR
3972};
3973
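/* Illustrative note: a PCI driver with no module-level state could
 * replace the init/exit pair below with a single line:
 *
 *	module_pci_driver(iavf_driver);
 *
 * That shortcut does not fit here because iavf_init_module() must also
 * allocate the iavf_wq workqueue before registering, and
 * iavf_exit_module() must destroy it after unregistering.
 */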
3974/**
3975 * iavf_init_module - Driver Registration Routine
3976 *
3977 * iavf_init_module is the first routine called when the driver is
3978 * loaded. All it does is register with the PCI subsystem.
3979 **/
129cf89e 3980static int __init iavf_init_module(void)
5eae00c5
GR
3981{
3982 int ret;
75a64435 3983
129cf89e
JB
3984 pr_info("iavf: %s - version %s\n", iavf_driver_string,
3985 iavf_driver_version);
5eae00c5 3986
129cf89e 3987 pr_info("%s\n", iavf_copyright);
5eae00c5 3988
129cf89e
JB
3989 iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
3990 iavf_driver_name);
3991 if (!iavf_wq) {
3992 pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
2803b16c
JB
3993 return -ENOMEM;
3994 }
129cf89e 3995 ret = pci_register_driver(&iavf_driver);
5eae00c5
GR
3996 return ret;
3997}
3998
129cf89e 3999module_init(iavf_init_module);
5eae00c5
GR
4000
4001/**
4002 * iavf_exit_module - Driver Exit Cleanup Routine
4003 *
4004 * iavf_exit_module is called just before the driver is removed
4005 * from memory.
4006 **/
129cf89e 4007static void __exit iavf_exit_module(void)
5eae00c5 4008{
129cf89e
JB
4009 pci_unregister_driver(&iavf_driver);
4010 destroy_workqueue(iavf_wq);
5eae00c5
GR
4011}
4012
129cf89e 4013module_exit(iavf_exit_module);
5eae00c5 4014
129cf89e 4015/* iavf_main.c */