]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/net/ethernet/intel/i40e/i40e_main.c
i40e: Use smp_rmb rather than read_barrier_depends
[mirror_ubuntu-bionic-kernel.git] / drivers / net / ethernet / intel / i40e / i40e_main.c
1 /*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2017 Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 ******************************************************************************/
26
27 #include <linux/etherdevice.h>
28 #include <linux/of_net.h>
29 #include <linux/pci.h>
30 #include <linux/bpf.h>
31
32 /* Local includes */
33 #include "i40e.h"
34 #include "i40e_diag.h"
35 #include <net/udp_tunnel.h>
36 /* All i40e tracepoints are defined by the include below, which
37 * must be included exactly once across the whole kernel with
38 * CREATE_TRACE_POINTS defined
39 */
40 #define CREATE_TRACE_POINTS
41 #include "i40e_trace.h"
42
43 const char i40e_driver_name[] = "i40e";
44 static const char i40e_driver_string[] =
45 "Intel(R) Ethernet Connection XL710 Network Driver";
46
47 #define DRV_KERN "-k"
48
49 #define DRV_VERSION_MAJOR 2
50 #define DRV_VERSION_MINOR 1
51 #define DRV_VERSION_BUILD 14
52 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
53 __stringify(DRV_VERSION_MINOR) "." \
54 __stringify(DRV_VERSION_BUILD) DRV_KERN
55 const char i40e_driver_version_str[] = DRV_VERSION;
56 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
57
58 /* a bit of forward declarations */
59 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
60 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
61 static int i40e_add_vsi(struct i40e_vsi *vsi);
62 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
63 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
64 static int i40e_setup_misc_vector(struct i40e_pf *pf);
65 static void i40e_determine_queue_usage(struct i40e_pf *pf);
66 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
67 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
68 static int i40e_reset(struct i40e_pf *pf);
69 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
70 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
71 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
72 static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
73 struct i40e_cloud_filter *filter,
74 bool add);
75 static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
76 struct i40e_cloud_filter *filter,
77 bool add);
78 static int i40e_get_capabilities(struct i40e_pf *pf,
79 enum i40e_admin_queue_opc list_type);
80
81
82 /* i40e_pci_tbl - PCI Device ID Table
83 *
84 * Last entry must be all 0s
85 *
86 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
87 * Class, Class Mask, private data (not used) }
88 */
89 static const struct pci_device_id i40e_pci_tbl[] = {
90 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
91 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
92 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
93 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
94 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
95 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
96 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
97 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
98 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
99 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
100 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
101 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
102 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
103 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
104 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
105 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
106 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
107 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
108 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
109 /* required last entry */
110 {0, }
111 };
112 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
113
114 #define I40E_MAX_VF_COUNT 128
115 static int debug = -1;
116 module_param(debug, uint, 0);
117 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
118
119 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
120 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
121 MODULE_LICENSE("GPL");
122 MODULE_VERSION(DRV_VERSION);
123
124 static struct workqueue_struct *i40e_wq;
125
126 /**
127 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
128 * @hw: pointer to the HW structure
129 * @mem: ptr to mem struct to fill out
130 * @size: size of memory requested
131 * @alignment: what to align the allocation to
132 **/
133 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
134 u64 size, u32 alignment)
135 {
136 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
137
138 mem->size = ALIGN(size, alignment);
139 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
140 &mem->pa, GFP_KERNEL);
141 if (!mem->va)
142 return -ENOMEM;
143
144 return 0;
145 }
146
147 /**
148 * i40e_free_dma_mem_d - OS specific memory free for shared code
149 * @hw: pointer to the HW structure
150 * @mem: ptr to mem struct to free
151 **/
152 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
153 {
154 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
155
156 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
157 mem->va = NULL;
158 mem->pa = 0;
159 mem->size = 0;
160
161 return 0;
162 }
163
164 /**
165 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
166 * @hw: pointer to the HW structure
167 * @mem: ptr to mem struct to fill out
168 * @size: size of memory requested
169 **/
170 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
171 u32 size)
172 {
173 mem->size = size;
174 mem->va = kzalloc(size, GFP_KERNEL);
175
176 if (!mem->va)
177 return -ENOMEM;
178
179 return 0;
180 }
181
182 /**
183 * i40e_free_virt_mem_d - OS specific memory free for shared code
184 * @hw: pointer to the HW structure
185 * @mem: ptr to mem struct to free
186 **/
187 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
188 {
189 /* it's ok to kfree a NULL pointer */
190 kfree(mem->va);
191 mem->va = NULL;
192 mem->size = 0;
193
194 return 0;
195 }
196
197 /**
198 * i40e_get_lump - find a lump of free generic resource
199 * @pf: board private structure
200 * @pile: the pile of resource to search
201 * @needed: the number of items needed
202 * @id: an owner id to stick on the items assigned
203 *
204 * Returns the base item index of the lump, or negative for error
205 *
206 * The search_hint trick and lack of advanced fit-finding only work
207 * because we're highly likely to have all the same size lump requests.
208 * Linear search time and any fragmentation should be minimal.
209 **/
210 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
211 u16 needed, u16 id)
212 {
213 int ret = -ENOMEM;
214 int i, j;
215
216 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
217 dev_info(&pf->pdev->dev,
218 "param err: pile=%p needed=%d id=0x%04x\n",
219 pile, needed, id);
220 return -EINVAL;
221 }
222
223 /* start the linear search with an imperfect hint */
224 i = pile->search_hint;
225 while (i < pile->num_entries) {
226 /* skip already allocated entries */
227 if (pile->list[i] & I40E_PILE_VALID_BIT) {
228 i++;
229 continue;
230 }
231
232 /* do we have enough in this lump? */
233 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
234 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
235 break;
236 }
237
238 if (j == needed) {
239 /* there was enough, so assign it to the requestor */
240 for (j = 0; j < needed; j++)
241 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
242 ret = i;
243 pile->search_hint = i + j;
244 break;
245 }
246
247 /* not enough, so skip over it and continue looking */
248 i += j;
249 }
250
251 return ret;
252 }
253
254 /**
255 * i40e_put_lump - return a lump of generic resource
256 * @pile: the pile of resource to search
257 * @index: the base item index
258 * @id: the owner id of the items assigned
259 *
260 * Returns the count of items in the lump
261 **/
262 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
263 {
264 int valid_id = (id | I40E_PILE_VALID_BIT);
265 int count = 0;
266 int i;
267
268 if (!pile || index >= pile->num_entries)
269 return -EINVAL;
270
271 for (i = index;
272 i < pile->num_entries && pile->list[i] == valid_id;
273 i++) {
274 pile->list[i] = 0;
275 count++;
276 }
277
278 if (count && index < pile->search_hint)
279 pile->search_hint = index;
280
281 return count;
282 }
283
284 /**
285 * i40e_find_vsi_from_id - searches for the vsi with the given id
286 * @pf - the pf structure to search for the vsi
287 * @id - id of the vsi it is searching for
288 **/
289 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
290 {
291 int i;
292
293 for (i = 0; i < pf->num_alloc_vsi; i++)
294 if (pf->vsi[i] && (pf->vsi[i]->id == id))
295 return pf->vsi[i];
296
297 return NULL;
298 }
299
300 /**
301 * i40e_service_event_schedule - Schedule the service task to wake up
302 * @pf: board private structure
303 *
304 * If not already scheduled, this puts the task into the work queue
305 **/
306 void i40e_service_event_schedule(struct i40e_pf *pf)
307 {
308 if (!test_bit(__I40E_DOWN, pf->state) &&
309 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
310 queue_work(i40e_wq, &pf->service_task);
311 }
312
313 /**
314 * i40e_tx_timeout - Respond to a Tx Hang
315 * @netdev: network interface device structure
316 *
317 * If any port has noticed a Tx timeout, it is likely that the whole
318 * device is munged, not just the one netdev port, so go for the full
319 * reset.
320 **/
321 static void i40e_tx_timeout(struct net_device *netdev)
322 {
323 struct i40e_netdev_priv *np = netdev_priv(netdev);
324 struct i40e_vsi *vsi = np->vsi;
325 struct i40e_pf *pf = vsi->back;
326 struct i40e_ring *tx_ring = NULL;
327 unsigned int i, hung_queue = 0;
328 u32 head, val;
329
330 pf->tx_timeout_count++;
331
332 /* find the stopped queue the same way the stack does */
333 for (i = 0; i < netdev->num_tx_queues; i++) {
334 struct netdev_queue *q;
335 unsigned long trans_start;
336
337 q = netdev_get_tx_queue(netdev, i);
338 trans_start = q->trans_start;
339 if (netif_xmit_stopped(q) &&
340 time_after(jiffies,
341 (trans_start + netdev->watchdog_timeo))) {
342 hung_queue = i;
343 break;
344 }
345 }
346
347 if (i == netdev->num_tx_queues) {
348 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
349 } else {
350 /* now that we have an index, find the tx_ring struct */
351 for (i = 0; i < vsi->num_queue_pairs; i++) {
352 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
353 if (hung_queue ==
354 vsi->tx_rings[i]->queue_index) {
355 tx_ring = vsi->tx_rings[i];
356 break;
357 }
358 }
359 }
360 }
361
362 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
363 pf->tx_timeout_recovery_level = 1; /* reset after some time */
364 else if (time_before(jiffies,
365 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
366 return; /* don't do any new action before the next timeout */
367
368 if (tx_ring) {
369 head = i40e_get_head(tx_ring);
370 /* Read interrupt register */
371 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
372 val = rd32(&pf->hw,
373 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
374 tx_ring->vsi->base_vector - 1));
375 else
376 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
377
378 netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
379 vsi->seid, hung_queue, tx_ring->next_to_clean,
380 head, tx_ring->next_to_use,
381 readl(tx_ring->tail), val);
382 }
383
384 pf->tx_timeout_last_recovery = jiffies;
385 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
386 pf->tx_timeout_recovery_level, hung_queue);
387
388 switch (pf->tx_timeout_recovery_level) {
389 case 1:
390 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
391 break;
392 case 2:
393 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
394 break;
395 case 3:
396 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
397 break;
398 default:
399 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
400 break;
401 }
402
403 i40e_service_event_schedule(pf);
404 pf->tx_timeout_recovery_level++;
405 }
406
407 /**
408 * i40e_get_vsi_stats_struct - Get System Network Statistics
409 * @vsi: the VSI we care about
410 *
411 * Returns the address of the device statistics structure.
412 * The statistics are actually updated from the service task.
413 **/
414 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
415 {
416 return &vsi->net_stats;
417 }
418
419 /**
420 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
421 * @ring: Tx ring to get statistics from
422 * @stats: statistics entry to be updated
423 **/
424 static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
425 struct rtnl_link_stats64 *stats)
426 {
427 u64 bytes, packets;
428 unsigned int start;
429
430 do {
431 start = u64_stats_fetch_begin_irq(&ring->syncp);
432 packets = ring->stats.packets;
433 bytes = ring->stats.bytes;
434 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
435
436 stats->tx_packets += packets;
437 stats->tx_bytes += bytes;
438 }
439
440 /**
441 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
442 * @netdev: network interface device structure
443 *
444 * Returns the address of the device statistics structure.
445 * The statistics are actually updated from the service task.
446 **/
447 static void i40e_get_netdev_stats_struct(struct net_device *netdev,
448 struct rtnl_link_stats64 *stats)
449 {
450 struct i40e_netdev_priv *np = netdev_priv(netdev);
451 struct i40e_ring *tx_ring, *rx_ring;
452 struct i40e_vsi *vsi = np->vsi;
453 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
454 int i;
455
456 if (test_bit(__I40E_VSI_DOWN, vsi->state))
457 return;
458
459 if (!vsi->tx_rings)
460 return;
461
462 rcu_read_lock();
463 for (i = 0; i < vsi->num_queue_pairs; i++) {
464 u64 bytes, packets;
465 unsigned int start;
466
467 tx_ring = READ_ONCE(vsi->tx_rings[i]);
468 if (!tx_ring)
469 continue;
470 i40e_get_netdev_stats_struct_tx(tx_ring, stats);
471
472 rx_ring = &tx_ring[1];
473
474 do {
475 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
476 packets = rx_ring->stats.packets;
477 bytes = rx_ring->stats.bytes;
478 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
479
480 stats->rx_packets += packets;
481 stats->rx_bytes += bytes;
482
483 if (i40e_enabled_xdp_vsi(vsi))
484 i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
485 }
486 rcu_read_unlock();
487
488 /* following stats updated by i40e_watchdog_subtask() */
489 stats->multicast = vsi_stats->multicast;
490 stats->tx_errors = vsi_stats->tx_errors;
491 stats->tx_dropped = vsi_stats->tx_dropped;
492 stats->rx_errors = vsi_stats->rx_errors;
493 stats->rx_dropped = vsi_stats->rx_dropped;
494 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
495 stats->rx_length_errors = vsi_stats->rx_length_errors;
496 }
497
498 /**
499 * i40e_vsi_reset_stats - Resets all stats of the given vsi
500 * @vsi: the VSI to have its stats reset
501 **/
502 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
503 {
504 struct rtnl_link_stats64 *ns;
505 int i;
506
507 if (!vsi)
508 return;
509
510 ns = i40e_get_vsi_stats_struct(vsi);
511 memset(ns, 0, sizeof(*ns));
512 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
513 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
514 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
515 if (vsi->rx_rings && vsi->rx_rings[0]) {
516 for (i = 0; i < vsi->num_queue_pairs; i++) {
517 memset(&vsi->rx_rings[i]->stats, 0,
518 sizeof(vsi->rx_rings[i]->stats));
519 memset(&vsi->rx_rings[i]->rx_stats, 0,
520 sizeof(vsi->rx_rings[i]->rx_stats));
521 memset(&vsi->tx_rings[i]->stats, 0,
522 sizeof(vsi->tx_rings[i]->stats));
523 memset(&vsi->tx_rings[i]->tx_stats, 0,
524 sizeof(vsi->tx_rings[i]->tx_stats));
525 }
526 }
527 vsi->stat_offsets_loaded = false;
528 }
529
530 /**
531 * i40e_pf_reset_stats - Reset all of the stats for the given PF
532 * @pf: the PF to be reset
533 **/
534 void i40e_pf_reset_stats(struct i40e_pf *pf)
535 {
536 int i;
537
538 memset(&pf->stats, 0, sizeof(pf->stats));
539 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
540 pf->stat_offsets_loaded = false;
541
542 for (i = 0; i < I40E_MAX_VEB; i++) {
543 if (pf->veb[i]) {
544 memset(&pf->veb[i]->stats, 0,
545 sizeof(pf->veb[i]->stats));
546 memset(&pf->veb[i]->stats_offsets, 0,
547 sizeof(pf->veb[i]->stats_offsets));
548 pf->veb[i]->stat_offsets_loaded = false;
549 }
550 }
551 pf->hw_csum_rx_error = 0;
552 }
553
554 /**
555 * i40e_stat_update48 - read and update a 48 bit stat from the chip
556 * @hw: ptr to the hardware info
557 * @hireg: the high 32 bit reg to read
558 * @loreg: the low 32 bit reg to read
559 * @offset_loaded: has the initial offset been loaded yet
560 * @offset: ptr to current offset value
561 * @stat: ptr to the stat
562 *
563 * Since the device stats are not reset at PFReset, they likely will not
564 * be zeroed when the driver starts. We'll save the first values read
565 * and use them as offsets to be subtracted from the raw values in order
566 * to report stats that count from zero. In the process, we also manage
567 * the potential roll-over.
568 **/
569 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
570 bool offset_loaded, u64 *offset, u64 *stat)
571 {
572 u64 new_data;
573
574 if (hw->device_id == I40E_DEV_ID_QEMU) {
575 new_data = rd32(hw, loreg);
576 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
577 } else {
578 new_data = rd64(hw, loreg);
579 }
580 if (!offset_loaded)
581 *offset = new_data;
582 if (likely(new_data >= *offset))
583 *stat = new_data - *offset;
584 else
585 *stat = (new_data + BIT_ULL(48)) - *offset;
586 *stat &= 0xFFFFFFFFFFFFULL;
587 }
588
589 /**
590 * i40e_stat_update32 - read and update a 32 bit stat from the chip
591 * @hw: ptr to the hardware info
592 * @reg: the hw reg to read
593 * @offset_loaded: has the initial offset been loaded yet
594 * @offset: ptr to current offset value
595 * @stat: ptr to the stat
596 **/
597 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
598 bool offset_loaded, u64 *offset, u64 *stat)
599 {
600 u32 new_data;
601
602 new_data = rd32(hw, reg);
603 if (!offset_loaded)
604 *offset = new_data;
605 if (likely(new_data >= *offset))
606 *stat = (u32)(new_data - *offset);
607 else
608 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
609 }
610
611 /**
612 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
613 * @hw: ptr to the hardware info
614 * @reg: the hw reg to read and clear
615 * @stat: ptr to the stat
616 **/
617 static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
618 {
619 u32 new_data = rd32(hw, reg);
620
621 wr32(hw, reg, 1); /* must write a nonzero value to clear register */
622 *stat += new_data;
623 }
624
625 /**
626 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
627 * @vsi: the VSI to be updated
628 **/
629 void i40e_update_eth_stats(struct i40e_vsi *vsi)
630 {
631 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
632 struct i40e_pf *pf = vsi->back;
633 struct i40e_hw *hw = &pf->hw;
634 struct i40e_eth_stats *oes;
635 struct i40e_eth_stats *es; /* device's eth stats */
636
637 es = &vsi->eth_stats;
638 oes = &vsi->eth_stats_offsets;
639
640 /* Gather up the stats that the hw collects */
641 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
642 vsi->stat_offsets_loaded,
643 &oes->tx_errors, &es->tx_errors);
644 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
645 vsi->stat_offsets_loaded,
646 &oes->rx_discards, &es->rx_discards);
647 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
648 vsi->stat_offsets_loaded,
649 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
650 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
651 vsi->stat_offsets_loaded,
652 &oes->tx_errors, &es->tx_errors);
653
654 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
655 I40E_GLV_GORCL(stat_idx),
656 vsi->stat_offsets_loaded,
657 &oes->rx_bytes, &es->rx_bytes);
658 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
659 I40E_GLV_UPRCL(stat_idx),
660 vsi->stat_offsets_loaded,
661 &oes->rx_unicast, &es->rx_unicast);
662 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
663 I40E_GLV_MPRCL(stat_idx),
664 vsi->stat_offsets_loaded,
665 &oes->rx_multicast, &es->rx_multicast);
666 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
667 I40E_GLV_BPRCL(stat_idx),
668 vsi->stat_offsets_loaded,
669 &oes->rx_broadcast, &es->rx_broadcast);
670
671 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
672 I40E_GLV_GOTCL(stat_idx),
673 vsi->stat_offsets_loaded,
674 &oes->tx_bytes, &es->tx_bytes);
675 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
676 I40E_GLV_UPTCL(stat_idx),
677 vsi->stat_offsets_loaded,
678 &oes->tx_unicast, &es->tx_unicast);
679 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
680 I40E_GLV_MPTCL(stat_idx),
681 vsi->stat_offsets_loaded,
682 &oes->tx_multicast, &es->tx_multicast);
683 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
684 I40E_GLV_BPTCL(stat_idx),
685 vsi->stat_offsets_loaded,
686 &oes->tx_broadcast, &es->tx_broadcast);
687 vsi->stat_offsets_loaded = true;
688 }
689
690 /**
691 * i40e_update_veb_stats - Update Switch component statistics
692 * @veb: the VEB being updated
693 **/
694 static void i40e_update_veb_stats(struct i40e_veb *veb)
695 {
696 struct i40e_pf *pf = veb->pf;
697 struct i40e_hw *hw = &pf->hw;
698 struct i40e_eth_stats *oes;
699 struct i40e_eth_stats *es; /* device's eth stats */
700 struct i40e_veb_tc_stats *veb_oes;
701 struct i40e_veb_tc_stats *veb_es;
702 int i, idx = 0;
703
704 idx = veb->stats_idx;
705 es = &veb->stats;
706 oes = &veb->stats_offsets;
707 veb_es = &veb->tc_stats;
708 veb_oes = &veb->tc_stats_offsets;
709
710 /* Gather up the stats that the hw collects */
711 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
712 veb->stat_offsets_loaded,
713 &oes->tx_discards, &es->tx_discards);
714 if (hw->revision_id > 0)
715 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
716 veb->stat_offsets_loaded,
717 &oes->rx_unknown_protocol,
718 &es->rx_unknown_protocol);
719 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
720 veb->stat_offsets_loaded,
721 &oes->rx_bytes, &es->rx_bytes);
722 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
723 veb->stat_offsets_loaded,
724 &oes->rx_unicast, &es->rx_unicast);
725 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
726 veb->stat_offsets_loaded,
727 &oes->rx_multicast, &es->rx_multicast);
728 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
729 veb->stat_offsets_loaded,
730 &oes->rx_broadcast, &es->rx_broadcast);
731
732 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
733 veb->stat_offsets_loaded,
734 &oes->tx_bytes, &es->tx_bytes);
735 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
736 veb->stat_offsets_loaded,
737 &oes->tx_unicast, &es->tx_unicast);
738 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
739 veb->stat_offsets_loaded,
740 &oes->tx_multicast, &es->tx_multicast);
741 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
742 veb->stat_offsets_loaded,
743 &oes->tx_broadcast, &es->tx_broadcast);
744 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
745 i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
746 I40E_GLVEBTC_RPCL(i, idx),
747 veb->stat_offsets_loaded,
748 &veb_oes->tc_rx_packets[i],
749 &veb_es->tc_rx_packets[i]);
750 i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
751 I40E_GLVEBTC_RBCL(i, idx),
752 veb->stat_offsets_loaded,
753 &veb_oes->tc_rx_bytes[i],
754 &veb_es->tc_rx_bytes[i]);
755 i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
756 I40E_GLVEBTC_TPCL(i, idx),
757 veb->stat_offsets_loaded,
758 &veb_oes->tc_tx_packets[i],
759 &veb_es->tc_tx_packets[i]);
760 i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
761 I40E_GLVEBTC_TBCL(i, idx),
762 veb->stat_offsets_loaded,
763 &veb_oes->tc_tx_bytes[i],
764 &veb_es->tc_tx_bytes[i]);
765 }
766 veb->stat_offsets_loaded = true;
767 }
768
769 /**
770 * i40e_update_vsi_stats - Update the vsi statistics counters.
771 * @vsi: the VSI to be updated
772 *
773 * There are a few instances where we store the same stat in a
774 * couple of different structs. This is partly because we have
775 * the netdev stats that need to be filled out, which is slightly
776 * different from the "eth_stats" defined by the chip and used in
777 * VF communications. We sort it out here.
778 **/
779 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
780 {
781 struct i40e_pf *pf = vsi->back;
782 struct rtnl_link_stats64 *ons;
783 struct rtnl_link_stats64 *ns; /* netdev stats */
784 struct i40e_eth_stats *oes;
785 struct i40e_eth_stats *es; /* device's eth stats */
786 u32 tx_restart, tx_busy;
787 struct i40e_ring *p;
788 u32 rx_page, rx_buf;
789 u64 bytes, packets;
790 unsigned int start;
791 u64 tx_linearize;
792 u64 tx_force_wb;
793 u64 rx_p, rx_b;
794 u64 tx_p, tx_b;
795 u16 q;
796
797 if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
798 test_bit(__I40E_CONFIG_BUSY, pf->state))
799 return;
800
801 ns = i40e_get_vsi_stats_struct(vsi);
802 ons = &vsi->net_stats_offsets;
803 es = &vsi->eth_stats;
804 oes = &vsi->eth_stats_offsets;
805
806 /* Gather up the netdev and vsi stats that the driver collects
807 * on the fly during packet processing
808 */
809 rx_b = rx_p = 0;
810 tx_b = tx_p = 0;
811 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
812 rx_page = 0;
813 rx_buf = 0;
814 rcu_read_lock();
815 for (q = 0; q < vsi->num_queue_pairs; q++) {
816 /* locate Tx ring */
817 p = READ_ONCE(vsi->tx_rings[q]);
818
819 do {
820 start = u64_stats_fetch_begin_irq(&p->syncp);
821 packets = p->stats.packets;
822 bytes = p->stats.bytes;
823 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
824 tx_b += bytes;
825 tx_p += packets;
826 tx_restart += p->tx_stats.restart_queue;
827 tx_busy += p->tx_stats.tx_busy;
828 tx_linearize += p->tx_stats.tx_linearize;
829 tx_force_wb += p->tx_stats.tx_force_wb;
830
831 /* Rx queue is part of the same block as Tx queue */
832 p = &p[1];
833 do {
834 start = u64_stats_fetch_begin_irq(&p->syncp);
835 packets = p->stats.packets;
836 bytes = p->stats.bytes;
837 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
838 rx_b += bytes;
839 rx_p += packets;
840 rx_buf += p->rx_stats.alloc_buff_failed;
841 rx_page += p->rx_stats.alloc_page_failed;
842 }
843 rcu_read_unlock();
844 vsi->tx_restart = tx_restart;
845 vsi->tx_busy = tx_busy;
846 vsi->tx_linearize = tx_linearize;
847 vsi->tx_force_wb = tx_force_wb;
848 vsi->rx_page_failed = rx_page;
849 vsi->rx_buf_failed = rx_buf;
850
851 ns->rx_packets = rx_p;
852 ns->rx_bytes = rx_b;
853 ns->tx_packets = tx_p;
854 ns->tx_bytes = tx_b;
855
856 /* update netdev stats from eth stats */
857 i40e_update_eth_stats(vsi);
858 ons->tx_errors = oes->tx_errors;
859 ns->tx_errors = es->tx_errors;
860 ons->multicast = oes->rx_multicast;
861 ns->multicast = es->rx_multicast;
862 ons->rx_dropped = oes->rx_discards;
863 ns->rx_dropped = es->rx_discards;
864 ons->tx_dropped = oes->tx_discards;
865 ns->tx_dropped = es->tx_discards;
866
867 /* pull in a couple PF stats if this is the main vsi */
868 if (vsi == pf->vsi[pf->lan_vsi]) {
869 ns->rx_crc_errors = pf->stats.crc_errors;
870 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
871 ns->rx_length_errors = pf->stats.rx_length_errors;
872 }
873 }
874
875 /**
876 * i40e_update_pf_stats - Update the PF statistics counters.
877 * @pf: the PF to be updated
878 **/
879 static void i40e_update_pf_stats(struct i40e_pf *pf)
880 {
881 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
882 struct i40e_hw_port_stats *nsd = &pf->stats;
883 struct i40e_hw *hw = &pf->hw;
884 u32 val;
885 int i;
886
887 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
888 I40E_GLPRT_GORCL(hw->port),
889 pf->stat_offsets_loaded,
890 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
891 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
892 I40E_GLPRT_GOTCL(hw->port),
893 pf->stat_offsets_loaded,
894 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
895 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
896 pf->stat_offsets_loaded,
897 &osd->eth.rx_discards,
898 &nsd->eth.rx_discards);
899 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
900 I40E_GLPRT_UPRCL(hw->port),
901 pf->stat_offsets_loaded,
902 &osd->eth.rx_unicast,
903 &nsd->eth.rx_unicast);
904 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
905 I40E_GLPRT_MPRCL(hw->port),
906 pf->stat_offsets_loaded,
907 &osd->eth.rx_multicast,
908 &nsd->eth.rx_multicast);
909 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
910 I40E_GLPRT_BPRCL(hw->port),
911 pf->stat_offsets_loaded,
912 &osd->eth.rx_broadcast,
913 &nsd->eth.rx_broadcast);
914 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
915 I40E_GLPRT_UPTCL(hw->port),
916 pf->stat_offsets_loaded,
917 &osd->eth.tx_unicast,
918 &nsd->eth.tx_unicast);
919 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
920 I40E_GLPRT_MPTCL(hw->port),
921 pf->stat_offsets_loaded,
922 &osd->eth.tx_multicast,
923 &nsd->eth.tx_multicast);
924 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
925 I40E_GLPRT_BPTCL(hw->port),
926 pf->stat_offsets_loaded,
927 &osd->eth.tx_broadcast,
928 &nsd->eth.tx_broadcast);
929
930 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
931 pf->stat_offsets_loaded,
932 &osd->tx_dropped_link_down,
933 &nsd->tx_dropped_link_down);
934
935 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
936 pf->stat_offsets_loaded,
937 &osd->crc_errors, &nsd->crc_errors);
938
939 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
940 pf->stat_offsets_loaded,
941 &osd->illegal_bytes, &nsd->illegal_bytes);
942
943 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
944 pf->stat_offsets_loaded,
945 &osd->mac_local_faults,
946 &nsd->mac_local_faults);
947 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
948 pf->stat_offsets_loaded,
949 &osd->mac_remote_faults,
950 &nsd->mac_remote_faults);
951
952 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
953 pf->stat_offsets_loaded,
954 &osd->rx_length_errors,
955 &nsd->rx_length_errors);
956
957 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
958 pf->stat_offsets_loaded,
959 &osd->link_xon_rx, &nsd->link_xon_rx);
960 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
961 pf->stat_offsets_loaded,
962 &osd->link_xon_tx, &nsd->link_xon_tx);
963 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
964 pf->stat_offsets_loaded,
965 &osd->link_xoff_rx, &nsd->link_xoff_rx);
966 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
967 pf->stat_offsets_loaded,
968 &osd->link_xoff_tx, &nsd->link_xoff_tx);
969
970 for (i = 0; i < 8; i++) {
971 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
972 pf->stat_offsets_loaded,
973 &osd->priority_xoff_rx[i],
974 &nsd->priority_xoff_rx[i]);
975 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
976 pf->stat_offsets_loaded,
977 &osd->priority_xon_rx[i],
978 &nsd->priority_xon_rx[i]);
979 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
980 pf->stat_offsets_loaded,
981 &osd->priority_xon_tx[i],
982 &nsd->priority_xon_tx[i]);
983 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
984 pf->stat_offsets_loaded,
985 &osd->priority_xoff_tx[i],
986 &nsd->priority_xoff_tx[i]);
987 i40e_stat_update32(hw,
988 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
989 pf->stat_offsets_loaded,
990 &osd->priority_xon_2_xoff[i],
991 &nsd->priority_xon_2_xoff[i]);
992 }
993
994 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
995 I40E_GLPRT_PRC64L(hw->port),
996 pf->stat_offsets_loaded,
997 &osd->rx_size_64, &nsd->rx_size_64);
998 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
999 I40E_GLPRT_PRC127L(hw->port),
1000 pf->stat_offsets_loaded,
1001 &osd->rx_size_127, &nsd->rx_size_127);
1002 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1003 I40E_GLPRT_PRC255L(hw->port),
1004 pf->stat_offsets_loaded,
1005 &osd->rx_size_255, &nsd->rx_size_255);
1006 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1007 I40E_GLPRT_PRC511L(hw->port),
1008 pf->stat_offsets_loaded,
1009 &osd->rx_size_511, &nsd->rx_size_511);
1010 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1011 I40E_GLPRT_PRC1023L(hw->port),
1012 pf->stat_offsets_loaded,
1013 &osd->rx_size_1023, &nsd->rx_size_1023);
1014 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1015 I40E_GLPRT_PRC1522L(hw->port),
1016 pf->stat_offsets_loaded,
1017 &osd->rx_size_1522, &nsd->rx_size_1522);
1018 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1019 I40E_GLPRT_PRC9522L(hw->port),
1020 pf->stat_offsets_loaded,
1021 &osd->rx_size_big, &nsd->rx_size_big);
1022
1023 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1024 I40E_GLPRT_PTC64L(hw->port),
1025 pf->stat_offsets_loaded,
1026 &osd->tx_size_64, &nsd->tx_size_64);
1027 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1028 I40E_GLPRT_PTC127L(hw->port),
1029 pf->stat_offsets_loaded,
1030 &osd->tx_size_127, &nsd->tx_size_127);
1031 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1032 I40E_GLPRT_PTC255L(hw->port),
1033 pf->stat_offsets_loaded,
1034 &osd->tx_size_255, &nsd->tx_size_255);
1035 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1036 I40E_GLPRT_PTC511L(hw->port),
1037 pf->stat_offsets_loaded,
1038 &osd->tx_size_511, &nsd->tx_size_511);
1039 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1040 I40E_GLPRT_PTC1023L(hw->port),
1041 pf->stat_offsets_loaded,
1042 &osd->tx_size_1023, &nsd->tx_size_1023);
1043 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1044 I40E_GLPRT_PTC1522L(hw->port),
1045 pf->stat_offsets_loaded,
1046 &osd->tx_size_1522, &nsd->tx_size_1522);
1047 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1048 I40E_GLPRT_PTC9522L(hw->port),
1049 pf->stat_offsets_loaded,
1050 &osd->tx_size_big, &nsd->tx_size_big);
1051
1052 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1053 pf->stat_offsets_loaded,
1054 &osd->rx_undersize, &nsd->rx_undersize);
1055 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1056 pf->stat_offsets_loaded,
1057 &osd->rx_fragments, &nsd->rx_fragments);
1058 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1059 pf->stat_offsets_loaded,
1060 &osd->rx_oversize, &nsd->rx_oversize);
1061 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1062 pf->stat_offsets_loaded,
1063 &osd->rx_jabber, &nsd->rx_jabber);
1064
1065 /* FDIR stats */
1066 i40e_stat_update_and_clear32(hw,
1067 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
1068 &nsd->fd_atr_match);
1069 i40e_stat_update_and_clear32(hw,
1070 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
1071 &nsd->fd_sb_match);
1072 i40e_stat_update_and_clear32(hw,
1073 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
1074 &nsd->fd_atr_tunnel_match);
1075
1076 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1077 nsd->tx_lpi_status =
1078 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1079 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1080 nsd->rx_lpi_status =
1081 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1082 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1083 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1084 pf->stat_offsets_loaded,
1085 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1086 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1087 pf->stat_offsets_loaded,
1088 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1089
1090 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1091 !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
1092 nsd->fd_sb_status = true;
1093 else
1094 nsd->fd_sb_status = false;
1095
1096 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1097 !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
1098 nsd->fd_atr_status = true;
1099 else
1100 nsd->fd_atr_status = false;
1101
1102 pf->stat_offsets_loaded = true;
1103 }
1104
1105 /**
1106 * i40e_update_stats - Update the various statistics counters.
1107 * @vsi: the VSI to be updated
1108 *
1109 * Update the various stats for this VSI and its related entities.
1110 **/
1111 void i40e_update_stats(struct i40e_vsi *vsi)
1112 {
1113 struct i40e_pf *pf = vsi->back;
1114
1115 if (vsi == pf->vsi[pf->lan_vsi])
1116 i40e_update_pf_stats(pf);
1117
1118 i40e_update_vsi_stats(vsi);
1119 }
1120
1121 /**
1122 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1123 * @vsi: the VSI to be searched
1124 * @macaddr: the MAC address
1125 * @vlan: the vlan
1126 *
1127 * Returns ptr to the filter object or NULL
1128 **/
1129 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1130 const u8 *macaddr, s16 vlan)
1131 {
1132 struct i40e_mac_filter *f;
1133 u64 key;
1134
1135 if (!vsi || !macaddr)
1136 return NULL;
1137
1138 key = i40e_addr_to_hkey(macaddr);
1139 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1140 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1141 (vlan == f->vlan))
1142 return f;
1143 }
1144 return NULL;
1145 }
1146
1147 /**
1148 * i40e_find_mac - Find a mac addr in the macvlan filters list
1149 * @vsi: the VSI to be searched
1150 * @macaddr: the MAC address we are searching for
1151 *
1152 * Returns the first filter with the provided MAC address or NULL if
1153 * MAC address was not found
1154 **/
1155 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1156 {
1157 struct i40e_mac_filter *f;
1158 u64 key;
1159
1160 if (!vsi || !macaddr)
1161 return NULL;
1162
1163 key = i40e_addr_to_hkey(macaddr);
1164 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1165 if ((ether_addr_equal(macaddr, f->macaddr)))
1166 return f;
1167 }
1168 return NULL;
1169 }
1170
1171 /**
1172 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1173 * @vsi: the VSI to be searched
1174 *
1175 * Returns true if VSI is in vlan mode or false otherwise
1176 **/
1177 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1178 {
1179 /* If we have a PVID, always operate in VLAN mode */
1180 if (vsi->info.pvid)
1181 return true;
1182
1183 /* We need to operate in VLAN mode whenever we have any filters with
1184 * a VLAN other than I40E_VLAN_ALL. We could check the table each
1185 * time, incurring search cost repeatedly. However, we can notice two
1186 * things:
1187 *
1188 * 1) the only place where we can gain a VLAN filter is in
1189 * i40e_add_filter.
1190 *
1191 * 2) the only place where filters are actually removed is in
1192 * i40e_sync_filters_subtask.
1193 *
1194 * Thus, we can simply use a boolean value, has_vlan_filters which we
1195 * will set to true when we add a VLAN filter in i40e_add_filter. Then
1196 * we have to perform the full search after deleting filters in
1197 * i40e_sync_filters_subtask, but we already have to search
1198 * filters here and can perform the check at the same time. This
1199 * results in avoiding embedding a loop for VLAN mode inside another
1200 * loop over all the filters, and should maintain correctness as noted
1201 * above.
1202 */
1203 return vsi->has_vlan_filter;
1204 }
1205
1206 /**
1207 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
1208 * @vsi: the VSI to configure
1209 * @tmp_add_list: list of filters ready to be added
1210 * @tmp_del_list: list of filters ready to be deleted
1211 * @vlan_filters: the number of active VLAN filters
1212 *
1213 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
1214 * behave as expected. If we have any active VLAN filters remaining or about
1215 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
1216 * so that they only match against untagged traffic. If we no longer have any
1217 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
1218 * so that they match against both tagged and untagged traffic. In this way,
1219 * we ensure that we correctly receive the desired traffic. This ensures that
1220 * when we have an active VLAN we will receive only untagged traffic and
1221 * traffic matching active VLANs. If we have no active VLANs then we will
1222 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
1223 *
1224 * Finally, in a similar fashion, this function also corrects filters when
1225 * there is an active PVID assigned to this VSI.
1226 *
1227 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1228 *
1229 * This function is only expected to be called from within
1230 * i40e_sync_vsi_filters.
1231 *
1232 * NOTE: This function expects to be called while under the
1233 * mac_filter_hash_lock
1234 */
1235 static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1236 struct hlist_head *tmp_add_list,
1237 struct hlist_head *tmp_del_list,
1238 int vlan_filters)
1239 {
1240 s16 pvid = le16_to_cpu(vsi->info.pvid);
1241 struct i40e_mac_filter *f, *add_head;
1242 struct i40e_new_mac_filter *new;
1243 struct hlist_node *h;
1244 int bkt, new_vlan;
1245
1246 /* To determine if a particular filter needs to be replaced we
1247 * have the three following conditions:
1248 *
1249 * a) if we have a PVID assigned, then all filters which are
1250 * not marked as VLAN=PVID must be replaced with filters that
1251 * are.
1252 * b) otherwise, if we have any active VLANS, all filters
1253 * which are marked as VLAN=-1 must be replaced with
1254 * filters marked as VLAN=0
1255 * c) finally, if we do not have any active VLANS, all filters
1256 * which are marked as VLAN=0 must be replaced with filters
1257 * marked as VLAN=-1
1258 */
1259
1260 /* Update the filters about to be added in place */
1261 hlist_for_each_entry(new, tmp_add_list, hlist) {
1262 if (pvid && new->f->vlan != pvid)
1263 new->f->vlan = pvid;
1264 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1265 new->f->vlan = 0;
1266 else if (!vlan_filters && new->f->vlan == 0)
1267 new->f->vlan = I40E_VLAN_ANY;
1268 }
1269
1270 /* Update the remaining active filters */
1271 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1272 /* Combine the checks for whether a filter needs to be changed
1273 * and then determine the new VLAN inside the if block, in
1274 * order to avoid duplicating code for adding the new filter
1275 * then deleting the old filter.
1276 */
1277 if ((pvid && f->vlan != pvid) ||
1278 (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1279 (!vlan_filters && f->vlan == 0)) {
1280 /* Determine the new vlan we will be adding */
1281 if (pvid)
1282 new_vlan = pvid;
1283 else if (vlan_filters)
1284 new_vlan = 0;
1285 else
1286 new_vlan = I40E_VLAN_ANY;
1287
1288 /* Create the new filter */
1289 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1290 if (!add_head)
1291 return -ENOMEM;
1292
1293 /* Create a temporary i40e_new_mac_filter */
1294 new = kzalloc(sizeof(*new), GFP_ATOMIC);
1295 if (!new)
1296 return -ENOMEM;
1297
1298 new->f = add_head;
1299 new->state = add_head->state;
1300
1301 /* Add the new filter to the tmp list */
1302 hlist_add_head(&new->hlist, tmp_add_list);
1303
1304 /* Put the original filter into the delete list */
1305 f->state = I40E_FILTER_REMOVE;
1306 hash_del(&f->hlist);
1307 hlist_add_head(&f->hlist, tmp_del_list);
1308 }
1309 }
1310
1311 vsi->has_vlan_filter = !!vlan_filters;
1312
1313 return 0;
1314 }
1315
1316 /**
1317 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1318 * @vsi: the PF Main VSI - inappropriate for any other VSI
1319 * @macaddr: the MAC address
1320 *
1321 * Remove whatever filter the firmware set up so the driver can manage
1322 * its own filtering intelligently.
1323 **/
1324 static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1325 {
1326 struct i40e_aqc_remove_macvlan_element_data element;
1327 struct i40e_pf *pf = vsi->back;
1328
1329 /* Only appropriate for the PF main VSI */
1330 if (vsi->type != I40E_VSI_MAIN)
1331 return;
1332
1333 memset(&element, 0, sizeof(element));
1334 ether_addr_copy(element.mac_addr, macaddr);
1335 element.vlan_tag = 0;
1336 /* Ignore error returns, some firmware does it this way... */
1337 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1338 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1339
1340 memset(&element, 0, sizeof(element));
1341 ether_addr_copy(element.mac_addr, macaddr);
1342 element.vlan_tag = 0;
1343 /* ...and some firmware does it this way. */
1344 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1345 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1346 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1347 }
1348
1349 /**
1350 * i40e_add_filter - Add a mac/vlan filter to the VSI
1351 * @vsi: the VSI to be searched
1352 * @macaddr: the MAC address
1353 * @vlan: the vlan
1354 *
1355 * Returns ptr to the filter object or NULL when no memory available.
1356 *
1357 * NOTE: This function is expected to be called with mac_filter_hash_lock
1358 * being held.
1359 **/
1360 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1361 const u8 *macaddr, s16 vlan)
1362 {
1363 struct i40e_mac_filter *f;
1364 u64 key;
1365
1366 if (!vsi || !macaddr)
1367 return NULL;
1368
1369 f = i40e_find_filter(vsi, macaddr, vlan);
1370 if (!f) {
1371 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1372 if (!f)
1373 return NULL;
1374
1375 /* Update the boolean indicating if we need to function in
1376 * VLAN mode.
1377 */
1378 if (vlan >= 0)
1379 vsi->has_vlan_filter = true;
1380
1381 ether_addr_copy(f->macaddr, macaddr);
1382 f->vlan = vlan;
1383 /* If we're in overflow promisc mode, set the state directly
1384 * to failed, so we don't bother to try sending the filter
1385 * to the hardware.
1386 */
1387 if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
1388 f->state = I40E_FILTER_FAILED;
1389 else
1390 f->state = I40E_FILTER_NEW;
1391 INIT_HLIST_NODE(&f->hlist);
1392
1393 key = i40e_addr_to_hkey(macaddr);
1394 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1395
1396 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1397 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1398 }
1399
1400 /* If we're asked to add a filter that has been marked for removal, it
1401 * is safe to simply restore it to active state. __i40e_del_filter
1402 * will have simply deleted any filters which were previously marked
1403 * NEW or FAILED, so if it is currently marked REMOVE it must have
1404 * previously been ACTIVE. Since we haven't yet run the sync filters
1405 * task, just restore this filter to the ACTIVE state so that the
1406 * sync task leaves it in place
1407 */
1408 if (f->state == I40E_FILTER_REMOVE)
1409 f->state = I40E_FILTER_ACTIVE;
1410
1411 return f;
1412 }
1413
1414 /**
1415 * __i40e_del_filter - Remove a specific filter from the VSI
1416 * @vsi: VSI to remove from
1417 * @f: the filter to remove from the list
1418 *
1419 * This function should be called instead of i40e_del_filter only if you know
1420 * the exact filter you will remove already, such as via i40e_find_filter or
1421 * i40e_find_mac.
1422 *
1423 * NOTE: This function is expected to be called with mac_filter_hash_lock
1424 * being held.
1425 * ANOTHER NOTE: This function MUST be called from within the context of
1426 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1427 * instead of list_for_each_entry().
1428 **/
1429 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1430 {
1431 if (!f)
1432 return;
1433
1434 /* If the filter was never added to firmware then we can just delete it
1435 * directly and we don't want to set the status to remove or else an
1436 * admin queue command will unnecessarily fire.
1437 */
1438 if ((f->state == I40E_FILTER_FAILED) ||
1439 (f->state == I40E_FILTER_NEW)) {
1440 hash_del(&f->hlist);
1441 kfree(f);
1442 } else {
1443 f->state = I40E_FILTER_REMOVE;
1444 }
1445
1446 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1447 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1448 }
1449
1450 /**
1451 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
1452 * @vsi: the VSI to be searched
1453 * @macaddr: the MAC address
1454 * @vlan: the VLAN
1455 *
1456 * NOTE: This function is expected to be called with mac_filter_hash_lock
1457 * being held.
1458 * ANOTHER NOTE: This function MUST be called from within the context of
1459 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1460 * instead of list_for_each_entry().
1461 **/
1462 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1463 {
1464 struct i40e_mac_filter *f;
1465
1466 if (!vsi || !macaddr)
1467 return;
1468
1469 f = i40e_find_filter(vsi, macaddr, vlan);
1470 __i40e_del_filter(vsi, f);
1471 }
1472
1473 /**
1474 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
1475 * @vsi: the VSI to be searched
1476 * @macaddr: the mac address to be filtered
1477 *
1478 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
1479 * go through all the macvlan filters and add a macvlan filter for each
1480 * unique vlan that already exists. If a PVID has been assigned, instead only
1481 * add the macaddr to that VLAN.
1482 *
1483 * Returns last filter added on success, else NULL
1484 **/
1485 struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1486 const u8 *macaddr)
1487 {
1488 struct i40e_mac_filter *f, *add = NULL;
1489 struct hlist_node *h;
1490 int bkt;
1491
1492 if (vsi->info.pvid)
1493 return i40e_add_filter(vsi, macaddr,
1494 le16_to_cpu(vsi->info.pvid));
1495
1496 if (!i40e_is_vsi_in_vlan(vsi))
1497 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1498
1499 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1500 if (f->state == I40E_FILTER_REMOVE)
1501 continue;
1502 add = i40e_add_filter(vsi, macaddr, f->vlan);
1503 if (!add)
1504 return NULL;
1505 }
1506
1507 return add;
1508 }
1509
1510 /**
1511 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
1512 * @vsi: the VSI to be searched
1513 * @macaddr: the mac address to be removed
1514 *
1515 * Removes a given MAC address from a VSI regardless of what VLAN it has been
1516 * associated with.
1517 *
1518 * Returns 0 for success, or error
1519 **/
1520 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1521 {
1522 struct i40e_mac_filter *f;
1523 struct hlist_node *h;
1524 bool found = false;
1525 int bkt;
1526
1527 WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
1528 "Missing mac_filter_hash_lock\n");
1529 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1530 if (ether_addr_equal(macaddr, f->macaddr)) {
1531 __i40e_del_filter(vsi, f);
1532 found = true;
1533 }
1534 }
1535
1536 if (found)
1537 return 0;
1538 else
1539 return -ENOENT;
1540 }
1541
1542 /**
1543 * i40e_set_mac - NDO callback to set mac address
1544 * @netdev: network interface device structure
1545 * @p: pointer to an address structure
1546 *
1547 * Returns 0 on success, negative on failure
1548 **/
1549 static int i40e_set_mac(struct net_device *netdev, void *p)
1550 {
1551 struct i40e_netdev_priv *np = netdev_priv(netdev);
1552 struct i40e_vsi *vsi = np->vsi;
1553 struct i40e_pf *pf = vsi->back;
1554 struct i40e_hw *hw = &pf->hw;
1555 struct sockaddr *addr = p;
1556
1557 if (!is_valid_ether_addr(addr->sa_data))
1558 return -EADDRNOTAVAIL;
1559
1560 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1561 netdev_info(netdev, "already using mac address %pM\n",
1562 addr->sa_data);
1563 return 0;
1564 }
1565
1566 if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
1567 test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
1568 return -EADDRNOTAVAIL;
1569
1570 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1571 netdev_info(netdev, "returning to hw mac address %pM\n",
1572 hw->mac.addr);
1573 else
1574 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1575
1576 spin_lock_bh(&vsi->mac_filter_hash_lock);
1577 i40e_del_mac_filter(vsi, netdev->dev_addr);
1578 i40e_add_mac_filter(vsi, addr->sa_data);
1579 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1580 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1581 if (vsi->type == I40E_VSI_MAIN) {
1582 i40e_status ret;
1583
1584 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1585 I40E_AQC_WRITE_TYPE_LAA_WOL,
1586 addr->sa_data, NULL);
1587 if (ret)
1588 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1589 i40e_stat_str(hw, ret),
1590 i40e_aq_str(hw, hw->aq.asq_last_status));
1591 }
1592
1593 /* schedule our worker thread which will take care of
1594 * applying the new filter changes
1595 */
1596 i40e_service_event_schedule(vsi->back);
1597 return 0;
1598 }
1599
1600 /**
1601 * i40e_config_rss_aq - Prepare for RSS using AQ commands
1602 * @vsi: vsi structure
1603 * @seed: RSS hash seed
1604 **/
1605 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1606 u8 *lut, u16 lut_size)
1607 {
1608 struct i40e_pf *pf = vsi->back;
1609 struct i40e_hw *hw = &pf->hw;
1610 int ret = 0;
1611
1612 if (seed) {
1613 struct i40e_aqc_get_set_rss_key_data *seed_dw =
1614 (struct i40e_aqc_get_set_rss_key_data *)seed;
1615 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1616 if (ret) {
1617 dev_info(&pf->pdev->dev,
1618 "Cannot set RSS key, err %s aq_err %s\n",
1619 i40e_stat_str(hw, ret),
1620 i40e_aq_str(hw, hw->aq.asq_last_status));
1621 return ret;
1622 }
1623 }
1624 if (lut) {
1625 bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
1626
1627 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1628 if (ret) {
1629 dev_info(&pf->pdev->dev,
1630 "Cannot set RSS lut, err %s aq_err %s\n",
1631 i40e_stat_str(hw, ret),
1632 i40e_aq_str(hw, hw->aq.asq_last_status));
1633 return ret;
1634 }
1635 }
1636 return ret;
1637 }
1638
1639 /**
1640 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
1641 * @vsi: VSI structure
1642 **/
1643 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
1644 {
1645 struct i40e_pf *pf = vsi->back;
1646 u8 seed[I40E_HKEY_ARRAY_SIZE];
1647 u8 *lut;
1648 int ret;
1649
1650 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
1651 return 0;
1652 if (!vsi->rss_size)
1653 vsi->rss_size = min_t(int, pf->alloc_rss_size,
1654 vsi->num_queue_pairs);
1655 if (!vsi->rss_size)
1656 return -EINVAL;
1657 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1658 if (!lut)
1659 return -ENOMEM;
1660
1661 /* Use the user configured hash keys and lookup table if there is one,
1662 * otherwise use default
1663 */
1664 if (vsi->rss_lut_user)
1665 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1666 else
1667 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
1668 if (vsi->rss_hkey_user)
1669 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
1670 else
1671 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
1672 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
1673 kfree(lut);
1674 return ret;
1675 }
1676
1677 /**
1678 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
1679 * @vsi: the VSI being configured,
1680 * @ctxt: VSI context structure
1681 * @enabled_tc: number of traffic classes to enable
1682 *
1683 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
1684 **/
1685 static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1686 struct i40e_vsi_context *ctxt,
1687 u8 enabled_tc)
1688 {
1689 u16 qcount = 0, max_qcount, qmap, sections = 0;
1690 int i, override_q, pow, num_qps, ret;
1691 u8 netdev_tc = 0, offset = 0;
1692
1693 if (vsi->type != I40E_VSI_MAIN)
1694 return -EINVAL;
1695 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1696 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1697 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1698 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1699 num_qps = vsi->mqprio_qopt.qopt.count[0];
1700
1701 /* find the next higher power-of-2 of num queue pairs */
1702 pow = ilog2(num_qps);
1703 if (!is_power_of_2(num_qps))
1704 pow++;
1705 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1706 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
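/* Worked example (illustrative values only): with num_qps = 6,
 * ilog2(6) = 2 and 6 is not a power of two, so pow becomes 3 and the
 * qmap describes 2^3 = 8 contiguous queues starting at offset 0.
 */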
1707
1708 /* Setup queue offset/count for all TCs for given VSI */
1709 max_qcount = vsi->mqprio_qopt.qopt.count[0];
1710 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1711 /* See if the given TC is enabled for the given VSI */
1712 if (vsi->tc_config.enabled_tc & BIT(i)) {
1713 offset = vsi->mqprio_qopt.qopt.offset[i];
1714 qcount = vsi->mqprio_qopt.qopt.count[i];
1715 if (qcount > max_qcount)
1716 max_qcount = qcount;
1717 vsi->tc_config.tc_info[i].qoffset = offset;
1718 vsi->tc_config.tc_info[i].qcount = qcount;
1719 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1720 } else {
1721 /* TC is not enabled so set the offset to
1722 * default queue and allocate one queue
1723 * for the given TC.
1724 */
1725 vsi->tc_config.tc_info[i].qoffset = 0;
1726 vsi->tc_config.tc_info[i].qcount = 1;
1727 vsi->tc_config.tc_info[i].netdev_tc = 0;
1728 }
1729 }
1730
1731 /* Set actual Tx/Rx queue pairs */
1732 vsi->num_queue_pairs = offset + qcount;
1733
1734 /* Setup queue TC[0].qmap for given VSI context */
1735 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1736 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1737 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1738 ctxt->info.valid_sections |= cpu_to_le16(sections);
1739
1740 /* Reconfigure RSS for main VSI with max queue count */
1741 vsi->rss_size = max_qcount;
1742 ret = i40e_vsi_config_rss(vsi);
1743 if (ret) {
1744 dev_info(&vsi->back->pdev->dev,
1745 "Failed to reconfig rss for num_queues (%u)\n",
1746 max_qcount);
1747 return ret;
1748 }
1749 vsi->reconfig_rss = true;
1750 dev_dbg(&vsi->back->pdev->dev,
1751 "Reconfigured rss with num_queues (%u)\n", max_qcount);
1752
1753 /* Find queue count available for channel VSIs and starting offset
1754 * for channel VSIs
1755 */
1756 override_q = vsi->mqprio_qopt.qopt.count[0];
1757 if (override_q && override_q < vsi->num_queue_pairs) {
1758 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1759 vsi->next_base_queue = override_q;
1760 }
1761 return 0;
1762 }
1763
1764 /**
1765 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1766 * @vsi: the VSI being setup
1767 * @ctxt: VSI context structure
1768 * @enabled_tc: Enabled TCs bitmap
1769 * @is_add: True if called before Add VSI
1770 *
1771 * Setup VSI queue mapping for enabled traffic classes.
1772 **/
1773 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1774 struct i40e_vsi_context *ctxt,
1775 u8 enabled_tc,
1776 bool is_add)
1777 {
1778 struct i40e_pf *pf = vsi->back;
1779 u16 sections = 0;
1780 u8 netdev_tc = 0;
1781 u16 numtc = 0;
1782 u16 qcount;
1783 u8 offset;
1784 u16 qmap;
1785 int i;
1786 u16 num_tc_qps = 0;
1787
1788 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1789 offset = 0;
1790
1791 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1792 /* Find numtc from enabled TC bitmap */
1793 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1794 if (enabled_tc & BIT(i)) /* TC is enabled */
1795 numtc++;
1796 }
1797 if (!numtc) {
1798 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1799 numtc = 1;
1800 }
1801 } else {
1802 /* At least TC0 is enabled in non-DCB, non-MQPRIO case */
1803 numtc = 1;
1804 }
1805
1806 vsi->tc_config.numtc = numtc;
1807 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1808 /* Number of queues per enabled TC */
1809 qcount = vsi->alloc_queue_pairs;
1810
1811 num_tc_qps = qcount / numtc;
1812 num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
1813
1814 /* Setup queue offset/count for all TCs for given VSI */
1815 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1816 /* See if the given TC is enabled for the given VSI */
1817 if (vsi->tc_config.enabled_tc & BIT(i)) {
1818 /* TC is enabled */
1819 int pow, num_qps;
1820
1821 switch (vsi->type) {
1822 case I40E_VSI_MAIN:
1823 qcount = min_t(int, pf->alloc_rss_size,
1824 num_tc_qps);
1825 break;
1826 case I40E_VSI_FDIR:
1827 case I40E_VSI_SRIOV:
1828 case I40E_VSI_VMDQ2:
1829 default:
1830 qcount = num_tc_qps;
1831 WARN_ON(i != 0);
1832 break;
1833 }
1834 vsi->tc_config.tc_info[i].qoffset = offset;
1835 vsi->tc_config.tc_info[i].qcount = qcount;
1836
1837 /* find the next higher power-of-2 of num queue pairs */
1838 num_qps = qcount;
1839 pow = 0;
1840 while (num_qps && (BIT_ULL(pow) < qcount)) {
1841 pow++;
1842 num_qps >>= 1;
1843 }
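/* Worked example (illustrative values only): for qcount = 5 the loop
 * exits once BIT_ULL(3) = 8 >= 5, so pow = 3 and the qmap below
 * advertises 2^3 queues for this TC even though only 5 are used.
 */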
1844
1845 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1846 qmap =
1847 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1848 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1849
1850 offset += qcount;
1851 } else {
1852 /* TC is not enabled so set the offset to
1853 * default queue and allocate one queue
1854 * for the given TC.
1855 */
1856 vsi->tc_config.tc_info[i].qoffset = 0;
1857 vsi->tc_config.tc_info[i].qcount = 1;
1858 vsi->tc_config.tc_info[i].netdev_tc = 0;
1859
1860 qmap = 0;
1861 }
1862 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1863 }
1864
1865 /* Set actual Tx/Rx queue pairs */
1866 vsi->num_queue_pairs = offset;
1867 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1868 if (vsi->req_queue_pairs > 0)
1869 vsi->num_queue_pairs = vsi->req_queue_pairs;
1870 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1871 vsi->num_queue_pairs = pf->num_lan_msix;
1872 }
1873
1874 /* Scheduler section valid can only be set for ADD VSI */
1875 if (is_add) {
1876 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1877
1878 ctxt->info.up_enable_bits = enabled_tc;
1879 }
1880 if (vsi->type == I40E_VSI_SRIOV) {
1881 ctxt->info.mapping_flags |=
1882 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1883 for (i = 0; i < vsi->num_queue_pairs; i++)
1884 ctxt->info.queue_mapping[i] =
1885 cpu_to_le16(vsi->base_queue + i);
1886 } else {
1887 ctxt->info.mapping_flags |=
1888 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1889 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1890 }
1891 ctxt->info.valid_sections |= cpu_to_le16(sections);
1892 }
1893
1894 /**
1895 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1896 * @netdev: the netdevice
1897 * @addr: address to add
1898 *
1899 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1900 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1901 */
1902 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1903 {
1904 struct i40e_netdev_priv *np = netdev_priv(netdev);
1905 struct i40e_vsi *vsi = np->vsi;
1906
1907 if (i40e_add_mac_filter(vsi, addr))
1908 return 0;
1909 else
1910 return -ENOMEM;
1911 }
1912
1913 /**
1914 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1915 * @netdev: the netdevice
1916 * @addr: address to be removed
1917 *
1918 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1919 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1920 */
1921 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1922 {
1923 struct i40e_netdev_priv *np = netdev_priv(netdev);
1924 struct i40e_vsi *vsi = np->vsi;
1925
1926 i40e_del_mac_filter(vsi, addr);
1927
1928 return 0;
1929 }
1930
1931 /**
1932 * i40e_set_rx_mode - NDO callback to set the netdev filters
1933 * @netdev: network interface device structure
1934 **/
1935 static void i40e_set_rx_mode(struct net_device *netdev)
1936 {
1937 struct i40e_netdev_priv *np = netdev_priv(netdev);
1938 struct i40e_vsi *vsi = np->vsi;
1939
1940 spin_lock_bh(&vsi->mac_filter_hash_lock);
1941
1942 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1943 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1944
1945 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1946
1947 /* check for other flag changes */
1948 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1949 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1950 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1951 }
1952 }
1953
1954 /**
1955 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1956 * @vsi: Pointer to VSI struct
1957 * @from: Pointer to list which contains MAC filter entries - changes to
1958 * those entries needs to be undone.
1959 *
1960 * MAC filter entries from this list were slated for deletion.
1961 **/
1962 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1963 struct hlist_head *from)
1964 {
1965 struct i40e_mac_filter *f;
1966 struct hlist_node *h;
1967
1968 hlist_for_each_entry_safe(f, h, from, hlist) {
1969 u64 key = i40e_addr_to_hkey(f->macaddr);
1970
1971 /* Move the element back into MAC filter list*/
1972 hlist_del(&f->hlist);
1973 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1974 }
1975 }
1976
1977 /**
1978 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
1979 * @vsi: Pointer to vsi struct
1980 * @from: Pointer to list which contains MAC filter entries - changes to
1981 * those entries need to be undone.
1982 *
1983 * MAC filter entries from this list were slated for addition.
1984 **/
1985 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
1986 struct hlist_head *from)
1987 {
1988 struct i40e_new_mac_filter *new;
1989 struct hlist_node *h;
1990
1991 hlist_for_each_entry_safe(new, h, from, hlist) {
1992 /* We can simply free the wrapper structure */
1993 hlist_del(&new->hlist);
1994 kfree(new);
1995 }
1996 }
1997
1998 /**
1999 * i40e_next_filter - Get the next non-broadcast filter from a list
2000 * @next: pointer to filter in list
2001 *
2002 * Returns the next non-broadcast filter in the list. Required so that we
2003 * ignore broadcast filters within the list, since these are not handled via
2004 * the normal firmware update path.
2005 */
2006 static
2007 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2008 {
2009 hlist_for_each_entry_continue(next, hlist) {
2010 if (!is_broadcast_ether_addr(next->f->macaddr))
2011 return next;
2012 }
2013
2014 return NULL;
2015 }
2016
2017 /**
2018 * i40e_update_filter_state - Update filter state based on return data
2019 * from firmware
2020 * @count: Number of filters added
2021 * @add_list: return data from fw
2022 * @add_head: pointer to first filter in current batch
2023 *
2024 * MAC filter entries from list were slated to be added to device. Returns
2025 * number of successful filters. Note that 0 does NOT mean success!
2026 **/
2027 static int
2028 i40e_update_filter_state(int count,
2029 struct i40e_aqc_add_macvlan_element_data *add_list,
2030 struct i40e_new_mac_filter *add_head)
2031 {
2032 int retval = 0;
2033 int i;
2034
2035 for (i = 0; i < count; i++) {
2036 /* Always check status of each filter. We don't need to check
2037 * the firmware return status because we pre-set each entry's
2038 * match_method to I40E_AQC_MM_ERR_NO_RES when sending the filter
2039 * request to the adminq. Thus, if it no longer matches then
2040 * we know the filter is active.
2041 */
2042 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2043 add_head->state = I40E_FILTER_FAILED;
2044 } else {
2045 add_head->state = I40E_FILTER_ACTIVE;
2046 retval++;
2047 }
2048
2049 add_head = i40e_next_filter(add_head);
2050 if (!add_head)
2051 break;
2052 }
2053
2054 return retval;
2055 }
2056
2057 /**
2058 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2059 * @vsi: ptr to the VSI
2060 * @vsi_name: name to display in messages
2061 * @list: the list of filters to send to firmware
2062 * @num_del: the number of filters to delete
2063 * @retval: Set to -EIO on failure to delete
2064 *
2065 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2066 * *retval instead of a return value so that success does not force *retval to
2067 * be set to 0. This ensures that a sequence of calls to this function
2068 * preserve the previous value of *retval on successful delete.
2069 */
2070 static
2071 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2072 struct i40e_aqc_remove_macvlan_element_data *list,
2073 int num_del, int *retval)
2074 {
2075 struct i40e_hw *hw = &vsi->back->hw;
2076 i40e_status aq_ret;
2077 int aq_err;
2078
2079 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2080 aq_err = hw->aq.asq_last_status;
2081
2082 /* Explicitly ignore and do not report when firmware returns ENOENT */
2083 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2084 *retval = -EIO;
2085 dev_info(&vsi->back->pdev->dev,
2086 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2087 vsi_name, i40e_stat_str(hw, aq_ret),
2088 i40e_aq_str(hw, aq_err));
2089 }
2090 }
2091
2092 /**
2093 * i40e_aqc_add_filters - Request firmware to add a set of filters
2094 * @vsi: ptr to the VSI
2095 * @vsi_name: name to display in messages
2096 * @list: the list of filters to send to firmware
2097 * @add_head: Position in the add hlist
2098 * @num_add: the number of filters to add
2099 * @promisc_changed: set to true on exit if promiscuous mode was forced on
2100 *
2101 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2102 * promisc_changed to true if the firmware has run out of space for more
2103 * filters.
2104 */
2105 static
2106 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2107 struct i40e_aqc_add_macvlan_element_data *list,
2108 struct i40e_new_mac_filter *add_head,
2109 int num_add, bool *promisc_changed)
2110 {
2111 struct i40e_hw *hw = &vsi->back->hw;
2112 int aq_err, fcnt;
2113
2114 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2115 aq_err = hw->aq.asq_last_status;
2116 fcnt = i40e_update_filter_state(num_add, list, add_head);
2117
2118 if (fcnt != num_add) {
2119 *promisc_changed = true;
2120 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2121 dev_warn(&vsi->back->pdev->dev,
2122 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2123 i40e_aq_str(hw, aq_err),
2124 vsi_name);
2125 }
2126 }
2127
2128 /**
2129 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2130 * @vsi: pointer to the VSI
* @vsi_name: the VSI name, used in log messages
2131 * @f: filter data
2132 *
2133 * This function sets or clears the promiscuous broadcast flags for VLAN
2134 * filters in order to properly receive broadcast frames. Assumes that only
2135 * broadcast filters are passed.
2136 *
2137 * Returns status indicating success or failure.
2138 **/
2139 static i40e_status
2140 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2141 struct i40e_mac_filter *f)
2142 {
2143 bool enable = f->state == I40E_FILTER_NEW;
2144 struct i40e_hw *hw = &vsi->back->hw;
2145 i40e_status aq_ret;
2146
2147 if (f->vlan == I40E_VLAN_ANY) {
2148 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2149 vsi->seid,
2150 enable,
2151 NULL);
2152 } else {
2153 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2154 vsi->seid,
2155 enable,
2156 f->vlan,
2157 NULL);
2158 }
2159
2160 if (aq_ret)
2161 dev_warn(&vsi->back->pdev->dev,
2162 "Error %s setting broadcast promiscuous mode on %s\n",
2163 i40e_aq_str(hw, hw->aq.asq_last_status),
2164 vsi_name);
2165
2166 return aq_ret;
2167 }
2168
2169 /**
2170 * i40e_set_promiscuous - set promiscuous mode
2171 * @pf: board private structure
2172 * @promisc: promisc on or off
2173 *
2174 * There are different ways of setting promiscuous mode on a PF depending on
2175 * what state/environment we're in. This identifies and sets it appropriately.
2176 * Returns 0 on success.
2177 **/
2178 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2179 {
2180 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2181 struct i40e_hw *hw = &pf->hw;
2182 i40e_status aq_ret;
2183
2184 if (vsi->type == I40E_VSI_MAIN &&
2185 pf->lan_veb != I40E_NO_VEB &&
2186 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2187 /* set defport ON for Main VSI instead of true promisc
2188 * this way we will get all unicast/multicast and VLAN
2189 * promisc behavior but will not get VF or VMDq traffic
2190 * replicated on the Main VSI.
2191 */
2192 if (promisc)
2193 aq_ret = i40e_aq_set_default_vsi(hw,
2194 vsi->seid,
2195 NULL);
2196 else
2197 aq_ret = i40e_aq_clear_default_vsi(hw,
2198 vsi->seid,
2199 NULL);
2200 if (aq_ret) {
2201 dev_info(&pf->pdev->dev,
2202 "Set default VSI failed, err %s, aq_err %s\n",
2203 i40e_stat_str(hw, aq_ret),
2204 i40e_aq_str(hw, hw->aq.asq_last_status));
2205 }
2206 } else {
2207 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2208 hw,
2209 vsi->seid,
2210 promisc, NULL,
2211 true);
2212 if (aq_ret) {
2213 dev_info(&pf->pdev->dev,
2214 "set unicast promisc failed, err %s, aq_err %s\n",
2215 i40e_stat_str(hw, aq_ret),
2216 i40e_aq_str(hw, hw->aq.asq_last_status));
2217 }
2218 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2219 hw,
2220 vsi->seid,
2221 promisc, NULL);
2222 if (aq_ret) {
2223 dev_info(&pf->pdev->dev,
2224 "set multicast promisc failed, err %s, aq_err %s\n",
2225 i40e_stat_str(hw, aq_ret),
2226 i40e_aq_str(hw, hw->aq.asq_last_status));
2227 }
2228 }
2229
2230 if (!aq_ret)
2231 pf->cur_promisc = promisc;
2232
2233 return aq_ret;
2234 }
2235
2236 /**
2237 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2238 * @vsi: ptr to the VSI
2239 *
2240 * Push any outstanding VSI filter changes through the AdminQ.
2241 *
2242 * Returns 0 or error value
2243 **/
2244 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2245 {
2246 struct hlist_head tmp_add_list, tmp_del_list;
2247 struct i40e_mac_filter *f;
2248 struct i40e_new_mac_filter *new, *add_head = NULL;
2249 struct i40e_hw *hw = &vsi->back->hw;
2250 unsigned int failed_filters = 0;
2251 unsigned int vlan_filters = 0;
2252 bool promisc_changed = false;
2253 char vsi_name[16] = "PF";
2254 int filter_list_len = 0;
2255 i40e_status aq_ret = 0;
2256 u32 changed_flags = 0;
2257 struct hlist_node *h;
2258 struct i40e_pf *pf;
2259 int num_add = 0;
2260 int num_del = 0;
2261 int retval = 0;
2262 u16 cmd_flags;
2263 int list_size;
2264 int bkt;
2265
2266 /* empty array-typed pointers, allocated with kzalloc later */
2267 struct i40e_aqc_add_macvlan_element_data *add_list;
2268 struct i40e_aqc_remove_macvlan_element_data *del_list;
2269
2270 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2271 usleep_range(1000, 2000);
2272 pf = vsi->back;
2273
2274 if (vsi->netdev) {
2275 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2276 vsi->current_netdev_flags = vsi->netdev->flags;
2277 }
2278
2279 INIT_HLIST_HEAD(&tmp_add_list);
2280 INIT_HLIST_HEAD(&tmp_del_list);
2281
2282 if (vsi->type == I40E_VSI_SRIOV)
2283 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2284 else if (vsi->type != I40E_VSI_MAIN)
2285 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2286
2287 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2288 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2289
2290 spin_lock_bh(&vsi->mac_filter_hash_lock);
2291 /* Create a list of filters to delete. */
2292 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2293 if (f->state == I40E_FILTER_REMOVE) {
2294 /* Move the element into temporary del_list */
2295 hash_del(&f->hlist);
2296 hlist_add_head(&f->hlist, &tmp_del_list);
2297
2298 /* Avoid counting removed filters */
2299 continue;
2300 }
2301 if (f->state == I40E_FILTER_NEW) {
2302 /* Create a temporary i40e_new_mac_filter */
2303 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2304 if (!new)
2305 goto err_no_memory_locked;
2306
2307 /* Store pointer to the real filter */
2308 new->f = f;
2309 new->state = f->state;
2310
2311 /* Add it to the hash list */
2312 hlist_add_head(&new->hlist, &tmp_add_list);
2313 }
2314
2315 /* Count the number of active (current and new) VLAN
2316 * filters we have now. Does not count filters which
2317 * are marked for deletion.
2318 */
2319 if (f->vlan > 0)
2320 vlan_filters++;
2321 }
2322
2323 retval = i40e_correct_mac_vlan_filters(vsi,
2324 &tmp_add_list,
2325 &tmp_del_list,
2326 vlan_filters);
2327 if (retval)
2328 goto err_no_memory_locked;
2329
2330 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2331 }
2332
2333 /* Now process 'del_list' outside the lock */
2334 if (!hlist_empty(&tmp_del_list)) {
2335 filter_list_len = hw->aq.asq_buf_size /
2336 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2337 list_size = filter_list_len *
2338 sizeof(struct i40e_aqc_remove_macvlan_element_data);
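/* Sizing sketch (illustrative numbers only): if the admin queue buffer
 * were 4096 bytes and each remove_macvlan element 16 bytes,
 * filter_list_len would be 256 and list_size 4096, so deletions are
 * flushed to firmware in chunks of 256 filters.
 */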
2339 del_list = kzalloc(list_size, GFP_ATOMIC);
2340 if (!del_list)
2341 goto err_no_memory;
2342
2343 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2344 cmd_flags = 0;
2345
2346 /* handle broadcast filters by updating the broadcast
2347 * promiscuous flag, then release the filter from the list.
2348 */
2349 if (is_broadcast_ether_addr(f->macaddr)) {
2350 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2351
2352 hlist_del(&f->hlist);
2353 kfree(f);
2354 continue;
2355 }
2356
2357 /* add to delete list */
2358 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2359 if (f->vlan == I40E_VLAN_ANY) {
2360 del_list[num_del].vlan_tag = 0;
2361 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2362 } else {
2363 del_list[num_del].vlan_tag =
2364 cpu_to_le16((u16)(f->vlan));
2365 }
2366
2367 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2368 del_list[num_del].flags = cmd_flags;
2369 num_del++;
2370
2371 /* flush a full buffer */
2372 if (num_del == filter_list_len) {
2373 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2374 num_del, &retval);
2375 memset(del_list, 0, list_size);
2376 num_del = 0;
2377 }
2378 /* Release memory for MAC filter entries which were
2379 * synced up with HW.
2380 */
2381 hlist_del(&f->hlist);
2382 kfree(f);
2383 }
2384
2385 if (num_del) {
2386 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2387 num_del, &retval);
2388 }
2389
2390 kfree(del_list);
2391 del_list = NULL;
2392 }
2393
2394 if (!hlist_empty(&tmp_add_list)) {
2395 /* Do all the adds now. */
2396 filter_list_len = hw->aq.asq_buf_size /
2397 sizeof(struct i40e_aqc_add_macvlan_element_data);
2398 list_size = filter_list_len *
2399 sizeof(struct i40e_aqc_add_macvlan_element_data);
2400 add_list = kzalloc(list_size, GFP_ATOMIC);
2401 if (!add_list)
2402 goto err_no_memory;
2403
2404 num_add = 0;
2405 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2406 if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
2407 vsi->state)) {
2408 new->state = I40E_FILTER_FAILED;
2409 continue;
2410 }
2411
2412 /* handle broadcast filters by updating the broadcast
2413 * promiscuous flag instead of adding a MAC filter.
2414 */
2415 if (is_broadcast_ether_addr(new->f->macaddr)) {
2416 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2417 new->f))
2418 new->state = I40E_FILTER_FAILED;
2419 else
2420 new->state = I40E_FILTER_ACTIVE;
2421 continue;
2422 }
2423
2424 /* add to add array */
2425 if (num_add == 0)
2426 add_head = new;
2427 cmd_flags = 0;
2428 ether_addr_copy(add_list[num_add].mac_addr,
2429 new->f->macaddr);
2430 if (new->f->vlan == I40E_VLAN_ANY) {
2431 add_list[num_add].vlan_tag = 0;
2432 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2433 } else {
2434 add_list[num_add].vlan_tag =
2435 cpu_to_le16((u16)(new->f->vlan));
2436 }
2437 add_list[num_add].queue_number = 0;
2438 /* set invalid match method for later detection */
2439 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2440 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2441 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2442 num_add++;
2443
2444 /* flush a full buffer */
2445 if (num_add == filter_list_len) {
2446 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2447 add_head, num_add,
2448 &promisc_changed);
2449 memset(add_list, 0, list_size);
2450 num_add = 0;
2451 }
2452 }
2453 if (num_add) {
2454 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2455 num_add, &promisc_changed);
2456 }
2457 /* Now move all of the filters from the temp add list back to
2458 * the VSI's list.
2459 */
2460 spin_lock_bh(&vsi->mac_filter_hash_lock);
2461 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2462 /* Only update the state if we're still NEW */
2463 if (new->f->state == I40E_FILTER_NEW)
2464 new->f->state = new->state;
2465 hlist_del(&new->hlist);
2466 kfree(new);
2467 }
2468 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2469 kfree(add_list);
2470 add_list = NULL;
2471 }
2472
2473 /* Determine the number of active and failed filters. */
2474 spin_lock_bh(&vsi->mac_filter_hash_lock);
2475 vsi->active_filters = 0;
2476 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2477 if (f->state == I40E_FILTER_ACTIVE)
2478 vsi->active_filters++;
2479 else if (f->state == I40E_FILTER_FAILED)
2480 failed_filters++;
2481 }
2482 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2483
2484 /* If promiscuous mode has changed, we need to calculate a new
2485 * threshold for when we are safe to exit
2486 */
2487 if (promisc_changed)
2488 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
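/* Worked example (illustrative values): with 40 active filters the
 * threshold becomes 40 * 3 / 4 = 30, so overflow promiscuous mode is
 * only left once no filters have failed and fewer than 30 active
 * filters remain.
 */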
2489
2490 /* Check if we are able to exit overflow promiscuous mode. We can
2491 * safely exit if we didn't just enter, we no longer have any failed
2492 * filters, and we have reduced filters below the threshold value.
2493 */
2494 if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
2495 !promisc_changed && !failed_filters &&
2496 (vsi->active_filters < vsi->promisc_threshold)) {
2497 dev_info(&pf->pdev->dev,
2498 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2499 vsi_name);
2500 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2501 promisc_changed = true;
2502 vsi->promisc_threshold = 0;
2503 }
2504
2505 /* if the VF is not trusted do not do promisc */
2506 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2507 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2508 goto out;
2509 }
2510
2511 /* check for changes in promiscuous modes */
2512 if (changed_flags & IFF_ALLMULTI) {
2513 bool cur_multipromisc;
2514
2515 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2516 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2517 vsi->seid,
2518 cur_multipromisc,
2519 NULL);
2520 if (aq_ret) {
2521 retval = i40e_aq_rc_to_posix(aq_ret,
2522 hw->aq.asq_last_status);
2523 dev_info(&pf->pdev->dev,
2524 "set multi promisc failed on %s, err %s aq_err %s\n",
2525 vsi_name,
2526 i40e_stat_str(hw, aq_ret),
2527 i40e_aq_str(hw, hw->aq.asq_last_status));
2528 }
2529 }
2530
2531 if ((changed_flags & IFF_PROMISC) || promisc_changed) {
2532 bool cur_promisc;
2533
2534 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2535 test_bit(__I40E_VSI_OVERFLOW_PROMISC,
2536 vsi->state));
2537 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2538 if (aq_ret) {
2539 retval = i40e_aq_rc_to_posix(aq_ret,
2540 hw->aq.asq_last_status);
2541 dev_info(&pf->pdev->dev,
2542 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2543 cur_promisc ? "on" : "off",
2544 vsi_name,
2545 i40e_stat_str(hw, aq_ret),
2546 i40e_aq_str(hw, hw->aq.asq_last_status));
2547 }
2548 }
2549 out:
2550 /* if something went wrong then set the changed flag so we try again */
2551 if (retval)
2552 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2553
2554 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2555 return retval;
2556
2557 err_no_memory:
2558 /* Restore elements on the temporary add and delete lists */
2559 spin_lock_bh(&vsi->mac_filter_hash_lock);
2560 err_no_memory_locked:
2561 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2562 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2563 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2564
2565 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2566 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2567 return -ENOMEM;
2568 }
2569
2570 /**
2571 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2572 * @pf: board private structure
2573 **/
2574 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2575 {
2576 int v;
2577
2578 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2579 return;
2580 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2581
2582 for (v = 0; v < pf->num_alloc_vsi; v++) {
2583 if (pf->vsi[v] &&
2584 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2585 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2586
2587 if (ret) {
2588 /* come back and try again later */
2589 pf->flags |= I40E_FLAG_FILTER_SYNC;
2590 break;
2591 }
2592 }
2593 }
2594 }
2595
2596 /**
2597 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2598 * @vsi: the vsi
2599 **/
2600 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2601 {
2602 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2603 return I40E_RXBUFFER_2048;
2604 else
2605 return I40E_RXBUFFER_3072;
2606 }
2607
2608 /**
2609 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2610 * @netdev: network interface device structure
2611 * @new_mtu: new value for maximum frame size
2612 *
2613 * Returns 0 on success, negative on failure
2614 **/
2615 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2616 {
2617 struct i40e_netdev_priv *np = netdev_priv(netdev);
2618 struct i40e_vsi *vsi = np->vsi;
2619 struct i40e_pf *pf = vsi->back;
2620
2621 if (i40e_enabled_xdp_vsi(vsi)) {
2622 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
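/* Worked example (illustrative values): a requested MTU of 1500 gives
 * frame_size = 1500 + 14 (ETH_HLEN) + 4 (FCS) + 4 (VLAN) = 1522 bytes,
 * which fits within either XDP buffer limit.
 */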
2623
2624 if (frame_size > i40e_max_xdp_frame_size(vsi))
2625 return -EINVAL;
2626 }
2627
2628 netdev_info(netdev, "changing MTU from %d to %d\n",
2629 netdev->mtu, new_mtu);
2630 netdev->mtu = new_mtu;
2631 if (netif_running(netdev))
2632 i40e_vsi_reinit_locked(vsi);
2633 pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
2634 I40E_FLAG_CLIENT_L2_CHANGE);
2635 return 0;
2636 }
2637
2638 /**
2639 * i40e_ioctl - Access the hwtstamp interface
2640 * @netdev: network interface device structure
2641 * @ifr: interface request data
2642 * @cmd: ioctl command
2643 **/
2644 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2645 {
2646 struct i40e_netdev_priv *np = netdev_priv(netdev);
2647 struct i40e_pf *pf = np->vsi->back;
2648
2649 switch (cmd) {
2650 case SIOCGHWTSTAMP:
2651 return i40e_ptp_get_ts_config(pf, ifr);
2652 case SIOCSHWTSTAMP:
2653 return i40e_ptp_set_ts_config(pf, ifr);
2654 default:
2655 return -EOPNOTSUPP;
2656 }
2657 }
2658
2659 /**
2660 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2661 * @vsi: the vsi being adjusted
2662 **/
2663 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2664 {
2665 struct i40e_vsi_context ctxt;
2666 i40e_status ret;
2667
2668 if ((vsi->info.valid_sections &
2669 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2670 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2671 return; /* already enabled */
2672
2673 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2674 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2675 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2676
2677 ctxt.seid = vsi->seid;
2678 ctxt.info = vsi->info;
2679 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2680 if (ret) {
2681 dev_info(&vsi->back->pdev->dev,
2682 "update vlan stripping failed, err %s aq_err %s\n",
2683 i40e_stat_str(&vsi->back->hw, ret),
2684 i40e_aq_str(&vsi->back->hw,
2685 vsi->back->hw.aq.asq_last_status));
2686 }
2687 }
2688
2689 /**
2690 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2691 * @vsi: the vsi being adjusted
2692 **/
2693 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2694 {
2695 struct i40e_vsi_context ctxt;
2696 i40e_status ret;
2697
2698 if ((vsi->info.valid_sections &
2699 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2700 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2701 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2702 return; /* already disabled */
2703
2704 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2705 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2706 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2707
2708 ctxt.seid = vsi->seid;
2709 ctxt.info = vsi->info;
2710 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2711 if (ret) {
2712 dev_info(&vsi->back->pdev->dev,
2713 "update vlan stripping failed, err %s aq_err %s\n",
2714 i40e_stat_str(&vsi->back->hw, ret),
2715 i40e_aq_str(&vsi->back->hw,
2716 vsi->back->hw.aq.asq_last_status));
2717 }
2718 }
2719
2720 /**
2721 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2722 * @netdev: network interface to be adjusted
2723 * @features: netdev features to test if VLAN offload is enabled or not
2724 **/
2725 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2726 {
2727 struct i40e_netdev_priv *np = netdev_priv(netdev);
2728 struct i40e_vsi *vsi = np->vsi;
2729
2730 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2731 i40e_vlan_stripping_enable(vsi);
2732 else
2733 i40e_vlan_stripping_disable(vsi);
2734 }
2735
2736 /**
2737 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2738 * @vsi: the vsi being configured
2739 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2740 *
2741 * This is a helper function for adding a new MAC/VLAN filter with the
2742 * specified VLAN for each existing MAC address already in the hash table.
2743 * This function does *not* perform any accounting to update filters based on
2744 * VLAN mode.
2745 *
2746 * NOTE: this function expects to be called while under the
2747 * mac_filter_hash_lock
2748 **/
2749 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2750 {
2751 struct i40e_mac_filter *f, *add_f;
2752 struct hlist_node *h;
2753 int bkt;
2754
2755 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2756 if (f->state == I40E_FILTER_REMOVE)
2757 continue;
2758 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2759 if (!add_f) {
2760 dev_info(&vsi->back->pdev->dev,
2761 "Could not add vlan filter %d for %pM\n",
2762 vid, f->macaddr);
2763 return -ENOMEM;
2764 }
2765 }
2766
2767 return 0;
2768 }
2769
2770 /**
2771 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2772 * @vsi: the VSI being configured
2773 * @vid: VLAN id to be added
2774 **/
2775 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2776 {
2777 int err;
2778
2779 if (vsi->info.pvid)
2780 return -EINVAL;
2781
2782 /* The network stack will attempt to add VID=0, with the intention to
2783 * receive priority tagged packets with a VLAN of 0. Our HW receives
2784 * these packets by default when configured to receive untagged
2785 * packets, so we don't need to add a filter for this case.
2786 * Additionally, HW interprets adding a VID=0 filter as meaning to
2787 * receive *only* tagged traffic and stops receiving untagged traffic.
2788 * Thus, we do not want to actually add a filter for VID=0
2789 */
2790 if (!vid)
2791 return 0;
2792
2793 /* Locked once because all functions invoked below iterate the list */
2794 spin_lock_bh(&vsi->mac_filter_hash_lock);
2795 err = i40e_add_vlan_all_mac(vsi, vid);
2796 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2797 if (err)
2798 return err;
2799
2800 /* schedule our worker thread which will take care of
2801 * applying the new filter changes
2802 */
2803 i40e_service_event_schedule(vsi->back);
2804 return 0;
2805 }
2806
2807 /**
2808 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2809 * @vsi: the vsi being configured
2810 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2811 *
2812 * This function should be used to remove all VLAN filters which match the
2813 * given VID. It does not schedule the service event and does not take the
2814 * mac_filter_hash_lock so it may be combined with other operations under
2815 * a single invocation of the mac_filter_hash_lock.
2816 *
2817 * NOTE: this function expects to be called while under the
2818 * mac_filter_hash_lock
2819 */
2820 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2821 {
2822 struct i40e_mac_filter *f;
2823 struct hlist_node *h;
2824 int bkt;
2825
2826 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2827 if (f->vlan == vid)
2828 __i40e_del_filter(vsi, f);
2829 }
2830 }
2831
2832 /**
2833 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2834 * @vsi: the VSI being configured
2835 * @vid: VLAN id to be removed
2836 **/
2837 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2838 {
2839 if (!vid || vsi->info.pvid)
2840 return;
2841
2842 spin_lock_bh(&vsi->mac_filter_hash_lock);
2843 i40e_rm_vlan_all_mac(vsi, vid);
2844 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2845
2846 /* schedule our worker thread which will take care of
2847 * applying the new filter changes
2848 */
2849 i40e_service_event_schedule(vsi->back);
2850 }
2851
2852 /**
2853 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2854 * @netdev: network interface to be adjusted
* @proto: VLAN protocol identifier (unused here)
2855 * @vid: vlan id to be added
2856 *
2857 * net_device_ops implementation for adding vlan ids
2858 **/
2859 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2860 __always_unused __be16 proto, u16 vid)
2861 {
2862 struct i40e_netdev_priv *np = netdev_priv(netdev);
2863 struct i40e_vsi *vsi = np->vsi;
2864 int ret = 0;
2865
2866 if (vid >= VLAN_N_VID)
2867 return -EINVAL;
2868
2869 ret = i40e_vsi_add_vlan(vsi, vid);
2870 if (!ret)
2871 set_bit(vid, vsi->active_vlans);
2872
2873 return ret;
2874 }
2875
2876 /**
2877 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2878 * @netdev: network interface to be adjusted
* @proto: VLAN protocol identifier (unused here)
2879 * @vid: vlan id to be removed
2880 *
2881 * net_device_ops implementation for removing vlan ids
2882 **/
2883 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2884 __always_unused __be16 proto, u16 vid)
2885 {
2886 struct i40e_netdev_priv *np = netdev_priv(netdev);
2887 struct i40e_vsi *vsi = np->vsi;
2888
2889 /* return code is ignored as there is nothing a user
2890 * can do about failure to remove and a log message was
2891 * already printed from the other function
2892 */
2893 i40e_vsi_kill_vlan(vsi, vid);
2894
2895 clear_bit(vid, vsi->active_vlans);
2896
2897 return 0;
2898 }
2899
2900 /**
2901 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2902 * @vsi: the vsi being brought back up
2903 **/
2904 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2905 {
2906 u16 vid;
2907
2908 if (!vsi->netdev)
2909 return;
2910
2911 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2912
2913 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2914 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2915 vid);
2916 }
2917
2918 /**
2919 * i40e_vsi_add_pvid - Add pvid for the VSI
2920 * @vsi: the vsi being adjusted
2921 * @vid: the vlan id to set as a PVID
2922 **/
2923 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2924 {
2925 struct i40e_vsi_context ctxt;
2926 i40e_status ret;
2927
2928 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2929 vsi->info.pvid = cpu_to_le16(vid);
2930 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2931 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2932 I40E_AQ_VSI_PVLAN_EMOD_STR;
2933
2934 ctxt.seid = vsi->seid;
2935 ctxt.info = vsi->info;
2936 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2937 if (ret) {
2938 dev_info(&vsi->back->pdev->dev,
2939 "add pvid failed, err %s aq_err %s\n",
2940 i40e_stat_str(&vsi->back->hw, ret),
2941 i40e_aq_str(&vsi->back->hw,
2942 vsi->back->hw.aq.asq_last_status));
2943 return -ENOENT;
2944 }
2945
2946 return 0;
2947 }
2948
2949 /**
2950 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2951 * @vsi: the vsi being adjusted
2952 *
2953 * Disable VLAN stripping and clear the stored PVID to put the VSI back to normal
2954 **/
2955 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2956 {
2957 i40e_vlan_stripping_disable(vsi);
2958
2959 vsi->info.pvid = 0;
2960 }
2961
2962 /**
2963 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2964 * @vsi: ptr to the VSI
2965 *
2966 * If this function returns with an error, then it's possible one or
2967 * more of the rings is populated (while the rest are not). It is the
2968 * caller's duty to clean those orphaned rings.
2969 *
2970 * Return 0 on success, negative on failure
2971 **/
2972 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2973 {
2974 int i, err = 0;
2975
2976 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2977 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2978
2979 if (!i40e_enabled_xdp_vsi(vsi))
2980 return err;
2981
2982 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2983 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
2984
2985 return err;
2986 }
2987
2988 /**
2989 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2990 * @vsi: ptr to the VSI
2991 *
2992 * Free VSI's transmit software resources
2993 **/
2994 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2995 {
2996 int i;
2997
2998 if (vsi->tx_rings) {
2999 for (i = 0; i < vsi->num_queue_pairs; i++)
3000 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3001 i40e_free_tx_resources(vsi->tx_rings[i]);
3002 }
3003
3004 if (vsi->xdp_rings) {
3005 for (i = 0; i < vsi->num_queue_pairs; i++)
3006 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3007 i40e_free_tx_resources(vsi->xdp_rings[i]);
3008 }
3009 }
3010
3011 /**
3012 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3013 * @vsi: ptr to the VSI
3014 *
3015 * If this function returns with an error, then it's possible one or
3016 * more of the rings is populated (while the rest are not). It is the
3017 * caller's duty to clean those orphaned rings.
3018 *
3019 * Return 0 on success, negative on failure
3020 **/
3021 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3022 {
3023 int i, err = 0;
3024
3025 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3026 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3027 return err;
3028 }
3029
3030 /**
3031 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3032 * @vsi: ptr to the VSI
3033 *
3034 * Free all receive software resources
3035 **/
3036 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3037 {
3038 int i;
3039
3040 if (!vsi->rx_rings)
3041 return;
3042
3043 for (i = 0; i < vsi->num_queue_pairs; i++)
3044 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3045 i40e_free_rx_resources(vsi->rx_rings[i]);
3046 }
3047
3048 /**
3049 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3050 * @ring: The Tx ring to configure
3051 *
3052 * This programs the XPS queue mapping for a given Tx descriptor ring so
3053 * that transmits issued from the CPU serviced by the ring's interrupt
* vector are steered to that ring.
3054 **/
3055 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3056 {
3057 int cpu;
3058
3059 if (!ring->q_vector || !ring->netdev || ring->ch)
3060 return;
3061
3062 /* We only initialize XPS once, so as not to overwrite user settings */
3063 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3064 return;
3065
3066 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3067 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3068 ring->queue_index);
3069 }
3070
3071 /**
3072 * i40e_configure_tx_ring - Configure a transmit ring context and related resources
3073 * @ring: The Tx ring to configure
3074 *
3075 * Configure the Tx descriptor ring in the HMC context.
3076 **/
3077 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3078 {
3079 struct i40e_vsi *vsi = ring->vsi;
3080 u16 pf_q = vsi->base_queue + ring->queue_index;
3081 struct i40e_hw *hw = &vsi->back->hw;
3082 struct i40e_hmc_obj_txq tx_ctx;
3083 i40e_status err = 0;
3084 u32 qtx_ctl = 0;
3085
3086 /* some ATR related tx ring init */
3087 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3088 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3089 ring->atr_count = 0;
3090 } else {
3091 ring->atr_sample_rate = 0;
3092 }
3093
3094 /* configure XPS */
3095 i40e_config_xps_tx_ring(ring);
3096
3097 /* clear the context structure first */
3098 memset(&tx_ctx, 0, sizeof(tx_ctx));
3099
3100 tx_ctx.new_context = 1;
3101 tx_ctx.base = (ring->dma / 128);
3102 tx_ctx.qlen = ring->count;
3103 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3104 I40E_FLAG_FD_ATR_ENABLED));
3105 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3106 /* FDIR VSI tx ring can still use RS bit and writebacks */
3107 if (vsi->type != I40E_VSI_FDIR)
3108 tx_ctx.head_wb_ena = 1;
3109 tx_ctx.head_wb_addr = ring->dma +
3110 (ring->count * sizeof(struct i40e_tx_desc));
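/* Worked example (illustrative ring size): each Tx descriptor is 16
 * bytes, so a 512-entry ring places the head write-back word at
 * ring->dma + 512 * 16 = ring->dma + 8192, immediately after the
 * descriptor area.
 */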
3111
3112 /* As part of VSI creation/update, FW allocates certain
3113 * Tx arbitration queue sets for each TC enabled for
3114 * the VSI. The FW returns the handles to these queue
3115 * sets as part of the response buffer to Add VSI,
3116 * Update VSI, etc. AQ commands. It is expected that
3117 * these queue set handles be associated with the Tx
3118 * queues by the driver as part of the TX queue context
3119 * initialization. This has to be done regardless of
3120 * DCB as by default everything is mapped to TC0.
3121 */
3122
3123 if (ring->ch)
3124 tx_ctx.rdylist =
3125 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3126
3127 else
3128 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3129
3130 tx_ctx.rdylist_act = 0;
3131
3132 /* clear the context in the HMC */
3133 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3134 if (err) {
3135 dev_info(&vsi->back->pdev->dev,
3136 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3137 ring->queue_index, pf_q, err);
3138 return -ENOMEM;
3139 }
3140
3141 /* set the context in the HMC */
3142 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3143 if (err) {
3144 dev_info(&vsi->back->pdev->dev,
3145 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3146 ring->queue_index, pf_q, err);
3147 return -ENOMEM;
3148 }
3149
3150 /* Now associate this queue with this PCI function */
3151 if (ring->ch) {
3152 if (ring->ch->type == I40E_VSI_VMDQ2)
3153 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3154 else
3155 return -EINVAL;
3156
3157 qtx_ctl |= (ring->ch->vsi_number <<
3158 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3159 I40E_QTX_CTL_VFVM_INDX_MASK;
3160 } else {
3161 if (vsi->type == I40E_VSI_VMDQ2) {
3162 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3163 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3164 I40E_QTX_CTL_VFVM_INDX_MASK;
3165 } else {
3166 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3167 }
3168 }
3169
3170 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3171 I40E_QTX_CTL_PF_INDX_MASK);
3172 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3173 i40e_flush(hw);
3174
3175 /* cache the tail register address for easier writes later */
3176 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3177
3178 return 0;
3179 }
3180
3181 /**
3182 * i40e_configure_rx_ring - Configure a receive ring context
3183 * @ring: The Rx ring to configure
3184 *
3185 * Configure the Rx descriptor ring in the HMC context.
3186 **/
3187 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3188 {
3189 struct i40e_vsi *vsi = ring->vsi;
3190 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3191 u16 pf_q = vsi->base_queue + ring->queue_index;
3192 struct i40e_hw *hw = &vsi->back->hw;
3193 struct i40e_hmc_obj_rxq rx_ctx;
3194 i40e_status err = 0;
3195
3196 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3197
3198 /* clear the context structure first */
3199 memset(&rx_ctx, 0, sizeof(rx_ctx));
3200
3201 ring->rx_buf_len = vsi->rx_buf_len;
3202
3203 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3204 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
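/* The data buffer size is programmed in units of
 * BIT(I40E_RXQ_CTX_DBUFF_SHIFT) bytes; e.g. (assuming 128-byte units)
 * a 2048-byte rx_buf_len is written as dbuff = 16.
 */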
3205
3206 rx_ctx.base = (ring->dma / 128);
3207 rx_ctx.qlen = ring->count;
3208
3209 /* use 32 byte descriptors */
3210 rx_ctx.dsize = 1;
3211
3212 /* descriptor type is always zero
3213 * rx_ctx.dtype = 0;
3214 */
3215 rx_ctx.hsplit_0 = 0;
3216
3217 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
3218 if (hw->revision_id == 0)
3219 rx_ctx.lrxqthresh = 0;
3220 else
3221 rx_ctx.lrxqthresh = 1;
3222 rx_ctx.crcstrip = 1;
3223 rx_ctx.l2tsel = 1;
3224 /* this controls whether VLAN is stripped from inner headers */
3225 rx_ctx.showiv = 0;
3226 /* set the prefena field to 1 because the manual says to */
3227 rx_ctx.prefena = 1;
3228
3229 /* clear the context in the HMC */
3230 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3231 if (err) {
3232 dev_info(&vsi->back->pdev->dev,
3233 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3234 ring->queue_index, pf_q, err);
3235 return -ENOMEM;
3236 }
3237
3238 /* set the context in the HMC */
3239 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3240 if (err) {
3241 dev_info(&vsi->back->pdev->dev,
3242 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3243 ring->queue_index, pf_q, err);
3244 return -ENOMEM;
3245 }
3246
3247 /* configure Rx buffer alignment */
3248 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3249 clear_ring_build_skb_enabled(ring);
3250 else
3251 set_ring_build_skb_enabled(ring);
3252
3253 /* cache tail for quicker writes, and clear the reg before use */
3254 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3255 writel(0, ring->tail);
3256
3257 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
3258
3259 return 0;
3260 }
3261
3262 /**
3263 * i40e_vsi_configure_tx - Configure the VSI for Tx
3264 * @vsi: VSI structure describing this set of rings and resources
3265 *
3266 * Configure the Tx VSI for operation.
3267 **/
3268 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3269 {
3270 int err = 0;
3271 u16 i;
3272
3273 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3274 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3275
3276 if (!i40e_enabled_xdp_vsi(vsi))
3277 return err;
3278
3279 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3280 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3281
3282 return err;
3283 }
3284
3285 /**
3286 * i40e_vsi_configure_rx - Configure the VSI for Rx
3287 * @vsi: the VSI being configured
3288 *
3289 * Configure the Rx VSI for operation.
3290 **/
3291 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3292 {
3293 int err = 0;
3294 u16 i;
3295
3296 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3297 vsi->max_frame = I40E_MAX_RXBUFFER;
3298 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3299 #if (PAGE_SIZE < 8192)
3300 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3301 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3302 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3303 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3304 #endif
3305 } else {
3306 vsi->max_frame = I40E_MAX_RXBUFFER;
3307 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3308 I40E_RXBUFFER_2048;
3309 }
3310
3311 /* set up individual rings */
3312 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3313 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3314
3315 return err;
3316 }
3317
3318 /**
3319 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3320 * @vsi: ptr to the VSI
3321 **/
3322 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3323 {
3324 struct i40e_ring *tx_ring, *rx_ring;
3325 u16 qoffset, qcount;
3326 int i, n;
3327
3328 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3329 /* Reset the TC information */
3330 for (i = 0; i < vsi->num_queue_pairs; i++) {
3331 rx_ring = vsi->rx_rings[i];
3332 tx_ring = vsi->tx_rings[i];
3333 rx_ring->dcb_tc = 0;
3334 tx_ring->dcb_tc = 0;
3335 }
3336 return;
3337 }
3338
3339 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3340 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3341 continue;
3342
3343 qoffset = vsi->tc_config.tc_info[n].qoffset;
3344 qcount = vsi->tc_config.tc_info[n].qcount;
3345 for (i = qoffset; i < (qoffset + qcount); i++) {
3346 rx_ring = vsi->rx_rings[i];
3347 tx_ring = vsi->tx_rings[i];
3348 rx_ring->dcb_tc = n;
3349 tx_ring->dcb_tc = n;
3350 }
3351 }
3352 }
3353
3354 /**
3355 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3356 * @vsi: ptr to the VSI
3357 **/
3358 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3359 {
3360 if (vsi->netdev)
3361 i40e_set_rx_mode(vsi->netdev);
3362 }
3363
3364 /**
3365 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3366 * @vsi: Pointer to the targeted VSI
3367 *
3368 * This function replays the hlist on the hw where all the SB Flow Director
3369 * filters were saved.
3370 **/
3371 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3372 {
3373 struct i40e_fdir_filter *filter;
3374 struct i40e_pf *pf = vsi->back;
3375 struct hlist_node *node;
3376
3377 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3378 return;
3379
3380 /* Reset FDir counters as we're replaying all existing filters */
3381 pf->fd_tcp4_filter_cnt = 0;
3382 pf->fd_udp4_filter_cnt = 0;
3383 pf->fd_sctp4_filter_cnt = 0;
3384 pf->fd_ip4_filter_cnt = 0;
3385
3386 hlist_for_each_entry_safe(filter, node,
3387 &pf->fdir_filter_list, fdir_node) {
3388 i40e_add_del_fdir(vsi, filter, true);
3389 }
3390 }
3391
3392 /**
3393 * i40e_vsi_configure - Set up the VSI for action
3394 * @vsi: the VSI being configured
3395 **/
3396 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3397 {
3398 int err;
3399
3400 i40e_set_vsi_rx_mode(vsi);
3401 i40e_restore_vlan(vsi);
3402 i40e_vsi_config_dcb_rings(vsi);
3403 err = i40e_vsi_configure_tx(vsi);
3404 if (!err)
3405 err = i40e_vsi_configure_rx(vsi);
3406
3407 return err;
3408 }
3409
3410 /**
3411 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3412 * @vsi: the VSI being configured
3413 **/
3414 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3415 {
3416 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3417 struct i40e_pf *pf = vsi->back;
3418 struct i40e_hw *hw = &pf->hw;
3419 u16 vector;
3420 int i, q;
3421 u32 qp;
3422
3423 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3424 * and PFINT_LNKLSTn registers, e.g.:
3425 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3426 */
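/* Illustration (assumed layout): if vsi->base_vector is 1, then
 * q_vectors[0] is signalled by MSI-X vector 1 but its ITR and
 * linked-list state live in PFINT_ITRN(..., 0) / PFINT_LNKLSTN(0),
 * hence the "vector - 1" register indexing below.
 */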
3427 qp = vsi->base_queue;
3428 vector = vsi->base_vector;
3429 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3430 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3431
3432 q_vector->itr_countdown = ITR_COUNTDOWN_START;
3433 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
3434 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3435 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3436 q_vector->rx.itr);
3437 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
3438 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3439 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3440 q_vector->tx.itr);
3441 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3442 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3443
3444 /* Linked list for the queuepairs assigned to this vector */
3445 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3446 for (q = 0; q < q_vector->num_ringpairs; q++) {
3447 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
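/* Illustrative example (assuming the VSI's base queue is 0 and
 * alloc_queue_pairs = 8): queue pair 2 chains its Rx cause to the XDP
 * Tx queue at PF queue 2 + 8 = 10, which in turn chains back to the
 * regular Tx queue 2 before moving on to the next Rx queue.
 */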
3448 u32 val;
3449
3450 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3451 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3452 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3453 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3454 (I40E_QUEUE_TYPE_TX <<
3455 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3456
3457 wr32(hw, I40E_QINT_RQCTL(qp), val);
3458
3459 if (has_xdp) {
3460 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3461 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3462 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3463 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3464 (I40E_QUEUE_TYPE_TX <<
3465 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3466
3467 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3468 }
3469
3470 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3471 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3472 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3473 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3474 (I40E_QUEUE_TYPE_RX <<
3475 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3476
3477 /* Terminate the linked list */
3478 if (q == (q_vector->num_ringpairs - 1))
3479 val |= (I40E_QUEUE_END_OF_LIST <<
3480 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3481
3482 wr32(hw, I40E_QINT_TQCTL(qp), val);
3483 qp++;
3484 }
3485 }
3486
3487 i40e_flush(hw);
3488 }
3489
3490 /**
3491 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3492 * @pf: board private structure
3493 **/
3494 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3495 {
3496 struct i40e_hw *hw = &pf->hw;
3497 u32 val;
3498
3499 /* clear things first */
3500 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3501 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3502
3503 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3504 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3505 I40E_PFINT_ICR0_ENA_GRST_MASK |
3506 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3507 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3508 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3509 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3510 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3511
3512 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3513 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3514
3515 if (pf->flags & I40E_FLAG_PTP)
3516 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3517
3518 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3519
3520 /* SW_ITR_IDX = 0, but don't change INTENA */
3521 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3522 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3523
3524 /* OTHER_ITR_IDX = 0 */
3525 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3526 }
3527
3528 /**
3529 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3530 * @vsi: the VSI being configured
3531 **/
3532 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3533 {
3534 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3535 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3536 struct i40e_pf *pf = vsi->back;
3537 struct i40e_hw *hw = &pf->hw;
3538 u32 val;
3539
3540 /* set the ITR configuration */
3541 q_vector->itr_countdown = ITR_COUNTDOWN_START;
3542 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
3543 q_vector->rx.latency_range = I40E_LOW_LATENCY;
3544 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
3545 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
3546 q_vector->tx.latency_range = I40E_LOW_LATENCY;
3547 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
3548
3549 i40e_enable_misc_int_causes(pf);
3550
3551 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3552 wr32(hw, I40E_PFINT_LNKLST0, 0);
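/* In MSI/Legacy mode only queue pair 0 is used; the causes configured
 * below are chained as Rx(0) -> XDP Tx (when enabled) -> Tx(0), and
 * Tx(0) terminates the list with END_OF_LIST.
 */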
3553
3554 /* Associate the queue pair to the vector and enable the queue int */
3555 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3556 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3557 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3558 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3559
3560 wr32(hw, I40E_QINT_RQCTL(0), val);
3561
3562 if (i40e_enabled_xdp_vsi(vsi)) {
3563 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3564 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3565 (I40E_QUEUE_TYPE_TX
3566 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3567
3568 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3569 }
3570
3571 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3572 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3573 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3574
3575 wr32(hw, I40E_QINT_TQCTL(0), val);
3576 i40e_flush(hw);
3577 }
3578
3579 /**
3580 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3581 * @pf: board private structure
3582 **/
3583 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3584 {
3585 struct i40e_hw *hw = &pf->hw;
3586
3587 wr32(hw, I40E_PFINT_DYN_CTL0,
3588 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3589 i40e_flush(hw);
3590 }
3591
3592 /**
3593 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3594 * @pf: board private structure
3595 **/
3596 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3597 {
3598 struct i40e_hw *hw = &pf->hw;
3599 u32 val;
3600
3601 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3602 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3603 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3604
3605 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3606 i40e_flush(hw);
3607 }
3608
3609 /**
3610 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3611 * @irq: interrupt number
3612 * @data: pointer to a q_vector
3613 **/
3614 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3615 {
3616 struct i40e_q_vector *q_vector = data;
3617
3618 if (!q_vector->tx.ring && !q_vector->rx.ring)
3619 return IRQ_HANDLED;
3620
3621 napi_schedule_irqoff(&q_vector->napi);
3622
3623 return IRQ_HANDLED;
3624 }
3625
3626 /**
3627 * i40e_irq_affinity_notify - Callback for affinity changes
3628 * @notify: context as to what irq was changed
3629 * @mask: the new affinity mask
3630 *
3631 * This is a callback function used by the irq_set_affinity_notifier function
3632 * so that we may register to receive changes to the irq affinity masks.
3633 **/
3634 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3635 const cpumask_t *mask)
3636 {
3637 struct i40e_q_vector *q_vector =
3638 container_of(notify, struct i40e_q_vector, affinity_notify);
3639
3640 cpumask_copy(&q_vector->affinity_mask, mask);
3641 }
3642
3643 /**
3644 * i40e_irq_affinity_release - Callback for affinity notifier release
3645 * @ref: internal core kernel usage
3646 *
3647 * This is a callback function used by the irq_set_affinity_notifier function
3648 * to inform the current notification subscriber that they will no longer
3649 * receive notifications.
3650 **/
3651 static void i40e_irq_affinity_release(struct kref *ref) {}
3652
3653 /**
3654 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3655 * @vsi: the VSI being configured
3656 * @basename: name for the vector
3657 *
3658 * Requests the previously allocated MSI-X queue interrupts from the kernel.
3659 **/
3660 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3661 {
3662 int q_vectors = vsi->num_q_vectors;
3663 struct i40e_pf *pf = vsi->back;
3664 int base = vsi->base_vector;
3665 int rx_int_idx = 0;
3666 int tx_int_idx = 0;
3667 int vector, err;
3668 int irq_num;
3669 int cpu;
3670
3671 for (vector = 0; vector < q_vectors; vector++) {
3672 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3673
3674 irq_num = pf->msix_entries[base + vector].vector;
3675
3676 if (q_vector->tx.ring && q_vector->rx.ring) {
3677 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3678 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3679 tx_int_idx++;
3680 } else if (q_vector->rx.ring) {
3681 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3682 "%s-%s-%d", basename, "rx", rx_int_idx++);
3683 } else if (q_vector->tx.ring) {
3684 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3685 "%s-%s-%d", basename, "tx", tx_int_idx++);
3686 } else {
3687 /* skip this unused q_vector */
3688 continue;
3689 }
3690 err = request_irq(irq_num,
3691 vsi->irq_handler,
3692 0,
3693 q_vector->name,
3694 q_vector);
3695 if (err) {
3696 dev_info(&pf->pdev->dev,
3697 "MSIX request_irq failed, error: %d\n", err);
3698 goto free_queue_irqs;
3699 }
3700
3701 /* register for affinity change notifications */
3702 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3703 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3704 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3705 /* Spread affinity hints out across online CPUs.
3706 *
3707 * get_cpu_mask returns a static constant mask with
3708 * a permanent lifetime so it's ok to pass to
3709 * irq_set_affinity_hint without making a copy.
3710 */
3711 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3712 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3713 }
3714
3715 vsi->irqs_ready = true;
3716 return 0;
3717
3718 free_queue_irqs:
3719 while (vector) {
3720 vector--;
3721 irq_num = pf->msix_entries[base + vector].vector;
3722 irq_set_affinity_notifier(irq_num, NULL);
3723 irq_set_affinity_hint(irq_num, NULL);
3724 free_irq(irq_num, vsi->q_vectors[vector]);
3725 }
3726 return err;
3727 }
3728
3729 /**
3730 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3731 * @vsi: the VSI being un-configured
3732 **/
3733 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3734 {
3735 struct i40e_pf *pf = vsi->back;
3736 struct i40e_hw *hw = &pf->hw;
3737 int base = vsi->base_vector;
3738 int i;
3739
3740 /* disable interrupt causation from each queue */
3741 for (i = 0; i < vsi->num_queue_pairs; i++) {
3742 u32 val;
3743
3744 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3745 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3746 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3747
3748 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3749 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3750 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3751
3752 if (!i40e_enabled_xdp_vsi(vsi))
3753 continue;
3754 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3755 }
3756
3757 /* disable each interrupt */
3758 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3759 for (i = vsi->base_vector;
3760 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3761 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3762
3763 i40e_flush(hw);
3764 for (i = 0; i < vsi->num_q_vectors; i++)
3765 synchronize_irq(pf->msix_entries[i + base].vector);
3766 } else {
3767 /* Legacy and MSI mode - this stops all interrupt handling */
3768 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3769 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3770 i40e_flush(hw);
3771 synchronize_irq(pf->pdev->irq);
3772 }
3773 }
3774
3775 /**
3776 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3777 * @vsi: the VSI being configured
3778 **/
3779 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3780 {
3781 struct i40e_pf *pf = vsi->back;
3782 int i;
3783
3784 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3785 for (i = 0; i < vsi->num_q_vectors; i++)
3786 i40e_irq_dynamic_enable(vsi, i);
3787 } else {
3788 i40e_irq_dynamic_enable_icr0(pf);
3789 }
3790
3791 i40e_flush(&pf->hw);
3792 return 0;
3793 }
3794
3795 /**
3796 * i40e_free_misc_vector - Free the vector that handles non-queue events
3797 * @pf: board private structure
3798 **/
3799 static void i40e_free_misc_vector(struct i40e_pf *pf)
3800 {
3801 /* Disable ICR 0 */
3802 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3803 i40e_flush(&pf->hw);
3804
3805 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3806 synchronize_irq(pf->msix_entries[0].vector);
3807 free_irq(pf->msix_entries[0].vector, pf);
3808 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3809 }
3810 }
3811
3812 /**
3813 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3814 * @irq: interrupt number
3815 * @data: pointer to the PF structure
3816 *
3817 * This is the handler used for all MSI/Legacy interrupts, and deals
3818 * with both queue and non-queue interrupts. This is also used in
3819 * MSIX mode to handle the non-queue interrupts.
3820 **/
3821 static irqreturn_t i40e_intr(int irq, void *data)
3822 {
3823 struct i40e_pf *pf = (struct i40e_pf *)data;
3824 struct i40e_hw *hw = &pf->hw;
3825 irqreturn_t ret = IRQ_NONE;
3826 u32 icr0, icr0_remaining;
3827 u32 val, ena_mask;
3828
3829 icr0 = rd32(hw, I40E_PFINT_ICR0);
3830 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
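/* Causes handled below are cleared from ena_mask; when ena_mask is
 * written back to I40E_PFINT_ICR0_ENA at the end, those causes remain
 * masked until they are explicitly re-enabled (typically by the
 * service task handler for that event).
 */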
3831
3832 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3833 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3834 goto enable_intr;
3835
3836 /* if interrupt but no bits showing, must be SWINT */
3837 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3838 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3839 pf->sw_int_count++;
3840
3841 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3842 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3843 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3844 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3845 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3846 }
3847
3848 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3849 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3850 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3851 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3852
3853 /* We do not have a way to disarm queue causes while leaving
3854 * the interrupt enabled for all other causes. Ideally the
3855 * interrupt should be disabled while we are in NAPI, but this
3856 * is not a performance path and napi_schedule()
3857 * can deal with rescheduling.
3858 */
3859 if (!test_bit(__I40E_DOWN, pf->state))
3860 napi_schedule_irqoff(&q_vector->napi);
3861 }
3862
3863 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3864 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3865 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
3866 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3867 }
3868
3869 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3870 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3871 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3872 }
3873
3874 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3875 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3876 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3877 }
3878
3879 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3880 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3881 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3882 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3883 val = rd32(hw, I40E_GLGEN_RSTAT);
3884 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3885 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3886 if (val == I40E_RESET_CORER) {
3887 pf->corer_count++;
3888 } else if (val == I40E_RESET_GLOBR) {
3889 pf->globr_count++;
3890 } else if (val == I40E_RESET_EMPR) {
3891 pf->empr_count++;
3892 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
3893 }
3894 }
3895
3896 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3897 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3898 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3899 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3900 rd32(hw, I40E_PFHMC_ERRORINFO),
3901 rd32(hw, I40E_PFHMC_ERRORDATA));
3902 }
3903
3904 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3905 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3906
3907 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3908 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3909 i40e_ptp_tx_hwtstamp(pf);
3910 }
3911 }
3912
3913 /* If a critical error is pending we have no choice but to reset the
3914 * device.
3915 * Report and mask out any remaining unexpected interrupts.
3916 */
3917 icr0_remaining = icr0 & ena_mask;
3918 if (icr0_remaining) {
3919 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3920 icr0_remaining);
3921 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3922 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3923 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3924 dev_info(&pf->pdev->dev, "device will be reset\n");
3925 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
3926 i40e_service_event_schedule(pf);
3927 }
3928 ena_mask &= ~icr0_remaining;
3929 }
3930 ret = IRQ_HANDLED;
3931
3932 enable_intr:
3933 /* re-enable interrupt causes */
3934 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3935 if (!test_bit(__I40E_DOWN, pf->state)) {
3936 i40e_service_event_schedule(pf);
3937 i40e_irq_dynamic_enable_icr0(pf);
3938 }
3939
3940 return ret;
3941 }
3942
3943 /**
3944 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3945 * @tx_ring: tx ring to clean
3946 * @budget: how many cleans we're allowed
3947 *
3948 * Returns true if there's any budget left (i.e. the clean is finished)
3949 **/
3950 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3951 {
3952 struct i40e_vsi *vsi = tx_ring->vsi;
3953 u16 i = tx_ring->next_to_clean;
3954 struct i40e_tx_buffer *tx_buf;
3955 struct i40e_tx_desc *tx_desc;
3956
3957 tx_buf = &tx_ring->tx_bi[i];
3958 tx_desc = I40E_TX_DESC(tx_ring, i);
3959 i -= tx_ring->count;
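/* i is kept offset by -count so the wrap check in the loop below is a
 * simple test for zero; the real ring index is restored by adding
 * count back once the loop is done.
 */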
3960
3961 do {
3962 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3963
3964 /* if next_to_watch is not set then there is no work pending */
3965 if (!eop_desc)
3966 break;
3967
3968 /* prevent any other reads prior to eop_desc */
3969 smp_rmb();
3970
3971 /* if the descriptor isn't done, no work yet to do */
3972 if (!(eop_desc->cmd_type_offset_bsz &
3973 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3974 break;
3975
3976 /* clear next_to_watch to prevent false hangs */
3977 tx_buf->next_to_watch = NULL;
3978
3979 tx_desc->buffer_addr = 0;
3980 tx_desc->cmd_type_offset_bsz = 0;
3981 /* move past filter desc */
3982 tx_buf++;
3983 tx_desc++;
3984 i++;
3985 if (unlikely(!i)) {
3986 i -= tx_ring->count;
3987 tx_buf = tx_ring->tx_bi;
3988 tx_desc = I40E_TX_DESC(tx_ring, 0);
3989 }
3990 /* unmap skb header data */
3991 dma_unmap_single(tx_ring->dev,
3992 dma_unmap_addr(tx_buf, dma),
3993 dma_unmap_len(tx_buf, len),
3994 DMA_TO_DEVICE);
3995 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3996 kfree(tx_buf->raw_buf);
3997
3998 tx_buf->raw_buf = NULL;
3999 tx_buf->tx_flags = 0;
4000 tx_buf->next_to_watch = NULL;
4001 dma_unmap_len_set(tx_buf, len, 0);
4002 tx_desc->buffer_addr = 0;
4003 tx_desc->cmd_type_offset_bsz = 0;
4004
4005 /* move us past the eop_desc for start of next FD desc */
4006 tx_buf++;
4007 tx_desc++;
4008 i++;
4009 if (unlikely(!i)) {
4010 i -= tx_ring->count;
4011 tx_buf = tx_ring->tx_bi;
4012 tx_desc = I40E_TX_DESC(tx_ring, 0);
4013 }
4014
4015 /* update budget accounting */
4016 budget--;
4017 } while (likely(budget));
4018
4019 i += tx_ring->count;
4020 tx_ring->next_to_clean = i;
4021
4022 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4023 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4024
4025 return budget > 0;
4026 }
4027
4028 /**
4029 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4030 * @irq: interrupt number
4031 * @data: pointer to a q_vector
4032 **/
4033 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4034 {
4035 struct i40e_q_vector *q_vector = data;
4036 struct i40e_vsi *vsi;
4037
4038 if (!q_vector->tx.ring)
4039 return IRQ_HANDLED;
4040
4041 vsi = q_vector->tx.ring->vsi;
4042 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4043
4044 return IRQ_HANDLED;
4045 }
4046
4047 /**
4048 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4049 * @vsi: the VSI being configured
4050 * @v_idx: vector index
4051 * @qp_idx: queue pair index
4052 **/
4053 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4054 {
4055 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4056 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4057 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4058
4059 tx_ring->q_vector = q_vector;
4060 tx_ring->next = q_vector->tx.ring;
4061 q_vector->tx.ring = tx_ring;
4062 q_vector->tx.count++;
4063
4064 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4065 if (i40e_enabled_xdp_vsi(vsi)) {
4066 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4067
4068 xdp_ring->q_vector = q_vector;
4069 xdp_ring->next = q_vector->tx.ring;
4070 q_vector->tx.ring = xdp_ring;
4071 q_vector->tx.count++;
4072 }
4073
4074 rx_ring->q_vector = q_vector;
4075 rx_ring->next = q_vector->rx.ring;
4076 q_vector->rx.ring = rx_ring;
4077 q_vector->rx.count++;
4078 }
4079
4080 /**
4081 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4082 * @vsi: the VSI being configured
4083 *
4084 * This function maps descriptor rings to the queue-specific vectors
4085 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4086 * one vector per queue pair, but on a constrained vector budget, we
4087 * group the queue pairs as "efficiently" as possible.
4088 **/
4089 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4090 {
4091 int qp_remaining = vsi->num_queue_pairs;
4092 int q_vectors = vsi->num_q_vectors;
4093 int num_ringpairs;
4094 int v_start = 0;
4095 int qp_idx = 0;
4096
4097 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4098 * group them so there are multiple queues per vector.
4099 * It is also important to go through all the vectors available to be
4100 * sure that, if we don't use all of them, the remaining vectors
4101 * are cleared. This is especially important when decreasing the
4102 * number of queues in use.
4103 */
4104 for (; v_start < q_vectors; v_start++) {
4105 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4106
4107 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
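/* DIV_ROUND_UP spreads the remaining queue pairs as evenly as
 * possible across the remaining vectors; e.g. 10 queue pairs on
 * 4 vectors come out as 3, 3, 2 and 2 ring pairs per vector.
 */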
4108
4109 q_vector->num_ringpairs = num_ringpairs;
4110
4111 q_vector->rx.count = 0;
4112 q_vector->tx.count = 0;
4113 q_vector->rx.ring = NULL;
4114 q_vector->tx.ring = NULL;
4115
4116 while (num_ringpairs--) {
4117 i40e_map_vector_to_qp(vsi, v_start, qp_idx);
4118 qp_idx++;
4119 qp_remaining--;
4120 }
4121 }
4122 }
4123
4124 /**
4125 * i40e_vsi_request_irq - Request IRQ from the OS
4126 * @vsi: the VSI being configured
4127 * @basename: name for the vector
4128 **/
4129 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4130 {
4131 struct i40e_pf *pf = vsi->back;
4132 int err;
4133
4134 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4135 err = i40e_vsi_request_irq_msix(vsi, basename);
4136 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4137 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4138 pf->int_name, pf);
4139 else
4140 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4141 pf->int_name, pf);
4142
4143 if (err)
4144 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4145
4146 return err;
4147 }
4148
4149 #ifdef CONFIG_NET_POLL_CONTROLLER
4150 /**
4151 * i40e_netpoll - A Polling 'interrupt' handler
4152 * @netdev: network interface device structure
4153 *
4154 * This is used by netconsole to send skbs without having to re-enable
4155 * interrupts. It's not called while the normal interrupt routine is executing.
4156 **/
4157 static void i40e_netpoll(struct net_device *netdev)
4158 {
4159 struct i40e_netdev_priv *np = netdev_priv(netdev);
4160 struct i40e_vsi *vsi = np->vsi;
4161 struct i40e_pf *pf = vsi->back;
4162 int i;
4163
4164 /* if interface is down do nothing */
4165 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4166 return;
4167
4168 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4169 for (i = 0; i < vsi->num_q_vectors; i++)
4170 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4171 } else {
4172 i40e_intr(pf->pdev->irq, netdev);
4173 }
4174 }
4175 #endif
4176
4177 #define I40E_QTX_ENA_WAIT_COUNT 50
4178
4179 /**
4180 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4181 * @pf: the PF being configured
4182 * @pf_q: the PF queue
4183 * @enable: enable or disable state of the queue
4184 *
4185 * This routine will wait for the given Tx queue of the PF to reach the
4186 * enabled or disabled state.
4187 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4188 * multiple retries; else will return 0 in case of success.
4189 **/
4190 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4191 {
4192 int i;
4193 u32 tx_reg;
4194
4195 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4196 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4197 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4198 break;
4199
4200 usleep_range(10, 20);
4201 }
4202 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4203 return -ETIMEDOUT;
4204
4205 return 0;
4206 }
4207
4208 /**
4209 * i40e_control_tx_q - Start or stop a particular Tx queue
4210 * @pf: the PF structure
4211 * @pf_q: the PF queue to configure
4212 * @enable: start or stop the queue
4213 *
4214 * This function enables or disables a single queue. Note that any delay
4215 * required after the operation is expected to be handled by the caller of
4216 * this function.
4217 **/
4218 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4219 {
4220 struct i40e_hw *hw = &pf->hw;
4221 u32 tx_reg;
4222 int i;
4223
4224 /* warn the TX unit of coming changes */
4225 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4226 if (!enable)
4227 usleep_range(10, 20);
4228
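/* Wait for any previously requested enable/disable to complete, i.e.
 * for QENA_REQ and QENA_STAT to agree, before issuing a new request.
 */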
4229 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4230 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4231 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4232 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4233 break;
4234 usleep_range(1000, 2000);
4235 }
4236
4237 /* Skip if the queue is already in the requested state */
4238 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4239 return;
4240
4241 /* turn on/off the queue */
4242 if (enable) {
4243 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4244 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4245 } else {
4246 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4247 }
4248
4249 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4250 }
4251
4252 /**
4253 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4254 * @seid: VSI SEID
4255 * @pf: the PF structure
4256 * @pf_q: the PF queue to configure
4257 * @is_xdp: true if the queue is used for XDP
4258 * @enable: start or stop the queue
4259 **/
4260 static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4261 bool is_xdp, bool enable)
4262 {
4263 int ret;
4264
4265 i40e_control_tx_q(pf, pf_q, enable);
4266
4267 /* wait for the change to finish */
4268 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4269 if (ret) {
4270 dev_info(&pf->pdev->dev,
4271 "VSI seid %d %sTx ring %d %sable timeout\n",
4272 seid, (is_xdp ? "XDP " : ""), pf_q,
4273 (enable ? "en" : "dis"));
4274 }
4275
4276 return ret;
4277 }
4278
4279 /**
4280 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
4281 * @vsi: the VSI being configured
4282 * @enable: start or stop the Tx rings
4283 **/
4284 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
4285 {
4286 struct i40e_pf *pf = vsi->back;
4287 int i, pf_q, ret = 0;
4288
4289 pf_q = vsi->base_queue;
4290 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4291 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4292 pf_q,
4293 false /*is xdp*/, enable);
4294 if (ret)
4295 break;
4296
4297 if (!i40e_enabled_xdp_vsi(vsi))
4298 continue;
4299
4300 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4301 pf_q + vsi->alloc_queue_pairs,
4302 true /*is xdp*/, enable);
4303 if (ret)
4304 break;
4305 }
4306
4307 return ret;
4308 }
4309
4310 /**
4311 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4312 * @pf: the PF being configured
4313 * @pf_q: the PF queue
4314 * @enable: enable or disable state of the queue
4315 *
4316 * This routine will wait for the given Rx queue of the PF to reach the
4317 * enabled or disabled state.
4318 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4319 * multiple retries; else will return 0 in case of success.
4320 **/
4321 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4322 {
4323 int i;
4324 u32 rx_reg;
4325
4326 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4327 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4328 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4329 break;
4330
4331 usleep_range(10, 20);
4332 }
4333 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
4334 return -ETIMEDOUT;
4335
4336 return 0;
4337 }
4338
4339 /**
4340 * i40e_control_rx_q - Start or stop a particular Rx queue
4341 * @pf: the PF structure
4342 * @pf_q: the PF queue to configure
4343 * @enable: start or stop the queue
4344 *
4345 * This function enables or disables a single queue. Note that any delay
4346 * required after the operation is expected to be handled by the caller of
4347 * this function.
4348 **/
4349 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4350 {
4351 struct i40e_hw *hw = &pf->hw;
4352 u32 rx_reg;
4353 int i;
4354
4355 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4356 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4357 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4358 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4359 break;
4360 usleep_range(1000, 2000);
4361 }
4362
4363 /* Skip if the queue is already in the requested state */
4364 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4365 return;
4366
4367 /* turn on/off the queue */
4368 if (enable)
4369 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4370 else
4371 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4372
4373 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
4374 }
4375
4376 /**
4377 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
4378 * @vsi: the VSI being configured
4379 * @enable: start or stop the Rx rings
4380 **/
4381 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
4382 {
4383 struct i40e_pf *pf = vsi->back;
4384 int i, pf_q, ret = 0;
4385
4386 pf_q = vsi->base_queue;
4387 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4388 i40e_control_rx_q(pf, pf_q, enable);
4389
4390 /* wait for the change to finish */
4391 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4392 if (ret) {
4393 dev_info(&pf->pdev->dev,
4394 "VSI seid %d Rx ring %d %sable timeout\n",
4395 vsi->seid, pf_q, (enable ? "en" : "dis"));
4396 break;
4397 }
4398 }
4399
4400 /* Due to HW errata, on Rx disable only, the register can indicate done
4401 * before it really is. Needs 50ms to be sure
4402 */
4403 if (!enable)
4404 mdelay(50);
4405
4406 return ret;
4407 }
4408
4409 /**
4410 * i40e_vsi_start_rings - Start a VSI's rings
4411 * @vsi: the VSI being configured
4412 **/
4413 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4414 {
4415 int ret = 0;
4416
4417 /* do rx first for enable and last for disable */
4418 ret = i40e_vsi_control_rx(vsi, true);
4419 if (ret)
4420 return ret;
4421 ret = i40e_vsi_control_tx(vsi, true);
4422
4423 return ret;
4424 }
4425
4426 /**
4427 * i40e_vsi_stop_rings - Stop a VSI's rings
4428 * @vsi: the VSI being configured
4429 **/
4430 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4431 {
4432 /* When port TX is suspended, don't wait */
4433 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4434 return i40e_vsi_stop_rings_no_wait(vsi);
4435
4436 /* do rx first for enable and last for disable
4437 * Ignore return values; we need to shut down whatever we can
4438 */
4439 i40e_vsi_control_tx(vsi, false);
4440 i40e_vsi_control_rx(vsi, false);
4441 }
4442
4443 /**
4444 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4445 * @vsi: the VSI being shutdown
4446 *
4447 * This function stops all the rings for a VSI but does not delay to verify
4448 * that rings have been disabled. It is expected that the caller is shutting
4449 * down multiple VSIs at once and will delay together for all the VSIs after
4450 * initiating the shutdown. This is particularly useful for shutting down lots
4451 * of VFs together. Otherwise, a large delay can be incurred while configuring
4452 * each VSI in serial.
4453 **/
4454 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4455 {
4456 struct i40e_pf *pf = vsi->back;
4457 int i, pf_q;
4458
4459 pf_q = vsi->base_queue;
4460 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4461 i40e_control_tx_q(pf, pf_q, false);
4462 i40e_control_rx_q(pf, pf_q, false);
4463 }
4464 }
4465
4466 /**
4467 * i40e_vsi_free_irq - Free the irq association with the OS
4468 * @vsi: the VSI being configured
4469 **/
4470 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4471 {
4472 struct i40e_pf *pf = vsi->back;
4473 struct i40e_hw *hw = &pf->hw;
4474 int base = vsi->base_vector;
4475 u32 val, qp;
4476 int i;
4477
4478 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4479 if (!vsi->q_vectors)
4480 return;
4481
4482 if (!vsi->irqs_ready)
4483 return;
4484
4485 vsi->irqs_ready = false;
4486 for (i = 0; i < vsi->num_q_vectors; i++) {
4487 int irq_num;
4488 u16 vector;
4489
4490 vector = i + base;
4491 irq_num = pf->msix_entries[vector].vector;
4492
4493 /* free only the irqs that were actually requested */
4494 if (!vsi->q_vectors[i] ||
4495 !vsi->q_vectors[i]->num_ringpairs)
4496 continue;
4497
4498 /* clear the affinity notifier in the IRQ descriptor */
4499 irq_set_affinity_notifier(irq_num, NULL);
4500 /* remove our suggested affinity mask for this IRQ */
4501 irq_set_affinity_hint(irq_num, NULL);
4502 synchronize_irq(irq_num);
4503 free_irq(irq_num, vsi->q_vectors[i]);
4504
4505 /* Tear down the interrupt queue link list
4506 *
4507 * We know that they come in pairs and always
4508 * the Rx first, then the Tx. To clear the
4509 * link list, stick the EOL value into the
4510 * next_q field of the registers.
4511 */
4512 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4513 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4514 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4515 val |= I40E_QUEUE_END_OF_LIST
4516 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4517 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4518
4519 while (qp != I40E_QUEUE_END_OF_LIST) {
4520 u32 next;
4521
4522 val = rd32(hw, I40E_QINT_RQCTL(qp));
4523
4524 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4525 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4526 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4527 I40E_QINT_RQCTL_INTEVENT_MASK);
4528
4529 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4530 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4531
4532 wr32(hw, I40E_QINT_RQCTL(qp), val);
4533
4534 val = rd32(hw, I40E_QINT_TQCTL(qp));
4535
4536 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4537 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4538
4539 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4540 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4541 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4542 I40E_QINT_TQCTL_INTEVENT_MASK);
4543
4544 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4545 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4546
4547 wr32(hw, I40E_QINT_TQCTL(qp), val);
4548 qp = next;
4549 }
4550 }
4551 } else {
4552 free_irq(pf->pdev->irq, pf);
4553
4554 val = rd32(hw, I40E_PFINT_LNKLST0);
4555 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4556 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4557 val |= I40E_QUEUE_END_OF_LIST
4558 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4559 wr32(hw, I40E_PFINT_LNKLST0, val);
4560
4561 val = rd32(hw, I40E_QINT_RQCTL(qp));
4562 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4563 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4564 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4565 I40E_QINT_RQCTL_INTEVENT_MASK);
4566
4567 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4568 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4569
4570 wr32(hw, I40E_QINT_RQCTL(qp), val);
4571
4572 val = rd32(hw, I40E_QINT_TQCTL(qp));
4573
4574 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4575 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4576 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4577 I40E_QINT_TQCTL_INTEVENT_MASK);
4578
4579 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4580 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4581
4582 wr32(hw, I40E_QINT_TQCTL(qp), val);
4583 }
4584 }
4585
4586 /**
4587 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4588 * @vsi: the VSI being configured
4589 * @v_idx: Index of vector to be freed
4590 *
4591 * This function frees the memory allocated to the q_vector. In addition if
4592 * NAPI is enabled it will delete any references to the NAPI struct prior
4593 * to freeing the q_vector.
4594 **/
4595 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4596 {
4597 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4598 struct i40e_ring *ring;
4599
4600 if (!q_vector)
4601 return;
4602
4603 /* disassociate q_vector from rings */
4604 i40e_for_each_ring(ring, q_vector->tx)
4605 ring->q_vector = NULL;
4606
4607 i40e_for_each_ring(ring, q_vector->rx)
4608 ring->q_vector = NULL;
4609
4610 /* only VSI w/ an associated netdev is set up w/ NAPI */
4611 if (vsi->netdev)
4612 netif_napi_del(&q_vector->napi);
4613
4614 vsi->q_vectors[v_idx] = NULL;
4615
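/* Free the q_vector via RCU so that any in-flight RCU readers still
 * holding a reference finish before the memory is released.
 */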
4616 kfree_rcu(q_vector, rcu);
4617 }
4618
4619 /**
4620 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4621 * @vsi: the VSI being un-configured
4622 *
4623 * This frees the memory allocated to the q_vectors and
4624 * deletes references to the NAPI struct.
4625 **/
4626 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4627 {
4628 int v_idx;
4629
4630 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4631 i40e_free_q_vector(vsi, v_idx);
4632 }
4633
4634 /**
4635 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4636 * @pf: board private structure
4637 **/
4638 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4639 {
4640 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4641 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4642 pci_disable_msix(pf->pdev);
4643 kfree(pf->msix_entries);
4644 pf->msix_entries = NULL;
4645 kfree(pf->irq_pile);
4646 pf->irq_pile = NULL;
4647 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4648 pci_disable_msi(pf->pdev);
4649 }
4650 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4651 }
4652
4653 /**
4654 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4655 * @pf: board private structure
4656 *
4657 * We go through and clear interrupt specific resources and reset the structure
4658 * to pre-load conditions
4659 **/
4660 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4661 {
4662 int i;
4663
4664 i40e_free_misc_vector(pf);
4665
4666 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4667 I40E_IWARP_IRQ_PILE_ID);
4668
4669 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4670 for (i = 0; i < pf->num_alloc_vsi; i++)
4671 if (pf->vsi[i])
4672 i40e_vsi_free_q_vectors(pf->vsi[i]);
4673 i40e_reset_interrupt_capability(pf);
4674 }
4675
4676 /**
4677 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4678 * @vsi: the VSI being configured
4679 **/
4680 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4681 {
4682 int q_idx;
4683
4684 if (!vsi->netdev)
4685 return;
4686
4687 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4688 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4689
4690 if (q_vector->rx.ring || q_vector->tx.ring)
4691 napi_enable(&q_vector->napi);
4692 }
4693 }
4694
4695 /**
4696 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4697 * @vsi: the VSI being configured
4698 **/
4699 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4700 {
4701 int q_idx;
4702
4703 if (!vsi->netdev)
4704 return;
4705
4706 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4707 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4708
4709 if (q_vector->rx.ring || q_vector->tx.ring)
4710 napi_disable(&q_vector->napi);
4711 }
4712 }
4713
4714 /**
4715 * i40e_vsi_close - Shut down a VSI
4716 * @vsi: the vsi to be quelled
4717 **/
4718 static void i40e_vsi_close(struct i40e_vsi *vsi)
4719 {
4720 struct i40e_pf *pf = vsi->back;
4721 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
4722 i40e_down(vsi);
4723 i40e_vsi_free_irq(vsi);
4724 i40e_vsi_free_tx_resources(vsi);
4725 i40e_vsi_free_rx_resources(vsi);
4726 vsi->current_netdev_flags = 0;
4727 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
4728 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4729 pf->flags |= I40E_FLAG_CLIENT_RESET;
4730 }
4731
4732 /**
4733 * i40e_quiesce_vsi - Pause a given VSI
4734 * @vsi: the VSI being paused
4735 **/
4736 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4737 {
4738 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4739 return;
4740
4741 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
4742 if (vsi->netdev && netif_running(vsi->netdev))
4743 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4744 else
4745 i40e_vsi_close(vsi);
4746 }
4747
4748 /**
4749 * i40e_unquiesce_vsi - Resume a given VSI
4750 * @vsi: the VSI being resumed
4751 **/
4752 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4753 {
4754 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
4755 return;
4756
4757 if (vsi->netdev && netif_running(vsi->netdev))
4758 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4759 else
4760 i40e_vsi_open(vsi); /* this clears the DOWN bit */
4761 }
4762
4763 /**
4764 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4765 * @pf: the PF
4766 **/
4767 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4768 {
4769 int v;
4770
4771 for (v = 0; v < pf->num_alloc_vsi; v++) {
4772 if (pf->vsi[v])
4773 i40e_quiesce_vsi(pf->vsi[v]);
4774 }
4775 }
4776
4777 /**
4778 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4779 * @pf: the PF
4780 **/
4781 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4782 {
4783 int v;
4784
4785 for (v = 0; v < pf->num_alloc_vsi; v++) {
4786 if (pf->vsi[v])
4787 i40e_unquiesce_vsi(pf->vsi[v]);
4788 }
4789 }
4790
4791 /**
4792 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4793 * @vsi: the VSI being configured
4794 *
4795 * Wait until all queues on a given VSI have been disabled.
4796 **/
4797 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4798 {
4799 struct i40e_pf *pf = vsi->back;
4800 int i, pf_q, ret;
4801
4802 pf_q = vsi->base_queue;
4803 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4804 /* Check and wait for the Tx queue */
4805 ret = i40e_pf_txq_wait(pf, pf_q, false);
4806 if (ret) {
4807 dev_info(&pf->pdev->dev,
4808 "VSI seid %d Tx ring %d disable timeout\n",
4809 vsi->seid, pf_q);
4810 return ret;
4811 }
4812
4813 if (!i40e_enabled_xdp_vsi(vsi))
4814 goto wait_rx;
4815
4816 /* Check and wait for the XDP Tx queue */
4817 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4818 false);
4819 if (ret) {
4820 dev_info(&pf->pdev->dev,
4821 "VSI seid %d XDP Tx ring %d disable timeout\n",
4822 vsi->seid, pf_q);
4823 return ret;
4824 }
4825 wait_rx:
4826 /* Check and wait for the Rx queue */
4827 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4828 if (ret) {
4829 dev_info(&pf->pdev->dev,
4830 "VSI seid %d Rx ring %d disable timeout\n",
4831 vsi->seid, pf_q);
4832 return ret;
4833 }
4834 }
4835
4836 return 0;
4837 }
4838
4839 #ifdef CONFIG_I40E_DCB
4840 /**
4841 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4842 * @pf: the PF
4843 *
4844 * This function waits for the queues to be in disabled state for all the
4845 * VSIs that are managed by this PF.
4846 **/
4847 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4848 {
4849 int v, ret = 0;
4850
4851 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4852 if (pf->vsi[v]) {
4853 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4854 if (ret)
4855 break;
4856 }
4857 }
4858
4859 return ret;
4860 }
4861
4862 #endif
4863
4864 /**
4865 * i40e_detect_recover_hung_queue - Function to detect and recover a hung queue
4866 * @q_idx: TX queue number
4867 * @vsi: Pointer to VSI struct
4868 *
4869 * This function checks the specified Tx queue of the given VSI for a hung
4870 * condition. We proactively detect hung Tx queues by checking whether
4871 * interrupts are disabled while descriptors are still pending. If the queue
4872 * appears hung, we attempt to recover by triggering a SW interrupt.
4873 **/
4874 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4875 {
4876 struct i40e_ring *tx_ring = NULL;
4877 struct i40e_pf *pf;
4878 u32 val, tx_pending;
4879 int i;
4880
4881 pf = vsi->back;
4882
4883 /* now that we have an index, find the tx_ring struct */
4884 for (i = 0; i < vsi->num_queue_pairs; i++) {
4885 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4886 if (q_idx == vsi->tx_rings[i]->queue_index) {
4887 tx_ring = vsi->tx_rings[i];
4888 break;
4889 }
4890 }
4891 }
4892
4893 if (!tx_ring)
4894 return;
4895
4896 /* Read interrupt register */
4897 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4898 val = rd32(&pf->hw,
4899 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4900 tx_ring->vsi->base_vector - 1));
4901 else
4902 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4903
4904 tx_pending = i40e_get_tx_pending(tx_ring);
4905
4906 /* If interrupts are disabled while Tx work is pending, trigger the
4907 * SW interrupt (don't wait). Worst case there will be one extra
4908 * interrupt, which may result in no queues being cleaned because
4909 * they were already clean.
4910 */
4911 if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
4912 i40e_force_wb(vsi, tx_ring->q_vector);
4913 }
4914
4915 /**
4916 * i40e_detect_recover_hung - Function to detect and recover hung queues
4917 * @pf: pointer to PF struct
4918 *
4919 * The LAN VSI has a netdev and the netdev has Tx queues. This function
4920 * checks each of those Tx queues for a hung condition and, if one is
4921 * found, triggers recovery by issuing a SW interrupt.
4922 **/
4923 static void i40e_detect_recover_hung(struct i40e_pf *pf)
4924 {
4925 struct net_device *netdev;
4926 struct i40e_vsi *vsi;
4927 unsigned int i;
4928
4929 /* Only for LAN VSI */
4930 vsi = pf->vsi[pf->lan_vsi];
4931
4932 if (!vsi)
4933 return;
4934
4935 /* Make sure the VSI state is not DOWN or RECOVERY_PENDING */
4936 if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
4937 test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
4938 return;
4939
4940 /* Make sure type is MAIN VSI */
4941 if (vsi->type != I40E_VSI_MAIN)
4942 return;
4943
4944 netdev = vsi->netdev;
4945 if (!netdev)
4946 return;
4947
4948 /* Bail out if netif_carrier is not OK */
4949 if (!netif_carrier_ok(netdev))
4950 return;
4951
4952 /* Go through the netdev's Tx queues */
4953 for (i = 0; i < netdev->num_tx_queues; i++) {
4954 struct netdev_queue *q;
4955
4956 q = netdev_get_tx_queue(netdev, i);
4957 if (q)
4958 i40e_detect_recover_hung_queue(i, vsi);
4959 }
4960 }
4961
4962 /**
4963 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4964 * @pf: pointer to PF
4965 *
4966 * Get TC map for iSCSI PF type that will include iSCSI TC
4967 * and LAN TC.
4968 **/
4969 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4970 {
4971 struct i40e_dcb_app_priority_table app;
4972 struct i40e_hw *hw = &pf->hw;
4973 u8 enabled_tc = 1; /* TC0 is always enabled */
4974 u8 tc, i;
4975 /* Get the iSCSI APP TLV */
4976 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4977
4978 for (i = 0; i < dcbcfg->numapps; i++) {
4979 app = dcbcfg->app[i];
4980 if (app.selector == I40E_APP_SEL_TCPIP &&
4981 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4982 tc = dcbcfg->etscfg.prioritytable[app.priority];
4983 enabled_tc |= BIT(tc);
4984 break;
4985 }
4986 }
4987
4988 return enabled_tc;
4989 }
4990
4991 /**
4992 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4993 * @dcbcfg: the corresponding DCBx configuration structure
4994 *
4995 * Return the number of TCs from given DCBx configuration
4996 **/
4997 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4998 {
4999 int i, tc_unused = 0;
5000 u8 num_tc = 0;
5001 u8 ret = 0;
5002
5003 /* Scan the ETS Config Priority Table to find
5004 * traffic class enabled for a given priority
5005 * and create a bitmask of enabled TCs
5006 */
5007 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
5008 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
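/* e.g. a priority table mapping UPs to TCs {0, 0, 1, 1, 2, 2, 0, 0}
 * produces the bitmask 0x7, and the contiguity scan below then
 * reports 3 traffic classes.
 */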
5009
5010 /* Now scan the bitmask to check for
5011 * contiguous TCs starting with TC0
5012 */
5013 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5014 if (num_tc & BIT(i)) {
5015 if (!tc_unused) {
5016 ret++;
5017 } else {
5018 pr_err("Non-contiguous TC - Disabling DCB\n");
5019 return 1;
5020 }
5021 } else {
5022 tc_unused = 1;
5023 }
5024 }
5025
5026 /* There is always at least TC0 */
5027 if (!ret)
5028 ret = 1;
5029
5030 return ret;
5031 }
5032
5033 /**
5034 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5035 * @dcbcfg: the corresponding DCBx configuration structure
5036 *
5037 * Query the current DCB configuration and return a bitmap of the
5038 * traffic classes enabled by the given DCBX config
5039 **/
5040 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5041 {
5042 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5043 u8 enabled_tc = 1;
5044 u8 i;
5045
5046 for (i = 0; i < num_tc; i++)
5047 enabled_tc |= BIT(i);
5048
5049 return enabled_tc;
5050 }
5051
5052 /**
5053 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5054 * @pf: PF being queried
5055 *
5056 * Query the current MQPRIO configuration and return a bitmap of the
5057 * enabled traffic classes.
5058 **/
5059 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5060 {
5061 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5062 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5063 u8 enabled_tc = 1, i;
5064
5065 for (i = 1; i < num_tc; i++)
5066 enabled_tc |= BIT(i);
5067 return enabled_tc;
5068 }
5069
5070 /**
5071 * i40e_pf_get_num_tc - Get the number of enabled traffic classes for the PF
5072 * @pf: PF being queried
5073 *
5074 * Return number of traffic classes enabled for the given PF
5075 **/
5076 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5077 {
5078 struct i40e_hw *hw = &pf->hw;
5079 u8 i, enabled_tc = 1;
5080 u8 num_tc = 0;
5081 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5082
5083 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5084 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5085
5086 /* If neither MQPRIO nor DCB is enabled, then always use single TC */
5087 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5088 return 1;
5089
5090 /* SFP mode will be enabled for all TCs on port */
5091 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5092 return i40e_dcb_get_num_tc(dcbcfg);
5093
5094 /* MFP mode return count of enabled TCs for this PF */
5095 if (pf->hw.func_caps.iscsi)
5096 enabled_tc = i40e_get_iscsi_tc_map(pf);
5097 else
5098 return 1; /* Only TC0 */
5099
5100 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5101 if (enabled_tc & BIT(i))
5102 num_tc++;
5103 }
5104 return num_tc;
5105 }
5106
5107 /**
5108 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5109 * @pf: PF being queried
5110 *
5111 * Return a bitmap for enabled traffic classes for this PF.
5112 **/
5113 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5114 {
5115 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5116 return i40e_mqprio_get_enabled_tc(pf);
5117
5118 /* If neither MQPRIO nor DCB is enabled for this PF then just return
5119 * default TC
5120 */
5121 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5122 return I40E_DEFAULT_TRAFFIC_CLASS;
5123
5124 /* SFP mode we want PF to be enabled for all TCs */
5125 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5126 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5127
5128 /* MFP enabled and iSCSI PF type */
5129 if (pf->hw.func_caps.iscsi)
5130 return i40e_get_iscsi_tc_map(pf);
5131 else
5132 return I40E_DEFAULT_TRAFFIC_CLASS;
5133 }
5134
5135 /**
5136 * i40e_vsi_get_bw_info - Query VSI BW Information
5137 * @vsi: the VSI being queried
5138 *
5139 * Returns 0 on success, negative value on failure
5140 **/
5141 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5142 {
5143 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5144 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5145 struct i40e_pf *pf = vsi->back;
5146 struct i40e_hw *hw = &pf->hw;
5147 i40e_status ret;
5148 u32 tc_bw_max;
5149 int i;
5150
5151 /* Get the VSI level BW configuration */
5152 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5153 if (ret) {
5154 dev_info(&pf->pdev->dev,
5155 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5156 i40e_stat_str(&pf->hw, ret),
5157 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5158 return -EINVAL;
5159 }
5160
5161 /* Get the VSI level BW configuration per TC */
5162 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5163 NULL);
5164 if (ret) {
5165 dev_info(&pf->pdev->dev,
5166 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5167 i40e_stat_str(&pf->hw, ret),
5168 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5169 return -EINVAL;
5170 }
5171
5172 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5173 dev_info(&pf->pdev->dev,
5174 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5175 bw_config.tc_valid_bits,
5176 bw_ets_config.tc_valid_bits);
5177 /* Still continuing */
5178 }
5179
5180 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5181 vsi->bw_max_quanta = bw_config.max_bw;
5182 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5183 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5184 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5185 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5186 vsi->bw_ets_limit_credits[i] =
5187 le16_to_cpu(bw_ets_config.credits[i]);
5188 /* 3 bits out of 4 for each TC */
5189 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
5190 }
5191
5192 return 0;
5193 }
5194
5195 /**
5196 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5197 * @vsi: the VSI being configured
5198 * @enabled_tc: TC bitmap
5199 * @bw_share: BW shared credits per TC
5200 *
5201 * Returns 0 on success, negative value on failure
5202 **/
5203 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5204 u8 *bw_share)
5205 {
5206 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5207 i40e_status ret;
5208 int i;
5209
5210 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO)
5211 return 0;
5212 if (!vsi->mqprio_qopt.qopt.hw) {
5213 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5214 if (ret)
5215 dev_info(&vsi->back->pdev->dev,
5216 "Failed to reset tx rate for vsi->seid %u\n",
5217 vsi->seid);
5218 return ret;
5219 }
5220 bw_data.tc_valid_bits = enabled_tc;
5221 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5222 bw_data.tc_bw_credits[i] = bw_share[i];
5223
5224 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
5225 NULL);
5226 if (ret) {
5227 dev_info(&vsi->back->pdev->dev,
5228 "AQ command Config VSI BW allocation per TC failed = %d\n",
5229 vsi->back->hw.aq.asq_last_status);
5230 return -EINVAL;
5231 }
5232
5233 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5234 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5235
5236 return 0;
5237 }
5238
5239 /**
5240 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5241 * @vsi: the VSI being configured
5242 * @enabled_tc: TC map to be enabled
5243 *
5244 **/
5245 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5246 {
5247 struct net_device *netdev = vsi->netdev;
5248 struct i40e_pf *pf = vsi->back;
5249 struct i40e_hw *hw = &pf->hw;
5250 u8 netdev_tc = 0;
5251 int i;
5252 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5253
5254 if (!netdev)
5255 return;
5256
5257 if (!enabled_tc) {
5258 netdev_reset_tc(netdev);
5259 return;
5260 }
5261
5262 /* Set up actual enabled TCs on the VSI */
5263 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5264 return;
5265
5266 /* set per TC queues for the VSI */
5267 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5268 /* Only set TC queues for enabled tcs
5269 *
5270 * e.g. for a VSI that has TC0 and TC3 enabled, the
5271 * enabled_tc bitmap would be 0x9 (binary 1001); the driver
5272 * will set numtc for the netdev to 2, and the queues will be
5273 * referenced by the netdev layer as TC 0 and 1.
5274 */
5275 if (vsi->tc_config.enabled_tc & BIT(i))
5276 netdev_set_tc_queue(netdev,
5277 vsi->tc_config.tc_info[i].netdev_tc,
5278 vsi->tc_config.tc_info[i].qcount,
5279 vsi->tc_config.tc_info[i].qoffset);
5280 }
5281
5282 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5283 return;
5284
5285 /* Assign UP2TC map for the VSI */
5286 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5287 /* Get the actual TC# for the UP */
5288 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5289 /* Get the mapped netdev TC# for the UP */
5290 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5291 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5292 }
5293 }
5294
5295 /**
5296 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
5297 * @vsi: the VSI being configured
5298 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5299 **/
5300 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5301 struct i40e_vsi_context *ctxt)
5302 {
5303 /* copy just the sections touched not the entire info
5304 * since not all sections are valid as returned by
5305 * update vsi params
5306 */
5307 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5308 memcpy(&vsi->info.queue_mapping,
5309 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5310 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5311 sizeof(vsi->info.tc_mapping));
5312 }
5313
5314 /**
5315 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5316 * @vsi: VSI to be configured
5317 * @enabled_tc: TC bitmap
5318 *
5319 * This configures a particular VSI for TCs that are mapped to the
5320 * given TC bitmap. It uses default bandwidth share for TCs across
5321 * VSIs to configure TC for a particular VSI.
5322 *
5323 * NOTE:
5324 * It is expected that the VSI queues have been quiesced before calling
5325 * this function.
5326 **/
5327 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5328 {
5329 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5330 struct i40e_vsi_context ctxt;
5331 int ret = 0;
5332 int i;
5333
5334 /* Check if enabled_tc is same as existing or new TCs */
5335 if (vsi->tc_config.enabled_tc == enabled_tc &&
5336 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5337 return ret;
5338
5339 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5340 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5341 if (enabled_tc & BIT(i))
5342 bw_share[i] = 1;
5343 }
5344
5345 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5346 if (ret) {
5347 dev_info(&vsi->back->pdev->dev,
5348 "Failed configuring TC map %d for VSI %d\n",
5349 enabled_tc, vsi->seid);
5350 goto out;
5351 }
5352
5353 /* Update Queue Pairs Mapping for currently enabled UPs */
5354 ctxt.seid = vsi->seid;
5355 ctxt.pf_num = vsi->back->hw.pf_id;
5356 ctxt.vf_num = 0;
5357 ctxt.uplink_seid = vsi->uplink_seid;
5358 ctxt.info = vsi->info;
5359 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5360 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5361 if (ret)
5362 goto out;
5363 } else {
5364 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
5365 }
5366
5367 /* On destroying the qdisc, reset vsi->rss_size, as the number of enabled
5368 * queues changed.
5369 */
5370 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5371 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5372 vsi->num_queue_pairs);
5373 ret = i40e_vsi_config_rss(vsi);
5374 if (ret) {
5375 dev_info(&vsi->back->pdev->dev,
5376 "Failed to reconfig rss for num_queues\n");
5377 return ret;
5378 }
5379 vsi->reconfig_rss = false;
5380 }
5381 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5382 ctxt.info.valid_sections |=
5383 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5384 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5385 }
5386
5387 /* Update the VSI after updating the VSI queue-mapping
5388 * information
5389 */
5390 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
5391 if (ret) {
5392 dev_info(&vsi->back->pdev->dev,
5393 "Update vsi tc config failed, err %s aq_err %s\n",
5394 i40e_stat_str(&vsi->back->hw, ret),
5395 i40e_aq_str(&vsi->back->hw,
5396 vsi->back->hw.aq.asq_last_status));
5397 goto out;
5398 }
5399 /* update the local VSI info with updated queue map */
5400 i40e_vsi_update_queue_map(vsi, &ctxt);
5401 vsi->info.valid_sections = 0;
5402
5403 /* Update current VSI BW information */
5404 ret = i40e_vsi_get_bw_info(vsi);
5405 if (ret) {
5406 dev_info(&vsi->back->pdev->dev,
5407 "Failed updating vsi bw info, err %s aq_err %s\n",
5408 i40e_stat_str(&vsi->back->hw, ret),
5409 i40e_aq_str(&vsi->back->hw,
5410 vsi->back->hw.aq.asq_last_status));
5411 goto out;
5412 }
5413
5414 /* Update the netdev TC setup */
5415 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5416 out:
5417 return ret;
5418 }
5419
5420 /**
5421 * i40e_get_link_speed - Returns link speed for the interface
5422 * @vsi: VSI to be configured
5423 *
5424 **/
5425 int i40e_get_link_speed(struct i40e_vsi *vsi)
5426 {
5427 struct i40e_pf *pf = vsi->back;
5428
5429 switch (pf->hw.phy.link_info.link_speed) {
5430 case I40E_LINK_SPEED_40GB:
5431 return 40000;
5432 case I40E_LINK_SPEED_25GB:
5433 return 25000;
5434 case I40E_LINK_SPEED_20GB:
5435 return 20000;
5436 case I40E_LINK_SPEED_10GB:
5437 return 10000;
5438 case I40E_LINK_SPEED_1GB:
5439 return 1000;
5440 default:
5441 return -EINVAL;
5442 }
5443 }
5444
5445 /**
5446 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5447 * @vsi: VSI to be configured
5448 * @seid: seid of the channel/VSI
5449 * @max_tx_rate: max TX rate to be configured as BW limit
5450 *
5451 * Helper function to set BW limit for a given VSI
5452 **/
5453 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5454 {
5455 struct i40e_pf *pf = vsi->back;
5456 u64 credits = 0;
5457 int speed = 0;
5458 int ret = 0;
5459
5460 speed = i40e_get_link_speed(vsi);
5461 if (max_tx_rate > speed) {
5462 dev_err(&pf->pdev->dev,
5463 "Invalid max tx rate %llu specified for VSI seid %d.",
5464 max_tx_rate, seid);
5465 return -EINVAL;
5466 }
5467 if (max_tx_rate && max_tx_rate < 50) {
5468 dev_warn(&pf->pdev->dev,
5469 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5470 max_tx_rate = 50;
5471 }
5472
5473 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5474 credits = max_tx_rate;
5475 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5476 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5477 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5478 if (ret)
5479 dev_err(&pf->pdev->dev,
5480 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5481 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
5482 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5483 return ret;
5484 }
5485
5486 /**
5487 * i40e_remove_queue_channels - Remove queue channels for the TCs
5488 * @vsi: VSI to be configured
5489 *
5490 * Remove queue channels for the TCs
5491 **/
5492 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5493 {
5494 enum i40e_admin_queue_err last_aq_status;
5495 struct i40e_cloud_filter *cfilter;
5496 struct i40e_channel *ch, *ch_tmp;
5497 struct i40e_pf *pf = vsi->back;
5498 struct hlist_node *node;
5499 int ret, i;
5500
5501 /* Reset rss size that was stored when reconfiguring rss for
5502 * channel VSIs with non-power-of-2 queue count.
5503 */
5504 vsi->current_rss_size = 0;
5505
5506 /* perform cleanup for channels if they exist */
5507 if (list_empty(&vsi->ch_list))
5508 return;
5509
5510 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5511 struct i40e_vsi *p_vsi;
5512
5513 list_del(&ch->list);
5514 p_vsi = ch->parent_vsi;
5515 if (!p_vsi || !ch->initialized) {
5516 kfree(ch);
5517 continue;
5518 }
5519 /* Reset queue contexts */
5520 for (i = 0; i < ch->num_queue_pairs; i++) {
5521 struct i40e_ring *tx_ring, *rx_ring;
5522 u16 pf_q;
5523
5524 pf_q = ch->base_queue + i;
5525 tx_ring = vsi->tx_rings[pf_q];
5526 tx_ring->ch = NULL;
5527
5528 rx_ring = vsi->rx_rings[pf_q];
5529 rx_ring->ch = NULL;
5530 }
5531
5532 /* Reset BW configured for this VSI via mqprio */
5533 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5534 if (ret)
5535 dev_info(&vsi->back->pdev->dev,
5536 "Failed to reset tx rate for ch->seid %u\n",
5537 ch->seid);
5538
5539 /* delete cloud filters associated with this channel */
5540 hlist_for_each_entry_safe(cfilter, node,
5541 &pf->cloud_filter_list, cloud_node) {
5542 if (cfilter->seid != ch->seid)
5543 continue;
5544
5545 hash_del(&cfilter->cloud_node);
5546 if (cfilter->dst_port)
5547 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5548 cfilter,
5549 false);
5550 else
5551 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5552 false);
5553 last_aq_status = pf->hw.aq.asq_last_status;
5554 if (ret)
5555 dev_info(&pf->pdev->dev,
5556 "Failed to delete cloud filter, err %s aq_err %s\n",
5557 i40e_stat_str(&pf->hw, ret),
5558 i40e_aq_str(&pf->hw, last_aq_status));
5559 kfree(cfilter);
5560 }
5561
5562 /* delete VSI from FW */
5563 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5564 NULL);
5565 if (ret)
5566 dev_err(&vsi->back->pdev->dev,
5567 "unable to remove channel (%d) for parent VSI(%d)\n",
5568 ch->seid, p_vsi->seid);
5569 kfree(ch);
5570 }
5571 INIT_LIST_HEAD(&vsi->ch_list);
5572 }
5573
5574 /**
5575 * i40e_is_any_channel - channel exists or not
5576 * @vsi: ptr to VSI with which channels are associated
5577 *
5578 * Returns true if any channel(s) exist for the associated VSI, false otherwise
5579 **/
5580 static bool i40e_is_any_channel(struct i40e_vsi *vsi)
5581 {
5582 struct i40e_channel *ch, *ch_tmp;
5583
5584 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5585 if (ch->initialized)
5586 return true;
5587 }
5588
5589 return false;
5590 }
5591
5592 /**
5593 * i40e_get_max_queues_for_channel
5594 * @vsi: ptr to VSI with which channels are associated
5595 *
5596 * Helper function which returns max value among the queue counts set on the
5597 * channels/TCs created.
5598 **/
5599 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5600 {
5601 struct i40e_channel *ch, *ch_tmp;
5602 int max = 0;
5603
5604 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5605 if (!ch->initialized)
5606 continue;
5607 if (ch->num_queue_pairs > max)
5608 max = ch->num_queue_pairs;
5609 }
5610
5611 return max;
5612 }
5613
5614 /**
5615 * i40e_validate_num_queues - validate num_queues w.r.t. channel
5616 * @pf: ptr to PF device
5617 * @num_queues: number of queues
5618 * @vsi: the parent VSI
5619 * @reconfig_rss: indicates whether RSS should be reconfigured or not
5620 *
5621 * This function validates number of queues in the context of new channel
5622 * which is being established and determines if RSS should be reconfigured
5623 * or not for parent VSI.
5624 **/
5625 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5626 struct i40e_vsi *vsi, bool *reconfig_rss)
5627 {
5628 int max_ch_queues;
5629
5630 if (!reconfig_rss)
5631 return -EINVAL;
5632
5633 *reconfig_rss = false;
5634 if (vsi->current_rss_size) {
5635 if (num_queues > vsi->current_rss_size) {
5636 dev_dbg(&pf->pdev->dev,
5637 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5638 num_queues, vsi->current_rss_size);
5639 return -EINVAL;
5640 } else if ((num_queues < vsi->current_rss_size) &&
5641 (!is_power_of_2(num_queues))) {
5642 dev_dbg(&pf->pdev->dev,
5643 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5644 num_queues, vsi->current_rss_size);
5645 return -EINVAL;
5646 }
5647 }
5648
5649 if (!is_power_of_2(num_queues)) {
5650 /* Find the max num_queues configured for a channel if any
5651 * channels exist.
5652 * If channels exist, then enforce 'num_queues' to be more than
5653 * the max queues ever configured for a channel.
5654 */
5655 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5656 if (num_queues < max_ch_queues) {
5657 dev_dbg(&pf->pdev->dev,
5658 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5659 num_queues, max_ch_queues);
5660 return -EINVAL;
5661 }
5662 *reconfig_rss = true;
5663 }
5664
5665 return 0;
5666 }
5667
5668 /**
5669 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5670 * @vsi: the VSI being setup
5671 * @rss_size: size of RSS; the LUT gets reprogrammed accordingly
5672 *
5673 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
5674 **/
5675 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5676 {
5677 struct i40e_pf *pf = vsi->back;
5678 u8 seed[I40E_HKEY_ARRAY_SIZE];
5679 struct i40e_hw *hw = &pf->hw;
5680 int local_rss_size;
5681 u8 *lut;
5682 int ret;
5683
5684 if (!vsi->rss_size)
5685 return -EINVAL;
5686
5687 if (rss_size > vsi->rss_size)
5688 return -EINVAL;
5689
5690 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5691 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5692 if (!lut)
5693 return -ENOMEM;
5694
5695 /* Ignoring user configured lut if there is one */
5696 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5697
5698 /* Use user configured hash key if there is one, otherwise
5699 * use default.
5700 */
5701 if (vsi->rss_hkey_user)
5702 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5703 else
5704 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5705
5706 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5707 if (ret) {
5708 dev_info(&pf->pdev->dev,
5709 "Cannot set RSS lut, err %s aq_err %s\n",
5710 i40e_stat_str(hw, ret),
5711 i40e_aq_str(hw, hw->aq.asq_last_status));
5712 kfree(lut);
5713 return ret;
5714 }
5715 kfree(lut);
5716
5717 /* Do the update w.r.t. storing rss_size */
5718 if (!vsi->orig_rss_size)
5719 vsi->orig_rss_size = vsi->rss_size;
5720 vsi->current_rss_size = local_rss_size;
5721
5722 return ret;
5723 }
5724
5725 /**
5726 * i40e_channel_setup_queue_map - Setup a channel queue map
5727 * @pf: ptr to PF device
5729 * @ctxt: VSI context structure
5730 * @ch: ptr to channel structure
5731 *
5732 * Setup queue map for a specific channel
5733 **/
5734 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5735 struct i40e_vsi_context *ctxt,
5736 struct i40e_channel *ch)
5737 {
5738 u16 qcount, qmap, sections = 0;
5739 u8 offset = 0;
5740 int pow;
5741
5742 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5743 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5744
5745 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5746 ch->num_queue_pairs = qcount;
5747
5748 /* find the next higher power-of-2 of num queue pairs */
5749 pow = ilog2(qcount);
5750 if (!is_power_of_2(qcount))
5751 pow++;
5752
5753 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5754 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
5755
5756 /* Setup queue TC[0].qmap for given VSI context */
5757 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5758
5759 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
5760 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5761 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
5762 ctxt->info.valid_sections |= cpu_to_le16(sections);
5763 }
5764
5765 /**
5766 * i40e_add_channel - add a channel by adding VSI
5767 * @pf: ptr to PF device
5768 * @uplink_seid: underlying HW switching element (VEB) ID
5769 * @ch: ptr to channel structure
5770 *
5771 * Add a channel (VSI) using add_vsi and queue_map
5772 **/
5773 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
5774 struct i40e_channel *ch)
5775 {
5776 struct i40e_hw *hw = &pf->hw;
5777 struct i40e_vsi_context ctxt;
5778 u8 enabled_tc = 0x1; /* TC0 enabled */
5779 int ret;
5780
5781 if (ch->type != I40E_VSI_VMDQ2) {
5782 dev_info(&pf->pdev->dev,
5783 "add new vsi failed, ch->type %d\n", ch->type);
5784 return -EINVAL;
5785 }
5786
5787 memset(&ctxt, 0, sizeof(ctxt));
5788 ctxt.pf_num = hw->pf_id;
5789 ctxt.vf_num = 0;
5790 ctxt.uplink_seid = uplink_seid;
5791 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5792 if (ch->type == I40E_VSI_VMDQ2)
5793 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5794
5795 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
5796 ctxt.info.valid_sections |=
5797 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5798 ctxt.info.switch_id =
5799 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5800 }
5801
5802 /* Set queue map for a given VSI context */
5803 i40e_channel_setup_queue_map(pf, &ctxt, ch);
5804
5805 /* Now time to create VSI */
5806 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5807 if (ret) {
5808 dev_info(&pf->pdev->dev,
5809 "add new vsi failed, err %s aq_err %s\n",
5810 i40e_stat_str(&pf->hw, ret),
5811 i40e_aq_str(&pf->hw,
5812 pf->hw.aq.asq_last_status));
5813 return -ENOENT;
5814 }
5815
5816 /* Success, update channel */
5817 ch->enabled_tc = enabled_tc;
5818 ch->seid = ctxt.seid;
5819 ch->vsi_number = ctxt.vsi_number;
5820 ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
5821
5822 /* copy just the sections touched, not the entire info,
5823 * since not all sections are valid as returned by
5824 * update vsi params
5825 */
5826 ch->info.mapping_flags = ctxt.info.mapping_flags;
5827 memcpy(&ch->info.queue_mapping,
5828 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
5829 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
5830 sizeof(ctxt.info.tc_mapping));
5831
5832 return 0;
5833 }
5834
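/**
* i40e_channel_config_bw - configure BW allocation for a channel (VSI)
* @vsi: the parent VSI
* @ch: ptr to channel structure
* @bw_share: bandwidth share per traffic class
*
* Configure the per-TC bandwidth allocation for the channel's VSI and store
* the returned queue set handles in the channel info.
**/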
5835 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
5836 u8 *bw_share)
5837 {
5838 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5839 i40e_status ret;
5840 int i;
5841
5842 bw_data.tc_valid_bits = ch->enabled_tc;
5843 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5844 bw_data.tc_bw_credits[i] = bw_share[i];
5845
5846 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
5847 &bw_data, NULL);
5848 if (ret) {
5849 dev_info(&vsi->back->pdev->dev,
5850 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
5851 vsi->back->hw.aq.asq_last_status, ch->seid);
5852 return -EINVAL;
5853 }
5854
5855 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5856 ch->info.qs_handle[i] = bw_data.qs_handles[i];
5857
5858 return 0;
5859 }
5860
5861 /**
5862 * i40e_channel_config_tx_ring - config TX ring associated with new channel
5863 * @pf: ptr to PF device
5864 * @vsi: the VSI being setup
5865 * @ch: ptr to channel structure
5866 *
5867 * Configure TX rings associated with the channel (VSI) since queues are
5868 * being taken from the parent VSI.
5869 **/
5870 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
5871 struct i40e_vsi *vsi,
5872 struct i40e_channel *ch)
5873 {
5874 i40e_status ret;
5875 int i;
5876 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5877
5878 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5879 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5880 if (ch->enabled_tc & BIT(i))
5881 bw_share[i] = 1;
5882 }
5883
5884 /* configure BW for new VSI */
5885 ret = i40e_channel_config_bw(vsi, ch, bw_share);
5886 if (ret) {
5887 dev_info(&vsi->back->pdev->dev,
5888 "Failed configuring TC map %d for channel (seid %u)\n",
5889 ch->enabled_tc, ch->seid);
5890 return ret;
5891 }
5892
5893 for (i = 0; i < ch->num_queue_pairs; i++) {
5894 struct i40e_ring *tx_ring, *rx_ring;
5895 u16 pf_q;
5896
5897 pf_q = ch->base_queue + i;
5898
5899 /* Get the TX ring ptr from the main VSI, to re-setup the TX
5900 * queue context
5901 */
5902 tx_ring = vsi->tx_rings[pf_q];
5903 tx_ring->ch = ch;
5904
5905 /* Get the RX ring ptr */
5906 rx_ring = vsi->rx_rings[pf_q];
5907 rx_ring->ch = ch;
5908 }
5909
5910 return 0;
5911 }
5912
5913 /**
5914 * i40e_setup_hw_channel - setup new channel
5915 * @pf: ptr to PF device
5916 * @vsi: the VSI being setup
5917 * @ch: ptr to channel structure
5918 * @uplink_seid: underlying HW switching element (VEB) ID
5919 * @type: type of channel to be created (VMDq2/VF)
5920 *
5921 * Setup new channel (VSI) based on specified type (VMDq2/VF)
5922 * and configures TX rings accordingly
5923 **/
5924 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
5925 struct i40e_vsi *vsi,
5926 struct i40e_channel *ch,
5927 u16 uplink_seid, u8 type)
5928 {
5929 int ret;
5930
5931 ch->initialized = false;
5932 ch->base_queue = vsi->next_base_queue;
5933 ch->type = type;
5934
5935 /* Proceed with creation of channel (VMDq2) VSI */
5936 ret = i40e_add_channel(pf, uplink_seid, ch);
5937 if (ret) {
5938 dev_info(&pf->pdev->dev,
5939 "failed to add_channel using uplink_seid %u\n",
5940 uplink_seid);
5941 return ret;
5942 }
5943
5944 /* Mark the successful creation of channel */
5945 ch->initialized = true;
5946
5947 /* Reconfigure TX queues using QTX_CTL register */
5948 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
5949 if (ret) {
5950 dev_info(&pf->pdev->dev,
5951 "failed to configure TX rings for channel %u\n",
5952 ch->seid);
5953 return ret;
5954 }
5955
5956 /* update 'next_base_queue' */
5957 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
5958 dev_dbg(&pf->pdev->dev,
5959 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
5960 ch->seid, ch->vsi_number, ch->stat_counter_idx,
5961 ch->num_queue_pairs,
5962 vsi->next_base_queue);
5963 return ret;
5964 }
5965
5966 /**
5967 * i40e_setup_channel - setup new channel using uplink element
5968 * @pf: ptr to PF device
5969 * @vsi: ptr to the VSI on which the channel is being created
5970 * @ch: ptr to channel structure
5972 *
5973 * Setup new channel (VSI) based on specified type (VMDq2/VF)
5974 * and uplink switching element (uplink_seid)
5975 **/
5976 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
5977 struct i40e_channel *ch)
5978 {
5979 u8 vsi_type;
5980 u16 seid;
5981 int ret;
5982
5983 if (vsi->type == I40E_VSI_MAIN) {
5984 vsi_type = I40E_VSI_VMDQ2;
5985 } else {
5986 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
5987 vsi->type);
5988 return false;
5989 }
5990
5991 /* underlying switching element */
5992 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
5993
5994 /* create channel (VSI), configure TX rings */
5995 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
5996 if (ret) {
5997 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
5998 return false;
5999 }
6000
6001 return ch->initialized;
6002 }
6003
6004 /**
6005 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6006 * @vsi: ptr to VSI which has PF backing
6007 *
6008 * Sets up the switch mode correctly if it needs to be changed, restricting
6009 * it to the modes that are allowed.
6010 **/
6011 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6012 {
6013 u8 mode;
6014 struct i40e_pf *pf = vsi->back;
6015 struct i40e_hw *hw = &pf->hw;
6016 int ret;
6017
6018 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6019 if (ret)
6020 return -EINVAL;
6021
6022 if (hw->dev_caps.switch_mode) {
6023 /* if switch mode is set, support mode2 (non-tunneled for
6024 * cloud filter) for now
6025 */
6026 u32 switch_mode = hw->dev_caps.switch_mode &
6027 I40E_SWITCH_MODE_MASK;
6028 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6029 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6030 return 0;
6031 dev_err(&pf->pdev->dev,
6032 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6033 hw->dev_caps.switch_mode);
6034 return -EINVAL;
6035 }
6036 }
6037
6038 /* Set Bit 7 to be valid */
6039 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6040
6041 /* Set L4type to both TCP and UDP support */
6042 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;
6043
6044 /* Set cloud filter mode */
6045 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6046
6047 /* Prep mode field for set_switch_config */
6048 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6049 pf->last_sw_conf_valid_flags,
6050 mode, NULL);
6051 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6052 dev_err(&pf->pdev->dev,
6053 "couldn't set switch config bits, err %s aq_err %s\n",
6054 i40e_stat_str(hw, ret),
6055 i40e_aq_str(hw,
6056 hw->aq.asq_last_status));
6057
6058 return ret;
6059 }
6060
6061 /**
6062 * i40e_create_queue_channel - function to create channel
6063 * @vsi: VSI to be configured
6064 * @ch: ptr to channel (it contains channel specific params)
6065 *
6066 * This function creates a channel (VSI) using the num_queues specified by
6067 * the user, and reconfigures RSS if needed.
6068 **/
6069 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6070 struct i40e_channel *ch)
6071 {
6072 struct i40e_pf *pf = vsi->back;
6073 bool reconfig_rss;
6074 int err;
6075
6076 if (!ch)
6077 return -EINVAL;
6078
6079 if (!ch->num_queue_pairs) {
6080 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6081 ch->num_queue_pairs);
6082 return -EINVAL;
6083 }
6084
6085 /* validate user requested num_queues for channel */
6086 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6087 &reconfig_rss);
6088 if (err) {
6089 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6090 ch->num_queue_pairs);
6091 return -EINVAL;
6092 }
6093
6094 /* By default we are in VEPA mode; if this is the first VF/VMDq
6095 * VSI to be added, switch to VEB mode.
6096 */
6097 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
6098 (!i40e_is_any_channel(vsi))) {
6099 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
6100 dev_dbg(&pf->pdev->dev,
6101 "Failed to create channel. Override queues (%u) not power of 2\n",
6102 vsi->tc_config.tc_info[0].qcount);
6103 return -EINVAL;
6104 }
6105
6106 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6107 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6108
6109 if (vsi->type == I40E_VSI_MAIN) {
6110 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6111 i40e_do_reset(pf, I40E_PF_RESET_FLAG,
6112 true);
6113 else
6114 i40e_do_reset_safe(pf,
6115 I40E_PF_RESET_FLAG);
6116 }
6117 }
6118 /* From now on, for the main VSI the number of queues will be the
6119 * value of TC0's queue count
6120 */
6121 }
6122
6123 /* By this time, vsi->cnt_q_avail should be non-zero and
6124 * at least num_queues
6125 */
6126 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6127 dev_dbg(&pf->pdev->dev,
6128 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6129 vsi->cnt_q_avail, ch->num_queue_pairs);
6130 return -EINVAL;
6131 }
6132
6133 /* reconfig_rss only if vsi type is MAIN_VSI */
6134 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6135 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6136 if (err) {
6137 dev_info(&pf->pdev->dev,
6138 "Error: unable to reconfig rss for num_queues (%u)\n",
6139 ch->num_queue_pairs);
6140 return -EINVAL;
6141 }
6142 }
6143
6144 if (!i40e_setup_channel(pf, vsi, ch)) {
6145 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6146 return -EINVAL;
6147 }
6148
6149 dev_info(&pf->pdev->dev,
6150 "Setup channel (id:%u) utilizing num_queues %d\n",
6151 ch->seid, ch->num_queue_pairs);
6152
6153 /* configure VSI for BW limit */
6154 if (ch->max_tx_rate) {
6155 u64 credits = ch->max_tx_rate;
6156
6157 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6158 return -EINVAL;
6159
6160 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6161 dev_dbg(&pf->pdev->dev,
6162 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6163 ch->max_tx_rate,
6164 credits,
6165 ch->seid);
6166 }
6167
6168 /* in case of VF, this will be main SRIOV VSI */
6169 ch->parent_vsi = vsi;
6170
6171 /* and update main_vsi's count for queue_available to use */
6172 vsi->cnt_q_avail -= ch->num_queue_pairs;
6173
6174 return 0;
6175 }
6176
6177 /**
6178 * i40e_configure_queue_channels - Add queue channel for the given TCs
6179 * @vsi: VSI to be configured
6180 *
6181 * Configures queue channel mapping to the given TCs
6182 **/
6183 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6184 {
6185 struct i40e_channel *ch;
6186 u64 max_rate = 0;
6187 int ret = 0, i;
6188
6189 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6190 vsi->tc_seid_map[0] = vsi->seid;
6191 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6192 if (vsi->tc_config.enabled_tc & BIT(i)) {
6193 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6194 if (!ch) {
6195 ret = -ENOMEM;
6196 goto err_free;
6197 }
6198
6199 INIT_LIST_HEAD(&ch->list);
6200 ch->num_queue_pairs =
6201 vsi->tc_config.tc_info[i].qcount;
6202 ch->base_queue =
6203 vsi->tc_config.tc_info[i].qoffset;
6204
6205 /* Bandwidth limit through tc interface is in bytes/s,
6206 * change to Mbit/s
6207 */
6208 max_rate = vsi->mqprio_qopt.max_rate[i];
6209 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6210 ch->max_tx_rate = max_rate;
6211
6212 list_add_tail(&ch->list, &vsi->ch_list);
6213
6214 ret = i40e_create_queue_channel(vsi, ch);
6215 if (ret) {
6216 dev_err(&vsi->back->pdev->dev,
6217 "Failed creating queue channel with TC%d: queues %d\n",
6218 i, ch->num_queue_pairs);
6219 goto err_free;
6220 }
6221 vsi->tc_seid_map[i] = ch->seid;
6222 }
6223 }
6224 return ret;
6225
6226 err_free:
6227 i40e_remove_queue_channels(vsi);
6228 return ret;
6229 }
6230
6231 /**
6232 * i40e_veb_config_tc - Configure TCs for given VEB
6233 * @veb: given VEB
6234 * @enabled_tc: TC bitmap
6235 *
6236 * Configures given TC bitmap for VEB (switching) element
6237 **/
6238 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6239 {
6240 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6241 struct i40e_pf *pf = veb->pf;
6242 int ret = 0;
6243 int i;
6244
6245 /* No TCs, or TCs already enabled, just return */
6246 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6247 return ret;
6248
6249 bw_data.tc_valid_bits = enabled_tc;
6250 /* bw_data.absolute_credits is not set (relative) */
6251
6252 /* Enable ETS TCs with equal BW Share for now */
6253 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6254 if (enabled_tc & BIT(i))
6255 bw_data.tc_bw_share_credits[i] = 1;
6256 }
6257
6258 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6259 &bw_data, NULL);
6260 if (ret) {
6261 dev_info(&pf->pdev->dev,
6262 "VEB bw config failed, err %s aq_err %s\n",
6263 i40e_stat_str(&pf->hw, ret),
6264 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6265 goto out;
6266 }
6267
6268 /* Update the BW information */
6269 ret = i40e_veb_get_bw_info(veb);
6270 if (ret) {
6271 dev_info(&pf->pdev->dev,
6272 "Failed getting veb bw config, err %s aq_err %s\n",
6273 i40e_stat_str(&pf->hw, ret),
6274 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6275 }
6276
6277 out:
6278 return ret;
6279 }
6280
6281 #ifdef CONFIG_I40E_DCB
6282 /**
6283 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6284 * @pf: PF struct
6285 *
6286 * Reconfigure VEB/VSIs on a given PF; it is assumed that
6287 * the caller would've quiesced all the VSIs before calling
6288 * this function
6289 **/
6290 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6291 {
6292 u8 tc_map = 0;
6293 int ret;
6294 u8 v;
6295
6296 /* Enable the TCs available on PF to all VEBs */
6297 tc_map = i40e_pf_get_tc_map(pf);
6298 for (v = 0; v < I40E_MAX_VEB; v++) {
6299 if (!pf->veb[v])
6300 continue;
6301 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6302 if (ret) {
6303 dev_info(&pf->pdev->dev,
6304 "Failed configuring TC for VEB seid=%d\n",
6305 pf->veb[v]->seid);
6306 /* Will try to configure as many components as possible */
6307 }
6308 }
6309
6310 /* Update each VSI */
6311 for (v = 0; v < pf->num_alloc_vsi; v++) {
6312 if (!pf->vsi[v])
6313 continue;
6314
6315 /* - Enable all TCs for the LAN VSI
6316 * - For all others keep them at TC0 for now
6317 */
6318 if (v == pf->lan_vsi)
6319 tc_map = i40e_pf_get_tc_map(pf);
6320 else
6321 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6322
6323 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6324 if (ret) {
6325 dev_info(&pf->pdev->dev,
6326 "Failed configuring TC for VSI seid=%d\n",
6327 pf->vsi[v]->seid);
6328 /* Will try to configure as many components as possible */
6329 } else {
6330 /* Re-configure VSI vectors based on updated TC map */
6331 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6332 if (pf->vsi[v]->netdev)
6333 i40e_dcbnl_set_all(pf->vsi[v]);
6334 }
6335 }
6336 }
6337
6338 /**
6339 * i40e_resume_port_tx - Resume port Tx
6340 * @pf: PF struct
6341 *
6342 * Resume a port's Tx and issue a PF reset in case of failure to
6343 * resume.
6344 **/
6345 static int i40e_resume_port_tx(struct i40e_pf *pf)
6346 {
6347 struct i40e_hw *hw = &pf->hw;
6348 int ret;
6349
6350 ret = i40e_aq_resume_port_tx(hw, NULL);
6351 if (ret) {
6352 dev_info(&pf->pdev->dev,
6353 "Resume Port Tx failed, err %s aq_err %s\n",
6354 i40e_stat_str(&pf->hw, ret),
6355 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6356 /* Schedule PF reset to recover */
6357 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6358 i40e_service_event_schedule(pf);
6359 }
6360
6361 return ret;
6362 }
6363
6364 /**
6365 * i40e_init_pf_dcb - Initialize DCB configuration
6366 * @pf: PF being configured
6367 *
6368 * Query the current DCB configuration and cache it
6369 * in the hardware structure
6370 **/
6371 static int i40e_init_pf_dcb(struct i40e_pf *pf)
6372 {
6373 struct i40e_hw *hw = &pf->hw;
6374 int err = 0;
6375
6376 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
6377 if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
6378 goto out;
6379
6380 /* Get the initial DCB configuration */
6381 err = i40e_init_dcb(hw);
6382 if (!err) {
6383 /* Device/Function is not DCBX capable */
6384 if ((!hw->func_caps.dcb) ||
6385 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6386 dev_info(&pf->pdev->dev,
6387 "DCBX offload is not supported or is disabled for this PF.\n");
6388 } else {
6389 /* When status is not DISABLED then DCBX is in FW */
6390 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6391 DCB_CAP_DCBX_VER_IEEE;
6392
6393 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6394 /* Enable DCB tagging only when more than one TC is present,
6395 * or explicitly disable it if there is only one TC
6396 */
6397 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6398 pf->flags |= I40E_FLAG_DCB_ENABLED;
6399 else
6400 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6401 dev_dbg(&pf->pdev->dev,
6402 "DCBX offload is supported for this PF.\n");
6403 }
6404 } else {
6405 dev_info(&pf->pdev->dev,
6406 "Query for DCB configuration failed, err %s aq_err %s\n",
6407 i40e_stat_str(&pf->hw, err),
6408 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6409 }
6410
6411 out:
6412 return err;
6413 }
6414 #endif /* CONFIG_I40E_DCB */
6415 #define SPEED_SIZE 14
6416 #define FC_SIZE 8
6417 /**
6418 * i40e_print_link_message - print link up or down
6419 * @vsi: the VSI for which link needs a message
* @isup: true if link is up, false if link is down
6420 **/
6421 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6422 {
6423 enum i40e_aq_link_speed new_speed;
6424 struct i40e_pf *pf = vsi->back;
6425 char *speed = "Unknown";
6426 char *fc = "Unknown";
6427 char *fec = "";
6428 char *req_fec = "";
6429 char *an = "";
6430
6431 new_speed = pf->hw.phy.link_info.link_speed;
6432
6433 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
6434 return;
6435 vsi->current_isup = isup;
6436 vsi->current_speed = new_speed;
6437 if (!isup) {
6438 netdev_info(vsi->netdev, "NIC Link is Down\n");
6439 return;
6440 }
6441
6442 /* Warn user if link speed on NPAR enabled partition is not at
6443 * least 10Gbps
6444 */
6445 if (pf->hw.func_caps.npar_enable &&
6446 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
6447 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
6448 netdev_warn(vsi->netdev,
6449 "The partition detected link speed that is less than 10Gbps\n");
6450
6451 switch (pf->hw.phy.link_info.link_speed) {
6452 case I40E_LINK_SPEED_40GB:
6453 speed = "40 G";
6454 break;
6455 case I40E_LINK_SPEED_20GB:
6456 speed = "20 G";
6457 break;
6458 case I40E_LINK_SPEED_25GB:
6459 speed = "25 G";
6460 break;
6461 case I40E_LINK_SPEED_10GB:
6462 speed = "10 G";
6463 break;
6464 case I40E_LINK_SPEED_1GB:
6465 speed = "1000 M";
6466 break;
6467 case I40E_LINK_SPEED_100MB:
6468 speed = "100 M";
6469 break;
6470 default:
6471 break;
6472 }
6473
6474 switch (pf->hw.fc.current_mode) {
6475 case I40E_FC_FULL:
6476 fc = "RX/TX";
6477 break;
6478 case I40E_FC_TX_PAUSE:
6479 fc = "TX";
6480 break;
6481 case I40E_FC_RX_PAUSE:
6482 fc = "RX";
6483 break;
6484 default:
6485 fc = "None";
6486 break;
6487 }
6488
6489 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
6490 req_fec = ", Requested FEC: None";
6491 fec = ", FEC: None";
6492 an = ", Autoneg: False";
6493
6494 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6495 an = ", Autoneg: True";
6496
6497 if (pf->hw.phy.link_info.fec_info &
6498 I40E_AQ_CONFIG_FEC_KR_ENA)
6499 fec = ", FEC: CL74 FC-FEC/BASE-R";
6500 else if (pf->hw.phy.link_info.fec_info &
6501 I40E_AQ_CONFIG_FEC_RS_ENA)
6502 fec = ", FEC: CL108 RS-FEC";
6503
6504 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
6505 * both RS and FC are requested
6506 */
6507 if (vsi->back->hw.phy.link_info.req_fec_info &
6508 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
6509 if (vsi->back->hw.phy.link_info.req_fec_info &
6510 I40E_AQ_REQUEST_FEC_RS)
6511 req_fec = ", Requested FEC: CL108 RS-FEC";
6512 else
6513 req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
6514 }
6515 }
6516
6517 netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
6518 speed, req_fec, fec, an, fc);
6519 }
6520
6521 /**
6522 * i40e_up_complete - Finish the last steps of bringing up a connection
6523 * @vsi: the VSI being configured
6524 **/
6525 static int i40e_up_complete(struct i40e_vsi *vsi)
6526 {
6527 struct i40e_pf *pf = vsi->back;
6528 int err;
6529
6530 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6531 i40e_vsi_configure_msix(vsi);
6532 else
6533 i40e_configure_msi_and_legacy(vsi);
6534
6535 /* start rings */
6536 err = i40e_vsi_start_rings(vsi);
6537 if (err)
6538 return err;
6539
6540 clear_bit(__I40E_VSI_DOWN, vsi->state);
6541 i40e_napi_enable_all(vsi);
6542 i40e_vsi_enable_irq(vsi);
6543
6544 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
6545 (vsi->netdev)) {
6546 i40e_print_link_message(vsi, true);
6547 netif_tx_start_all_queues(vsi->netdev);
6548 netif_carrier_on(vsi->netdev);
6549 }
6550
6551 /* replay FDIR SB filters */
6552 if (vsi->type == I40E_VSI_FDIR) {
6553 /* reset fd counters */
6554 pf->fd_add_err = 0;
6555 pf->fd_atr_cnt = 0;
6556 i40e_fdir_filter_restore(vsi);
6557 }
6558
6559 /* On the next run of the service_task, notify any clients of the
6560 * newly opened netdev
6561 */
6562 pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
6563 i40e_service_event_schedule(pf);
6564
6565 return 0;
6566 }
6567
6568 /**
6569 * i40e_vsi_reinit_locked - Reset the VSI
6570 * @vsi: the VSI being configured
6571 *
6572 * Rebuild the ring structs after some configuration
6573 * has changed, e.g. MTU size.
6574 **/
6575 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
6576 {
6577 struct i40e_pf *pf = vsi->back;
6578
6579 WARN_ON(in_interrupt());
6580 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
6581 usleep_range(1000, 2000);
6582 i40e_down(vsi);
6583
6584 i40e_up(vsi);
6585 clear_bit(__I40E_CONFIG_BUSY, pf->state);
6586 }
6587
6588 /**
6589 * i40e_up - Bring the connection back up after being down
6590 * @vsi: the VSI being configured
6591 **/
6592 int i40e_up(struct i40e_vsi *vsi)
6593 {
6594 int err;
6595
6596 err = i40e_vsi_configure(vsi);
6597 if (!err)
6598 err = i40e_up_complete(vsi);
6599
6600 return err;
6601 }
6602
6603 /**
6604 * i40e_down - Shutdown the connection processing
6605 * @vsi: the VSI being stopped
6606 **/
6607 void i40e_down(struct i40e_vsi *vsi)
6608 {
6609 int i;
6610
6611 /* It is assumed that the caller of this function
6612 * sets the vsi->state __I40E_VSI_DOWN bit.
6613 */
6614 if (vsi->netdev) {
6615 netif_carrier_off(vsi->netdev);
6616 netif_tx_disable(vsi->netdev);
6617 }
6618 i40e_vsi_disable_irq(vsi);
6619 i40e_vsi_stop_rings(vsi);
6620 i40e_napi_disable_all(vsi);
6621
6622 for (i = 0; i < vsi->num_queue_pairs; i++) {
6623 i40e_clean_tx_ring(vsi->tx_rings[i]);
6624 if (i40e_enabled_xdp_vsi(vsi))
6625 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6626 i40e_clean_rx_ring(vsi->rx_rings[i]);
6627 }
6628
6629 }
6630
6631 /**
6632 * i40e_validate_mqprio_qopt - validate queue mapping info
6633 * @vsi: the VSI being configured
6634 * @mqprio_qopt: queue parameters
6635 **/
6636 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
6637 struct tc_mqprio_qopt_offload *mqprio_qopt)
6638 {
6639 u64 sum_max_rate = 0;
6640 u64 max_rate = 0;
6641 int i;
6642
6643 if (mqprio_qopt->qopt.offset[0] != 0 ||
6644 mqprio_qopt->qopt.num_tc < 1 ||
6645 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
6646 return -EINVAL;
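/* Walk each requested TC: every TC needs a non-zero queue count,
* min tx rates are not supported, queue offsets must be contiguous,
* and the per-TC max rates are summed for the link speed check below.
*/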
6647 for (i = 0; ; i++) {
6648 if (!mqprio_qopt->qopt.count[i])
6649 return -EINVAL;
6650 if (mqprio_qopt->min_rate[i]) {
6651 dev_err(&vsi->back->pdev->dev,
6652 "Invalid min tx rate (greater than 0) specified\n");
6653 return -EINVAL;
6654 }
6655 max_rate = mqprio_qopt->max_rate[i];
6656 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6657 sum_max_rate += max_rate;
6658
6659 if (i >= mqprio_qopt->qopt.num_tc - 1)
6660 break;
6661 if (mqprio_qopt->qopt.offset[i + 1] !=
6662 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
6663 return -EINVAL;
6664 }
6665 if (vsi->num_queue_pairs <
6666 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
6667 return -EINVAL;
6668 }
6669 if (sum_max_rate > i40e_get_link_speed(vsi)) {
6670 dev_err(&vsi->back->pdev->dev,
6671 "Invalid max tx rate specified\n");
6672 return -EINVAL;
6673 }
6674 return 0;
6675 }
6676
6677 /**
6678 * i40e_vsi_set_default_tc_config - set default values for tc configuration
6679 * @vsi: the VSI being configured
6680 **/
6681 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
6682 {
6683 u16 qcount;
6684 int i;
6685
6686 /* Only TC0 is enabled */
6687 vsi->tc_config.numtc = 1;
6688 vsi->tc_config.enabled_tc = 1;
6689 qcount = min_t(int, vsi->alloc_queue_pairs,
6690 i40e_pf_get_max_q_per_tc(vsi->back));
6691 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6692 /* For the TCs that are not enabled, set the offset to the default
6693 * queue and allocate one queue for the given TC.
6694 */
6695 vsi->tc_config.tc_info[i].qoffset = 0;
6696 if (i == 0)
6697 vsi->tc_config.tc_info[i].qcount = qcount;
6698 else
6699 vsi->tc_config.tc_info[i].qcount = 1;
6700 vsi->tc_config.tc_info[i].netdev_tc = 0;
6701 }
6702 }
6703
6704 /**
6705 * i40e_setup_tc - configure multiple traffic classes
6706 * @netdev: net device to configure
6707 * @type_data: tc offload data
6708 **/
6709 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
6710 {
6711 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
6712 struct i40e_netdev_priv *np = netdev_priv(netdev);
6713 struct i40e_vsi *vsi = np->vsi;
6714 struct i40e_pf *pf = vsi->back;
6715 u8 enabled_tc = 0, num_tc, hw;
6716 bool need_reset = false;
6717 int ret = -EINVAL;
6718 u16 mode;
6719 int i;
6720
6721 num_tc = mqprio_qopt->qopt.num_tc;
6722 hw = mqprio_qopt->qopt.hw;
6723 mode = mqprio_qopt->mode;
6724 if (!hw) {
6725 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
6726 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
6727 goto config_tc;
6728 }
6729
6730 /* Check if MFP enabled */
6731 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
6732 netdev_info(netdev,
6733 "Configuring TC not supported in MFP mode\n");
6734 return ret;
6735 }
6736 switch (mode) {
6737 case TC_MQPRIO_MODE_DCB:
6738 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
6739
6740 /* Check if DCB enabled to continue */
6741 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6742 netdev_info(netdev,
6743 "DCB is not enabled for adapter\n");
6744 return ret;
6745 }
6746
6747 /* Check whether tc count is within enabled limit */
6748 if (num_tc > i40e_pf_get_num_tc(pf)) {
6749 netdev_info(netdev,
6750 "TC count greater than enabled on link for adapter\n");
6751 return ret;
6752 }
6753 break;
6754 case TC_MQPRIO_MODE_CHANNEL:
6755 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
6756 netdev_info(netdev,
6757 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
6758 return ret;
6759 }
6760 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
6761 return ret;
6762 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
6763 if (ret)
6764 return ret;
6765 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
6766 sizeof(*mqprio_qopt));
6767 pf->flags |= I40E_FLAG_TC_MQPRIO;
6768 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6769 break;
6770 default:
6771 return -EINVAL;
6772 }
6773
6774 config_tc:
6775 /* Generate TC map for number of tc requested */
6776 for (i = 0; i < num_tc; i++)
6777 enabled_tc |= BIT(i);
6778
6779 /* Requesting same TC configuration as already enabled */
6780 if (enabled_tc == vsi->tc_config.enabled_tc &&
6781 mode != TC_MQPRIO_MODE_CHANNEL)
6782 return 0;
6783
6784 /* Quiesce VSI queues */
6785 i40e_quiesce_vsi(vsi);
6786
6787 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
6788 i40e_remove_queue_channels(vsi);
6789
6790 /* Configure VSI for enabled TCs */
6791 ret = i40e_vsi_config_tc(vsi, enabled_tc);
6792 if (ret) {
6793 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
6794 vsi->seid);
6795 need_reset = true;
6796 goto exit;
6797 }
6798
6799 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
6800 if (vsi->mqprio_qopt.max_rate[0]) {
6801 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
6802
6803 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
6804 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
6805 if (!ret) {
6806 u64 credits = max_tx_rate;
6807
6808 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6809 dev_dbg(&vsi->back->pdev->dev,
6810 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6811 max_tx_rate,
6812 credits,
6813 vsi->seid);
6814 } else {
6815 need_reset = true;
6816 goto exit;
6817 }
6818 }
6819 ret = i40e_configure_queue_channels(vsi);
6820 if (ret) {
6821 netdev_info(netdev,
6822 "Failed configuring queue channels\n");
6823 need_reset = true;
6824 goto exit;
6825 }
6826 }
6827
6828 exit:
6829 /* Reset the configuration data to defaults, only TC0 is enabled */
6830 if (need_reset) {
6831 i40e_vsi_set_default_tc_config(vsi);
6832 need_reset = false;
6833 }
6834
6835 /* Unquiesce VSI */
6836 i40e_unquiesce_vsi(vsi);
6837 return ret;
6838 }
6839
6840 /**
6841 * i40e_set_cld_element - sets cloud filter element data
6842 * @filter: cloud filter rule
6843 * @cld: ptr to cloud filter element data
6844 *
6845 * This is a helper function to copy data into the cloud filter element
6846 **/
6847 static inline void
6848 i40e_set_cld_element(struct i40e_cloud_filter *filter,
6849 struct i40e_aqc_cloud_filters_element_data *cld)
6850 {
6851 int i, j;
6852 u32 ipa;
6853
6854 memset(cld, 0, sizeof(*cld));
6855 ether_addr_copy(cld->outer_mac, filter->dst_mac);
6856 ether_addr_copy(cld->inner_mac, filter->src_mac);
6857
6858 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
6859 return;
6860
6861 if (filter->n_proto == ETH_P_IPV6) {
6862 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
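/* Copy the IPv6 address words into the cloud filter element
* in reverse order, converting each word to little-endian.
*/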
6863 for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
6864 i++, j += 2) {
6865 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
6866 ipa = cpu_to_le32(ipa);
6867 memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
6868 }
6869 } else {
6870 ipa = be32_to_cpu(filter->dst_ipv4);
6871 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
6872 }
6873
6874 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
6875
6876 /* tenant_id is not supported by FW now; once the support is enabled,
6877 * fill cld->tenant_id with cpu_to_le32(filter->tenant_id)
6878 */
6879 if (filter->tenant_id)
6880 return;
6881 }
6882
6883 /**
6884 * i40e_add_del_cloud_filter - Add/del cloud filter
6885 * @vsi: pointer to VSI
6886 * @filter: cloud filter rule
6887 * @add: if true, add, if false, delete
6888 *
6889 * Add or delete a cloud filter for a specific flow spec.
6890 * Returns 0 if the filter was successfully added or deleted.
6891 **/
6892 static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
6893 struct i40e_cloud_filter *filter, bool add)
6894 {
6895 struct i40e_aqc_cloud_filters_element_data cld_filter;
6896 struct i40e_pf *pf = vsi->back;
6897 int ret;
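/* Table mapping the driver's cloud filter flag combinations to the
* corresponding admin queue ADD_CLOUD_FILTER flag values.
*/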
6898 static const u16 flag_table[128] = {
6899 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
6900 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
6901 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
6902 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
6903 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
6904 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
6905 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
6906 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
6907 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
6908 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
6909 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
6910 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
6911 [I40E_CLOUD_FILTER_FLAGS_IIP] =
6912 I40E_AQC_ADD_CLOUD_FILTER_IIP,
6913 };
6914
6915 if (filter->flags >= ARRAY_SIZE(flag_table))
6916 return I40E_ERR_CONFIG;
6917
6918 /* copy element needed to add cloud filter from filter */
6919 i40e_set_cld_element(filter, &cld_filter);
6920
6921 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
6922 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
6923 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
6924
6925 if (filter->n_proto == ETH_P_IPV6)
6926 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
6927 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
6928 else
6929 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
6930 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
6931
6932 if (add)
6933 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
6934 &cld_filter, 1);
6935 else
6936 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
6937 &cld_filter, 1);
6938 if (ret)
6939 dev_dbg(&pf->pdev->dev,
6940 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
6941 add ? "add" : "delete", filter->dst_port, ret,
6942 pf->hw.aq.asq_last_status);
6943 else
6944 dev_info(&pf->pdev->dev,
6945 "%s cloud filter for VSI: %d\n",
6946 add ? "Added" : "Deleted", filter->seid);
6947 return ret;
6948 }
6949
6950 /**
6951 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
6952 * @vsi: pointer to VSI
6953 * @filter: cloud filter rule
6954 * @add: if true, add, if false, delete
6955 *
6956 * Add or delete a cloud filter for a specific flow spec using big buffer.
6957 * Returns 0 if the filter was successfully added or deleted.
6958 **/
6959 static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
6960 struct i40e_cloud_filter *filter,
6961 bool add)
6962 {
6963 struct i40e_aqc_cloud_filters_element_bb cld_filter;
6964 struct i40e_pf *pf = vsi->back;
6965 int ret;
6966
6967 /* Having both valid (src and dst) MAC addresses is not supported */
6968 if ((is_valid_ether_addr(filter->dst_mac) &&
6969 is_valid_ether_addr(filter->src_mac)) ||
6970 (is_multicast_ether_addr(filter->dst_mac) &&
6971 is_multicast_ether_addr(filter->src_mac)))
6972 return -EINVAL;
6973
6974 /* Make sure a port is specified, otherwise bail out; a channel-
6975 * specific cloud filter needs the 'L4 port' to be non-zero
6976 */
6977 if (!filter->dst_port)
6978 return -EINVAL;
6979
6980 /* adding filter using src_port/src_ip is not supported at this stage */
6981 if (filter->src_port || filter->src_ipv4 ||
6982 !ipv6_addr_any(&filter->ip.v6.src_ip6))
6983 return -EINVAL;
6984
6985 /* copy element needed to add cloud filter from filter */
6986 i40e_set_cld_element(filter, &cld_filter.element);
6987
6988 if (is_valid_ether_addr(filter->dst_mac) ||
6989 is_valid_ether_addr(filter->src_mac) ||
6990 is_multicast_ether_addr(filter->dst_mac) ||
6991 is_multicast_ether_addr(filter->src_mac)) {
6992 /* MAC + IP : unsupported mode */
6993 if (filter->dst_ipv4)
6994 return -EINVAL;
6995
6996 /* since we validated that the L4 port must be valid before
6997 * we get here, start with the respective "flags" value
6998 * and update it if a vlan is present
6999 */
7000 cld_filter.element.flags =
7001 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
7002
7003 if (filter->vlan_id) {
7004 cld_filter.element.flags =
7005 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
7006 }
7007
7008 } else if (filter->dst_ipv4 ||
7009 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
7010 cld_filter.element.flags =
7011 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
7012 if (filter->n_proto == ETH_P_IPV6)
7013 cld_filter.element.flags |=
7014 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7015 else
7016 cld_filter.element.flags |=
7017 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7018 } else {
7019 dev_err(&pf->pdev->dev,
7020 "either mac or ip has to be valid for cloud filter\n");
7021 return -EINVAL;
7022 }
7023
7024 /* Now copy the L4 port into bytes 6..7 of the general fields */
7025 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
7026 be16_to_cpu(filter->dst_port);
7027
7028 if (add) {
7029 /* Validate current device switch mode, change if necessary */
7030 ret = i40e_validate_and_set_switch_mode(vsi);
7031 if (ret) {
7032 dev_err(&pf->pdev->dev,
7033 "failed to set switch mode, ret %d\n",
7034 ret);
7035 return ret;
7036 }
7037
7038 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
7039 &cld_filter, 1);
7040 } else {
7041 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
7042 &cld_filter, 1);
7043 }
7044
7045 if (ret)
7046 dev_dbg(&pf->pdev->dev,
7047 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
7048 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
7049 else
7050 dev_info(&pf->pdev->dev,
7051 "%s cloud filter for VSI: %d, L4 port: %d\n",
7052 add ? "add" : "delete", filter->seid,
7053 ntohs(filter->dst_port));
7054 return ret;
7055 }
7056
7057 /**
7058 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
7059 * @vsi: Pointer to VSI
7060 * @f: Pointer to struct tc_cls_flower_offload
7061 * @filter: Pointer to cloud filter structure
7062 *
7063 **/
7064 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7065 struct tc_cls_flower_offload *f,
7066 struct i40e_cloud_filter *filter)
7067 {
7068 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
7069 struct i40e_pf *pf = vsi->back;
7070 u8 field_flags = 0;
7071
7072 if (f->dissector->used_keys &
7073 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7074 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7075 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7076 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7077 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7078 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7079 BIT(FLOW_DISSECTOR_KEY_PORTS) |
7080 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
7081 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
7082 f->dissector->used_keys);
7083 return -EOPNOTSUPP;
7084 }
7085
7086 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
7087 struct flow_dissector_key_keyid *key =
7088 skb_flow_dissector_target(f->dissector,
7089 FLOW_DISSECTOR_KEY_ENC_KEYID,
7090 f->key);
7091
7092 struct flow_dissector_key_keyid *mask =
7093 skb_flow_dissector_target(f->dissector,
7094 FLOW_DISSECTOR_KEY_ENC_KEYID,
7095 f->mask);
7096
7097 if (mask->keyid != 0)
7098 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
7099
7100 filter->tenant_id = be32_to_cpu(key->keyid);
7101 }
7102
7103 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
7104 struct flow_dissector_key_basic *key =
7105 skb_flow_dissector_target(f->dissector,
7106 FLOW_DISSECTOR_KEY_BASIC,
7107 f->key);
7108
7109 struct flow_dissector_key_basic *mask =
7110 skb_flow_dissector_target(f->dissector,
7111 FLOW_DISSECTOR_KEY_BASIC,
7112 f->mask);
7113
7114 n_proto_key = ntohs(key->n_proto);
7115 n_proto_mask = ntohs(mask->n_proto);
7116
7117 if (n_proto_key == ETH_P_ALL) {
7118 n_proto_key = 0;
7119 n_proto_mask = 0;
7120 }
7121 filter->n_proto = n_proto_key & n_proto_mask;
7122 filter->ip_proto = key->ip_proto;
7123 }
7124
7125 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7126 struct flow_dissector_key_eth_addrs *key =
7127 skb_flow_dissector_target(f->dissector,
7128 FLOW_DISSECTOR_KEY_ETH_ADDRS,
7129 f->key);
7130
7131 struct flow_dissector_key_eth_addrs *mask =
7132 skb_flow_dissector_target(f->dissector,
7133 FLOW_DISSECTOR_KEY_ETH_ADDRS,
7134 f->mask);
7135
7136 /* use is_broadcast and is_zero to check for all 0xff or 0 */
7137 if (!is_zero_ether_addr(mask->dst)) {
7138 if (is_broadcast_ether_addr(mask->dst)) {
7139 field_flags |= I40E_CLOUD_FIELD_OMAC;
7140 } else {
7141 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
7142 mask->dst);
7143 return I40E_ERR_CONFIG;
7144 }
7145 }
7146
7147 if (!is_zero_ether_addr(mask->src)) {
7148 if (is_broadcast_ether_addr(mask->src)) {
7149 field_flags |= I40E_CLOUD_FIELD_IMAC;
7150 } else {
7151 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
7152 mask->src);
7153 return I40E_ERR_CONFIG;
7154 }
7155 }
7156 ether_addr_copy(filter->dst_mac, key->dst);
7157 ether_addr_copy(filter->src_mac, key->src);
7158 }
7159
7160 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
7161 struct flow_dissector_key_vlan *key =
7162 skb_flow_dissector_target(f->dissector,
7163 FLOW_DISSECTOR_KEY_VLAN,
7164 f->key);
7165 struct flow_dissector_key_vlan *mask =
7166 skb_flow_dissector_target(f->dissector,
7167 FLOW_DISSECTOR_KEY_VLAN,
7168 f->mask);
7169
7170 if (mask->vlan_id) {
7171 if (mask->vlan_id == VLAN_VID_MASK) {
7172 field_flags |= I40E_CLOUD_FIELD_IVLAN;
7173
7174 } else {
7175 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
7176 mask->vlan_id);
7177 return I40E_ERR_CONFIG;
7178 }
7179 }
7180
7181 filter->vlan_id = cpu_to_be16(key->vlan_id);
7182 }
7183
7184 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
7185 struct flow_dissector_key_control *key =
7186 skb_flow_dissector_target(f->dissector,
7187 FLOW_DISSECTOR_KEY_CONTROL,
7188 f->key);
7189
7190 addr_type = key->addr_type;
7191 }
7192
7193 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7194 struct flow_dissector_key_ipv4_addrs *key =
7195 skb_flow_dissector_target(f->dissector,
7196 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
7197 f->key);
7198 struct flow_dissector_key_ipv4_addrs *mask =
7199 skb_flow_dissector_target(f->dissector,
7200 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
7201 f->mask);
7202
7203 if (mask->dst) {
7204 if (mask->dst == cpu_to_be32(0xffffffff)) {
7205 field_flags |= I40E_CLOUD_FIELD_IIP;
7206 } else {
7207 mask->dst = be32_to_cpu(mask->dst);
7208 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4\n",
7209 &mask->dst);
7210 return I40E_ERR_CONFIG;
7211 }
7212 }
7213
7214 if (mask->src) {
7215 if (mask->src == cpu_to_be32(0xffffffff)) {
7216 field_flags |= I40E_CLOUD_FIELD_IIP;
7217 } else {
7218 mask->src = be32_to_cpu(mask->src);
7219 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4\n",
7220 &mask->src);
7221 return I40E_ERR_CONFIG;
7222 }
7223 }
7224
7225 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
7226 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
7227 return I40E_ERR_CONFIG;
7228 }
7229 filter->dst_ipv4 = key->dst;
7230 filter->src_ipv4 = key->src;
7231 }
7232
7233 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7234 struct flow_dissector_key_ipv6_addrs *key =
7235 skb_flow_dissector_target(f->dissector,
7236 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
7237 f->key);
7238 struct flow_dissector_key_ipv6_addrs *mask =
7239 skb_flow_dissector_target(f->dissector,
7240 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
7241 f->mask);
7242
7243 /* src and dest IPv6 addresses should not be LOOPBACK
7244 * (0:0:0:0:0:0:0:1), which can be represented as ::1
7245 */
7246 if (ipv6_addr_loopback(&key->dst) ||
7247 ipv6_addr_loopback(&key->src)) {
7248 dev_err(&pf->pdev->dev,
7249 "Bad ipv6, addr is LOOPBACK\n");
7250 return I40E_ERR_CONFIG;
7251 }
7252 if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
7253 field_flags |= I40E_CLOUD_FIELD_IIP;
7254
7255 memcpy(&filter->src_ipv6, &key->src.s6_addr32,
7256 sizeof(filter->src_ipv6));
7257 memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
7258 sizeof(filter->dst_ipv6));
7259 }
7260
7261 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
7262 struct flow_dissector_key_ports *key =
7263 skb_flow_dissector_target(f->dissector,
7264 FLOW_DISSECTOR_KEY_PORTS,
7265 f->key);
7266 struct flow_dissector_key_ports *mask =
7267 skb_flow_dissector_target(f->dissector,
7268 FLOW_DISSECTOR_KEY_PORTS,
7269 f->mask);
7270
7271 if (mask->src) {
7272 if (mask->src == cpu_to_be16(0xffff)) {
7273 field_flags |= I40E_CLOUD_FIELD_IIP;
7274 } else {
7275 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
7276 be16_to_cpu(mask->src));
7277 return I40E_ERR_CONFIG;
7278 }
7279 }
7280
7281 if (mask->dst) {
7282 if (mask->dst == cpu_to_be16(0xffff)) {
7283 field_flags |= I40E_CLOUD_FIELD_IIP;
7284 } else {
7285 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
7286 be16_to_cpu(mask->dst));
7287 return I40E_ERR_CONFIG;
7288 }
7289 }
7290
7291 filter->dst_port = key->dst;
7292 filter->src_port = key->src;
7293
7294 switch (filter->ip_proto) {
7295 case IPPROTO_TCP:
7296 case IPPROTO_UDP:
7297 break;
7298 default:
7299 dev_err(&pf->pdev->dev,
7300 "Only UDP and TCP transport are supported\n");
7301 return -EINVAL;
7302 }
7303 }
7304 filter->flags = field_flags;
7305 return 0;
7306 }
7307
7308 /**
7309 * i40e_handle_tclass - Forward to a traffic class on the device
7310 * @vsi: Pointer to VSI
7311 * @tc: traffic class index on the device
7312 * @filter: Pointer to cloud filter structure
7313 *
7314 **/
7315 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
7316 struct i40e_cloud_filter *filter)
7317 {
7318 struct i40e_channel *ch, *ch_tmp;
7319
7320 /* direct to a traffic class on the same device */
7321 if (tc == 0) {
7322 filter->seid = vsi->seid;
7323 return 0;
7324 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
7325 if (!filter->dst_port) {
7326 dev_err(&vsi->back->pdev->dev,
7327 "Specify destination port to direct to traffic class that is not default\n");
7328 return -EINVAL;
7329 }
7330 if (list_empty(&vsi->ch_list))
7331 return -EINVAL;
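/* Walk the channel list and use the channel VSI whose SEID was mapped
 * to this traffic class when the channel was created.
 */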
7332 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
7333 list) {
7334 if (ch->seid == vsi->tc_seid_map[tc])
7335 filter->seid = ch->seid;
7336 }
7337 return 0;
7338 }
7339 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
7340 return -EINVAL;
7341 }
7342
7343 /**
7344 * i40e_configure_clsflower - Configure tc flower filters
7345 * @vsi: Pointer to VSI
7346 * @cls_flower: Pointer to struct tc_cls_flower_offload
7347 *
7348 **/
7349 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7350 struct tc_cls_flower_offload *cls_flower)
7351 {
7352 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
7353 struct i40e_cloud_filter *filter = NULL;
7354 struct i40e_pf *pf = vsi->back;
7355 int err = 0;
7356
7357 if (tc < 0) {
7358 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
7359 return -EINVAL;
7360 }
7361
7362 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
7363 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
7364 return -EBUSY;
7365
7366 if (pf->fdir_pf_active_filters ||
7367 (!hlist_empty(&pf->fdir_filter_list))) {
7368 dev_err(&vsi->back->pdev->dev,
7369 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
7370 return -EINVAL;
7371 }
7372
7373 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
7374 dev_err(&vsi->back->pdev->dev,
7375 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
7376 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7377 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7378 }
7379
7380 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
7381 if (!filter)
7382 return -ENOMEM;
7383
7384 filter->cookie = cls_flower->cookie;
7385
7386 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
7387 if (err < 0)
7388 goto err;
7389
7390 err = i40e_handle_tclass(vsi, tc, filter);
7391 if (err < 0)
7392 goto err;
7393
7394 /* Add cloud filter */
7395 if (filter->dst_port)
7396 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
7397 else
7398 err = i40e_add_del_cloud_filter(vsi, filter, true);
7399
7400 if (err) {
7401 dev_err(&pf->pdev->dev,
7402 "Failed to add cloud filter, err %s\n",
7403 i40e_stat_str(&pf->hw, err));
7404 err = i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
7405 goto err;
7406 }
7407
7408 /* add filter to the ordered list */
7409 INIT_HLIST_NODE(&filter->cloud_node);
7410
7411 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
7412
7413 pf->num_cloud_filters++;
7414
7415 return err;
7416 err:
7417 kfree(filter);
7418 return err;
7419 }
7420
7421 /**
7422 * i40e_find_cloud_filter - Find the cloud filter in the list
7423 * @vsi: Pointer to VSI
7424 * @cookie: filter specific cookie
7425 *
7426 **/
7427 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
7428 unsigned long *cookie)
7429 {
7430 struct i40e_cloud_filter *filter = NULL;
7431 struct hlist_node *node2;
7432
7433 hlist_for_each_entry_safe(filter, node2,
7434 &vsi->back->cloud_filter_list, cloud_node)
7435 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
7436 return filter;
7437 return NULL;
7438 }
7439
7440 /**
7441 * i40e_delete_clsflower - Remove tc flower filters
7442 * @vsi: Pointer to VSI
7443 * @cls_flower: Pointer to struct tc_cls_flower_offload
7444 *
7445 **/
7446 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
7447 struct tc_cls_flower_offload *cls_flower)
7448 {
7449 struct i40e_cloud_filter *filter = NULL;
7450 struct i40e_pf *pf = vsi->back;
7451 int err = 0;
7452
7453 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
7454
7455 if (!filter)
7456 return -EINVAL;
7457
7458 hash_del(&filter->cloud_node);
7459
7460 if (filter->dst_port)
7461 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
7462 else
7463 err = i40e_add_del_cloud_filter(vsi, filter, false);
7464
7465 kfree(filter);
7466 if (err) {
7467 dev_err(&pf->pdev->dev,
7468 "Failed to delete cloud filter, err %s\n",
7469 i40e_stat_str(&pf->hw, err));
7470 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
7471 }
7472
7473 pf->num_cloud_filters--;
7474 if (!pf->num_cloud_filters)
7475 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7476 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7477 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7478 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7479 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7480 }
7481 return 0;
7482 }
7483
7484 /**
7485 * i40e_setup_tc_cls_flower - flower classifier offloads
7486 * @np: net device private structure
7487 * @cls_flower: pointer to the tc flower offload data
7488 **/
7489 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
7490 struct tc_cls_flower_offload *cls_flower)
7491 {
7492 struct i40e_vsi *vsi = np->vsi;
7493
7494 if (cls_flower->common.chain_index)
7495 return -EOPNOTSUPP;
7496
7497 switch (cls_flower->command) {
7498 case TC_CLSFLOWER_REPLACE:
7499 return i40e_configure_clsflower(vsi, cls_flower);
7500 case TC_CLSFLOWER_DESTROY:
7501 return i40e_delete_clsflower(vsi, cls_flower);
7502 case TC_CLSFLOWER_STATS:
7503 return -EOPNOTSUPP;
7504 default:
7505 return -EINVAL;
7506 }
7507 }
7508
7509 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
7510 void *cb_priv)
7511 {
7512 struct i40e_netdev_priv *np = cb_priv;
7513
7514 switch (type) {
7515 case TC_SETUP_CLSFLOWER:
7516 return i40e_setup_tc_cls_flower(np, type_data);
7517
7518 default:
7519 return -EOPNOTSUPP;
7520 }
7521 }
7522
7523 static int i40e_setup_tc_block(struct net_device *dev,
7524 struct tc_block_offload *f)
7525 {
7526 struct i40e_netdev_priv *np = netdev_priv(dev);
7527
7528 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
7529 return -EOPNOTSUPP;
7530
7531 switch (f->command) {
7532 case TC_BLOCK_BIND:
7533 return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
7534 np, np);
7535 case TC_BLOCK_UNBIND:
7536 tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
7537 return 0;
7538 default:
7539 return -EOPNOTSUPP;
7540 }
7541 }
7542
7543 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
7544 void *type_data)
7545 {
7546 switch (type) {
7547 case TC_SETUP_QDISC_MQPRIO:
7548 return i40e_setup_tc(netdev, type_data);
7549 case TC_SETUP_BLOCK:
7550 return i40e_setup_tc_block(netdev, type_data);
7551 default:
7552 return -EOPNOTSUPP;
7553 }
7554 }
7555
7556 /**
7557 * i40e_open - Called when a network interface is made active
7558 * @netdev: network interface device structure
7559 *
7560 * The open entry point is called when a network interface is made
7561 * active by the system (IFF_UP). At this point all resources needed
7562 * for transmit and receive operations are allocated, the interrupt
7563 * handler is registered with the OS, the netdev watchdog subtask is
7564 * enabled, and the stack is notified that the interface is ready.
7565 *
7566 * Returns 0 on success, negative value on failure
7567 **/
7568 int i40e_open(struct net_device *netdev)
7569 {
7570 struct i40e_netdev_priv *np = netdev_priv(netdev);
7571 struct i40e_vsi *vsi = np->vsi;
7572 struct i40e_pf *pf = vsi->back;
7573 int err;
7574
7575 /* disallow open during test or if eeprom is broken */
7576 if (test_bit(__I40E_TESTING, pf->state) ||
7577 test_bit(__I40E_BAD_EEPROM, pf->state))
7578 return -EBUSY;
7579
7580 netif_carrier_off(netdev);
7581
7582 err = i40e_vsi_open(vsi);
7583 if (err)
7584 return err;
7585
7586 /* configure global TSO hardware offload settings */
7587 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
7588 TCP_FLAG_FIN) >> 16);
7589 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
7590 TCP_FLAG_FIN |
7591 TCP_FLAG_CWR) >> 16);
7592 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
7593
7594 udp_tunnel_get_rx_info(netdev);
7595
7596 return 0;
7597 }
7598
7599 /**
7600 * i40e_vsi_open - bring up a VSI
7601 * @vsi: the VSI to open
7602 *
7603 * Finish initialization of the VSI.
7604 *
7605 * Returns 0 on success, negative value on failure
7606 *
7607 * Note: expects to be called while under rtnl_lock()
7608 **/
7609 int i40e_vsi_open(struct i40e_vsi *vsi)
7610 {
7611 struct i40e_pf *pf = vsi->back;
7612 char int_name[I40E_INT_NAME_STR_LEN];
7613 int err;
7614
7615 /* allocate descriptors */
7616 err = i40e_vsi_setup_tx_resources(vsi);
7617 if (err)
7618 goto err_setup_tx;
7619 err = i40e_vsi_setup_rx_resources(vsi);
7620 if (err)
7621 goto err_setup_rx;
7622
7623 err = i40e_vsi_configure(vsi);
7624 if (err)
7625 goto err_setup_rx;
7626
7627 if (vsi->netdev) {
7628 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7629 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
7630 err = i40e_vsi_request_irq(vsi, int_name);
7631 if (err)
7632 goto err_setup_rx;
7633
7634 /* Notify the stack of the actual queue counts. */
7635 err = netif_set_real_num_tx_queues(vsi->netdev,
7636 vsi->num_queue_pairs);
7637 if (err)
7638 goto err_set_queues;
7639
7640 err = netif_set_real_num_rx_queues(vsi->netdev,
7641 vsi->num_queue_pairs);
7642 if (err)
7643 goto err_set_queues;
7644
7645 } else if (vsi->type == I40E_VSI_FDIR) {
7646 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
7647 dev_driver_string(&pf->pdev->dev),
7648 dev_name(&pf->pdev->dev));
7649 err = i40e_vsi_request_irq(vsi, int_name);
7650
7651 } else {
7652 err = -EINVAL;
7653 goto err_setup_rx;
7654 }
7655
7656 err = i40e_up_complete(vsi);
7657 if (err)
7658 goto err_up_complete;
7659
7660 return 0;
7661
7662 err_up_complete:
7663 i40e_down(vsi);
7664 err_set_queues:
7665 i40e_vsi_free_irq(vsi);
7666 err_setup_rx:
7667 i40e_vsi_free_rx_resources(vsi);
7668 err_setup_tx:
7669 i40e_vsi_free_tx_resources(vsi);
7670 if (vsi == pf->vsi[pf->lan_vsi])
7671 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
7672
7673 return err;
7674 }
7675
7676 /**
7677 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
7678 * @pf: Pointer to PF
7679 *
7680 * This function destroys the hlist where all the Flow Director
7681 * filters were saved.
7682 **/
7683 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
7684 {
7685 struct i40e_fdir_filter *filter;
7686 struct i40e_flex_pit *pit_entry, *tmp;
7687 struct hlist_node *node2;
7688
7689 hlist_for_each_entry_safe(filter, node2,
7690 &pf->fdir_filter_list, fdir_node) {
7691 hlist_del(&filter->fdir_node);
7692 kfree(filter);
7693 }
7694
7695 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
7696 list_del(&pit_entry->list);
7697 kfree(pit_entry);
7698 }
7699 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
7700
7701 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
7702 list_del(&pit_entry->list);
7703 kfree(pit_entry);
7704 }
7705 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
7706
7707 pf->fdir_pf_active_filters = 0;
7708 pf->fd_tcp4_filter_cnt = 0;
7709 pf->fd_udp4_filter_cnt = 0;
7710 pf->fd_sctp4_filter_cnt = 0;
7711 pf->fd_ip4_filter_cnt = 0;
7712
7713 /* Reprogram the default input set for TCP/IPv4 */
7714 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
7715 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7716 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7717
7718 /* Reprogram the default input set for UDP/IPv4 */
7719 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
7720 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7721 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7722
7723 /* Reprogram the default input set for SCTP/IPv4 */
7724 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
7725 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7726 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7727
7728 /* Reprogram the default input set for Other/IPv4 */
7729 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
7730 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
7731 }
7732
7733 /**
7734 * i40e_cloud_filter_exit - Cleans up the cloud filters
7735 * @pf: Pointer to PF
7736 *
7737 * This function destroys the hlist where all the cloud filters
7738 * were saved.
7739 **/
7740 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
7741 {
7742 struct i40e_cloud_filter *cfilter;
7743 struct hlist_node *node;
7744
7745 hlist_for_each_entry_safe(cfilter, node,
7746 &pf->cloud_filter_list, cloud_node) {
7747 hlist_del(&cfilter->cloud_node);
7748 kfree(cfilter);
7749 }
7750 pf->num_cloud_filters = 0;
7751
7752 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7753 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7754 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7755 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7756 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7757 }
7758 }
7759
7760 /**
7761 * i40e_close - Disables a network interface
7762 * @netdev: network interface device structure
7763 *
7764 * The close entry point is called when an interface is de-activated
7765 * by the OS. The hardware is still under the driver's control, but
7766 * this netdev interface is disabled.
7767 *
7768 * Returns 0, this is not allowed to fail
7769 **/
7770 int i40e_close(struct net_device *netdev)
7771 {
7772 struct i40e_netdev_priv *np = netdev_priv(netdev);
7773 struct i40e_vsi *vsi = np->vsi;
7774
7775 i40e_vsi_close(vsi);
7776
7777 return 0;
7778 }
7779
7780 /**
7781 * i40e_do_reset - Start a PF or Core Reset sequence
7782 * @pf: board private structure
7783 * @reset_flags: which reset is requested
7784 * @lock_acquired: indicates whether or not the lock has been acquired
7785 * before this function was called.
7786 *
7787 * The essential difference in resets is that the PF Reset
7788 * doesn't clear the packet buffers, doesn't reset the PE
7789 * firmware, and doesn't bother the other PFs on the chip.
7790 **/
7791 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
7792 {
7793 u32 val;
7794
7795 WARN_ON(in_interrupt());
7796
7797
7798 /* do the biggest reset indicated */
7799 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
7800
7801 /* Request a Global Reset
7802 *
7803 * This will start the chip's countdown to the actual full
7804 * chip reset event, and a warning interrupt to be sent
7805 * to all PFs, including the requestor. Our handler
7806 * for the warning interrupt will deal with the shutdown
7807 * and recovery of the switch setup.
7808 */
7809 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
7810 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7811 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
7812 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
7813
7814 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
7815
7816 /* Request a Core Reset
7817 *
7818 * Same as Global Reset, except does *not* include the MAC/PHY
7819 */
7820 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
7821 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7822 val |= I40E_GLGEN_RTRIG_CORER_MASK;
7823 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
7824 i40e_flush(&pf->hw);
7825
7826 } else if (reset_flags & I40E_PF_RESET_FLAG) {
7827
7828 /* Request a PF Reset
7829 *
7830 * Resets only the PF-specific registers
7831 *
7832 * This goes directly to the tear-down and rebuild of
7833 * the switch, since we need to do all the recovery as
7834 * for the Core Reset.
7835 */
7836 dev_dbg(&pf->pdev->dev, "PFR requested\n");
7837 i40e_handle_reset_warning(pf, lock_acquired);
7838
7839 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
7840 int v;
7841
7842 /* Find the VSI(s) that requested a re-init */
7843 dev_info(&pf->pdev->dev,
7844 "VSI reinit requested\n");
7845 for (v = 0; v < pf->num_alloc_vsi; v++) {
7846 struct i40e_vsi *vsi = pf->vsi[v];
7847
7848 if (vsi != NULL &&
7849 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
7850 vsi->state))
7851 i40e_vsi_reinit_locked(pf->vsi[v]);
7852 }
7853 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
7854 int v;
7855
7856 /* Find the VSI(s) that need to be brought down */
7857 dev_info(&pf->pdev->dev, "VSI down requested\n");
7858 for (v = 0; v < pf->num_alloc_vsi; v++) {
7859 struct i40e_vsi *vsi = pf->vsi[v];
7860
7861 if (vsi != NULL &&
7862 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
7863 vsi->state)) {
7864 set_bit(__I40E_VSI_DOWN, vsi->state);
7865 i40e_down(vsi);
7866 }
7867 }
7868 } else {
7869 dev_info(&pf->pdev->dev,
7870 "bad reset request 0x%08x\n", reset_flags);
7871 }
7872 }
7873
7874 #ifdef CONFIG_I40E_DCB
7875 /**
7876 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
7877 * @pf: board private structure
7878 * @old_cfg: current DCB config
7879 * @new_cfg: new DCB config
7880 **/
7881 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
7882 struct i40e_dcbx_config *old_cfg,
7883 struct i40e_dcbx_config *new_cfg)
7884 {
7885 bool need_reconfig = false;
7886
7887 /* Check if ETS configuration has changed */
7888 if (memcmp(&new_cfg->etscfg,
7889 &old_cfg->etscfg,
7890 sizeof(new_cfg->etscfg))) {
7891 /* If Priority Table has changed reconfig is needed */
7892 if (memcmp(&new_cfg->etscfg.prioritytable,
7893 &old_cfg->etscfg.prioritytable,
7894 sizeof(new_cfg->etscfg.prioritytable))) {
7895 need_reconfig = true;
7896 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
7897 }
7898
7899 if (memcmp(&new_cfg->etscfg.tcbwtable,
7900 &old_cfg->etscfg.tcbwtable,
7901 sizeof(new_cfg->etscfg.tcbwtable)))
7902 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
7903
7904 if (memcmp(&new_cfg->etscfg.tsatable,
7905 &old_cfg->etscfg.tsatable,
7906 sizeof(new_cfg->etscfg.tsatable)))
7907 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
7908 }
7909
7910 /* Check if PFC configuration has changed */
7911 if (memcmp(&new_cfg->pfc,
7912 &old_cfg->pfc,
7913 sizeof(new_cfg->pfc))) {
7914 need_reconfig = true;
7915 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
7916 }
7917
7918 /* Check if APP Table has changed */
7919 if (memcmp(&new_cfg->app,
7920 &old_cfg->app,
7921 sizeof(new_cfg->app))) {
7922 need_reconfig = true;
7923 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
7924 }
7925
7926 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
7927 return need_reconfig;
7928 }
7929
7930 /**
7931 * i40e_handle_lldp_event - Handle LLDP Change MIB event
7932 * @pf: board private structure
7933 * @e: event info posted on ARQ
7934 **/
7935 static int i40e_handle_lldp_event(struct i40e_pf *pf,
7936 struct i40e_arq_event_info *e)
7937 {
7938 struct i40e_aqc_lldp_get_mib *mib =
7939 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
7940 struct i40e_hw *hw = &pf->hw;
7941 struct i40e_dcbx_config tmp_dcbx_cfg;
7942 bool need_reconfig = false;
7943 int ret = 0;
7944 u8 type;
7945
7946 /* Not DCB capable or capability disabled */
7947 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
7948 return ret;
7949
7950 /* Ignore if event is not for Nearest Bridge */
7951 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
7952 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
7953 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
7954 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
7955 return ret;
7956
7957 /* Check MIB Type and return if event for Remote MIB update */
7958 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
7959 dev_dbg(&pf->pdev->dev,
7960 "LLDP event mib type %s\n", type ? "remote" : "local");
7961 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
7962 /* Update the remote cached instance and return */
7963 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
7964 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
7965 &hw->remote_dcbx_config);
7966 goto exit;
7967 }
7968
7969 /* Store the old configuration */
7970 tmp_dcbx_cfg = hw->local_dcbx_config;
7971
7972 /* Reset the old DCBx configuration data */
7973 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
7974 /* Get updated DCBX data from firmware */
7975 ret = i40e_get_dcb_config(&pf->hw);
7976 if (ret) {
7977 dev_info(&pf->pdev->dev,
7978 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
7979 i40e_stat_str(&pf->hw, ret),
7980 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7981 goto exit;
7982 }
7983
7984 /* No change detected in DCBX configs */
7985 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
7986 sizeof(tmp_dcbx_cfg))) {
7987 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
7988 goto exit;
7989 }
7990
7991 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
7992 &hw->local_dcbx_config);
7993
7994 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
7995
7996 if (!need_reconfig)
7997 goto exit;
7998
7999 /* Enable DCB tagging only when more than one TC */
8000 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
8001 pf->flags |= I40E_FLAG_DCB_ENABLED;
8002 else
8003 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8004
8005 set_bit(__I40E_PORT_SUSPENDED, pf->state);
8006 /* Reconfiguration needed, quiesce all VSIs */
8007 i40e_pf_quiesce_all_vsi(pf);
8008
8009 /* Changes in configuration require the VEB/VSI to be updated */
8010 i40e_dcb_reconfigure(pf);
8011
8012 ret = i40e_resume_port_tx(pf);
8013
8014 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
8015 /* In case of error no point in resuming VSIs */
8016 if (ret)
8017 goto exit;
8018
8019 /* Wait for the PF's queues to be disabled */
8020 ret = i40e_pf_wait_queues_disabled(pf);
8021 if (ret) {
8022 /* Schedule PF reset to recover */
8023 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8024 i40e_service_event_schedule(pf);
8025 } else {
8026 i40e_pf_unquiesce_all_vsi(pf);
8027 pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
8028 I40E_FLAG_CLIENT_L2_CHANGE);
8029 }
8030
8031 exit:
8032 return ret;
8033 }
8034 #endif /* CONFIG_I40E_DCB */
8035
8036 /**
8037 * i40e_do_reset_safe - Protected reset path for userland calls.
8038 * @pf: board private structure
8039 * @reset_flags: which reset is requested
8040 *
8041 **/
8042 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
8043 {
8044 rtnl_lock();
8045 i40e_do_reset(pf, reset_flags, true);
8046 rtnl_unlock();
8047 }
8048
8049 /**
8050 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
8051 * @pf: board private structure
8052 * @e: event info posted on ARQ
8053 *
8054 * Handler for LAN Queue Overflow Event generated by the firmware for PF
8055 * and VF queues
8056 **/
8057 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
8058 struct i40e_arq_event_info *e)
8059 {
8060 struct i40e_aqc_lan_overflow *data =
8061 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
8062 u32 queue = le32_to_cpu(data->prtdcb_rupto);
8063 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
8064 struct i40e_hw *hw = &pf->hw;
8065 struct i40e_vf *vf;
8066 u16 vf_id;
8067
8068 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
8069 queue, qtx_ctl);
8070
8071 /* Queue belongs to VF, find the VF and issue VF reset */
8072 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
8073 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
8074 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
8075 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
8076 vf_id -= hw->func_caps.vf_base_id;
8077 vf = &pf->vf[vf_id];
8078 i40e_vc_notify_vf_reset(vf);
8079 /* Allow VF to process pending reset notification */
8080 msleep(20);
8081 i40e_reset_vf(vf, false);
8082 }
8083 }
8084
8085 /**
8086 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
8087 * @pf: board private structure
8088 **/
8089 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
8090 {
8091 u32 val, fcnt_prog;
8092
8093 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8094 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
8095 return fcnt_prog;
8096 }
8097
8098 /**
8099 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
8100 * @pf: board private structure
8101 **/
8102 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
8103 {
8104 u32 val, fcnt_prog;
8105
8106 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
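/* total programmed filters = guaranteed filters + best-effort filters */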
8107 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
8108 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
8109 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
8110 return fcnt_prog;
8111 }
8112
8113 /**
8114 * i40e_get_global_fd_count - Get total FD filters programmed on device
8115 * @pf: board private structure
8116 **/
8117 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
8118 {
8119 u32 val, fcnt_prog;
8120
8121 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
8122 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
8123 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
8124 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
8125 return fcnt_prog;
8126 }
8127
8128 /**
8129 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
8130 * @pf: board private structure
8131 **/
8132 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
8133 {
8134 struct i40e_fdir_filter *filter;
8135 u32 fcnt_prog, fcnt_avail;
8136 struct hlist_node *node;
8137
8138 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8139 return;
8140
8141 /* Check if we have enough room to re-enable FDir SB capability. */
8142 fcnt_prog = i40e_get_global_fd_count(pf);
8143 fcnt_avail = pf->fdir_pf_filter_count;
8144 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
8145 (pf->fd_add_err == 0) ||
8146 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
8147 if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
8148 pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
8149 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8150 (I40E_DEBUG_FD & pf->hw.debug_mask))
8151 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
8152 }
8153 }
8154
8155 /* We should wait for even more space before re-enabling ATR.
8156 * Additionally, we cannot enable ATR as long as we still have TCP SB
8157 * rules active.
8158 */
8159 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
8160 (pf->fd_tcp4_filter_cnt == 0)) {
8161 if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
8162 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
8163 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8164 (I40E_DEBUG_FD & pf->hw.debug_mask))
8165 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
8166 }
8167 }
8168
8169 /* if hw had a problem adding a filter, delete it */
8170 if (pf->fd_inv > 0) {
8171 hlist_for_each_entry_safe(filter, node,
8172 &pf->fdir_filter_list, fdir_node) {
8173 if (filter->fd_id == pf->fd_inv) {
8174 hlist_del(&filter->fdir_node);
8175 kfree(filter);
8176 pf->fdir_pf_active_filters--;
8177 pf->fd_inv = 0;
8178 }
8179 }
8180 }
8181 }
8182
8183 #define I40E_MIN_FD_FLUSH_INTERVAL 10
8184 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
8185 /**
8186 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
8187 * @pf: board private structure
8188 **/
8189 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
8190 {
8191 unsigned long min_flush_time;
8192 int flush_wait_retry = 50;
8193 bool disable_atr = false;
8194 int fd_room;
8195 int reg;
8196
8197 if (!time_after(jiffies, pf->fd_flush_timestamp +
8198 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
8199 return;
8200
8201 /* If the flush is happening too quickly and we have mostly SB rules, we
8202 * should not re-enable ATR for some time.
8203 */
8204 min_flush_time = pf->fd_flush_timestamp +
8205 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
8206 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
8207
8208 if (!(time_after(jiffies, min_flush_time)) &&
8209 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
8210 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8211 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
8212 disable_atr = true;
8213 }
8214
8215 pf->fd_flush_timestamp = jiffies;
8216 pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
8217 /* flush all filters */
8218 wr32(&pf->hw, I40E_PFQF_CTL_1,
8219 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
8220 i40e_flush(&pf->hw);
8221 pf->fd_flush_cnt++;
8222 pf->fd_add_err = 0;
8223 do {
8224 /* Check FD flush status every 5-6msec */
8225 usleep_range(5000, 6000);
8226 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
8227 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
8228 break;
8229 } while (flush_wait_retry--);
8230 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
8231 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
8232 } else {
8233 /* replay sideband filters */
8234 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
8235 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
8236 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
8237 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
8238 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8239 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
8240 }
8241 }
8242
8243 /**
8244 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
8245 * @pf: board private structure
8246 **/
8247 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
8248 {
8249 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
8250 }
8251
8252 /* We can see up to 256 filter programming descriptors in transit if the
8253 * filters are being applied really fast, before we see the first filter
8254 * miss error on Rx queue 0. Accumulating enough error messages before
8255 * reacting will make sure we don't cause a flush too often.
8256 */
8257 #define I40E_MAX_FD_PROGRAM_ERROR 256
8258
8259 /**
8260 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
8261 * @pf: board private structure
8262 **/
8263 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
8264 {
8265
8266 /* if interface is down do nothing */
8267 if (test_bit(__I40E_DOWN, pf->state))
8268 return;
8269
8270 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8271 i40e_fdir_flush_and_replay(pf);
8272
8273 i40e_fdir_check_and_reenable(pf);
8274
8275 }
8276
8277 /**
8278 * i40e_vsi_link_event - notify VSI of a link event
8279 * @vsi: vsi to be notified
8280 * @link_up: link up or down
8281 **/
8282 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
8283 {
8284 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
8285 return;
8286
8287 switch (vsi->type) {
8288 case I40E_VSI_MAIN:
8289 if (!vsi->netdev || !vsi->netdev_registered)
8290 break;
8291
8292 if (link_up) {
8293 netif_carrier_on(vsi->netdev);
8294 netif_tx_wake_all_queues(vsi->netdev);
8295 } else {
8296 netif_carrier_off(vsi->netdev);
8297 netif_tx_stop_all_queues(vsi->netdev);
8298 }
8299 break;
8300
8301 case I40E_VSI_SRIOV:
8302 case I40E_VSI_VMDQ2:
8303 case I40E_VSI_CTRL:
8304 case I40E_VSI_IWARP:
8305 case I40E_VSI_MIRROR:
8306 default:
8307 /* there is no notification for other VSIs */
8308 break;
8309 }
8310 }
8311
8312 /**
8313 * i40e_veb_link_event - notify elements on the veb of a link event
8314 * @veb: veb to be notified
8315 * @link_up: link up or down
8316 **/
8317 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
8318 {
8319 struct i40e_pf *pf;
8320 int i;
8321
8322 if (!veb || !veb->pf)
8323 return;
8324 pf = veb->pf;
8325
8326 /* depth first... */
8327 for (i = 0; i < I40E_MAX_VEB; i++)
8328 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
8329 i40e_veb_link_event(pf->veb[i], link_up);
8330
8331 /* ... now the local VSIs */
8332 for (i = 0; i < pf->num_alloc_vsi; i++)
8333 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
8334 i40e_vsi_link_event(pf->vsi[i], link_up);
8335 }
8336
8337 /**
8338 * i40e_link_event - Update netif_carrier status
8339 * @pf: board private structure
8340 **/
8341 static void i40e_link_event(struct i40e_pf *pf)
8342 {
8343 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8344 u8 new_link_speed, old_link_speed;
8345 i40e_status status;
8346 bool new_link, old_link;
8347
8348 /* save off old link status information */
8349 pf->hw.phy.link_info_old = pf->hw.phy.link_info;
8350
8351 /* set this to force the get_link_status call to refresh state */
8352 pf->hw.phy.get_link_info = true;
8353
8354 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
8355
8356 status = i40e_get_link_status(&pf->hw, &new_link);
8357
8358 /* On success, disable temp link polling */
8359 if (status == I40E_SUCCESS) {
8360 if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
8361 pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
8362 } else {
8363 /* Enable link polling temporarily until i40e_get_link_status
8364 * returns I40E_SUCCESS
8365 */
8366 pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
8367 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
8368 status);
8369 return;
8370 }
8371
8372 old_link_speed = pf->hw.phy.link_info_old.link_speed;
8373 new_link_speed = pf->hw.phy.link_info.link_speed;
8374
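/* Nothing to do if neither the link state nor the link speed changed
 * and the netdev carrier state already matches (or the VSI is down).
 */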
8375 if (new_link == old_link &&
8376 new_link_speed == old_link_speed &&
8377 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
8378 new_link == netif_carrier_ok(vsi->netdev)))
8379 return;
8380
8381 i40e_print_link_message(vsi, new_link);
8382
8383 /* Notify the base of the switch tree connected to
8384 * the link. Floating VEBs are not notified.
8385 */
8386 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
8387 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
8388 else
8389 i40e_vsi_link_event(vsi, new_link);
8390
8391 if (pf->vf)
8392 i40e_vc_notify_link_state(pf);
8393
8394 if (pf->flags & I40E_FLAG_PTP)
8395 i40e_ptp_set_increment(pf);
8396 }
8397
8398 /**
8399 * i40e_watchdog_subtask - periodic checks not using event driven response
8400 * @pf: board private structure
8401 **/
8402 static void i40e_watchdog_subtask(struct i40e_pf *pf)
8403 {
8404 int i;
8405
8406 /* if interface is down do nothing */
8407 if (test_bit(__I40E_DOWN, pf->state) ||
8408 test_bit(__I40E_CONFIG_BUSY, pf->state))
8409 return;
8410
8411 /* make sure we don't do these things too often */
8412 if (time_before(jiffies, (pf->service_timer_previous +
8413 pf->service_timer_period)))
8414 return;
8415 pf->service_timer_previous = jiffies;
8416
8417 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
8418 (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
8419 i40e_link_event(pf);
8420
8421 /* Update the stats for active netdevs so the network stack
8422 * can look at updated numbers whenever it cares to
8423 */
8424 for (i = 0; i < pf->num_alloc_vsi; i++)
8425 if (pf->vsi[i] && pf->vsi[i]->netdev)
8426 i40e_update_stats(pf->vsi[i]);
8427
8428 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
8429 /* Update the stats for the active switching components */
8430 for (i = 0; i < I40E_MAX_VEB; i++)
8431 if (pf->veb[i])
8432 i40e_update_veb_stats(pf->veb[i]);
8433 }
8434
8435 i40e_ptp_rx_hang(pf);
8436 i40e_ptp_tx_hang(pf);
8437 }
8438
8439 /**
8440 * i40e_reset_subtask - Set up for resetting the device and driver
8441 * @pf: board private structure
8442 **/
8443 static void i40e_reset_subtask(struct i40e_pf *pf)
8444 {
8445 u32 reset_flags = 0;
8446
8447 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
8448 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
8449 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
8450 }
8451 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
8452 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
8453 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8454 }
8455 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
8456 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
8457 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
8458 }
8459 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
8460 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
8461 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
8462 }
8463 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
8464 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
8465 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
8466 }
8467
8468 /* If there's a recovery already waiting, it takes
8469 * precedence over starting a new reset sequence.
8470 */
8471 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
8472 i40e_prep_for_reset(pf, false);
8473 i40e_reset(pf);
8474 i40e_rebuild(pf, false, false);
8475 }
8476
8477 /* If we're already down or resetting, just bail */
8478 if (reset_flags &&
8479 !test_bit(__I40E_DOWN, pf->state) &&
8480 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
8481 i40e_do_reset(pf, reset_flags, false);
8482 }
8483 }
8484
8485 /**
8486 * i40e_handle_link_event - Handle link event
8487 * @pf: board private structure
8488 * @e: event info posted on ARQ
8489 **/
8490 static void i40e_handle_link_event(struct i40e_pf *pf,
8491 struct i40e_arq_event_info *e)
8492 {
8493 struct i40e_aqc_get_link_status *status =
8494 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
8495
8496 /* Do a new status request to re-enable LSE reporting
8497 * and load new status information into the hw struct
8498 * This completely ignores any state information
8499 * in the ARQ event info, instead choosing to always
8500 * issue the AQ update link status command.
8501 */
8502 i40e_link_event(pf);
8503
8504 /* Check if module meets thermal requirements */
8505 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
8506 dev_err(&pf->pdev->dev,
8507 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
8508 dev_err(&pf->pdev->dev,
8509 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
8510 } else {
8511 /* check for an unqualified module; if link is down, suppress
8512 * the message if link was forced to be down.
8513 */
8514 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
8515 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
8516 (!(status->link_info & I40E_AQ_LINK_UP)) &&
8517 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
8518 dev_err(&pf->pdev->dev,
8519 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
8520 dev_err(&pf->pdev->dev,
8521 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
8522 }
8523 }
8524 }
8525
8526 /**
8527 * i40e_clean_adminq_subtask - Clean the AdminQ rings
8528 * @pf: board private structure
8529 **/
8530 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
8531 {
8532 struct i40e_arq_event_info event;
8533 struct i40e_hw *hw = &pf->hw;
8534 u16 pending, i = 0;
8535 i40e_status ret;
8536 u16 opcode;
8537 u32 oldval;
8538 u32 val;
8539
8540 /* Do not run clean AQ when PF reset fails */
8541 if (test_bit(__I40E_RESET_FAILED, pf->state))
8542 return;
8543
8544 /* check for error indications */
8545 val = rd32(&pf->hw, pf->hw.aq.arq.len);
8546 oldval = val;
8547 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
8548 if (hw->debug_mask & I40E_DEBUG_AQ)
8549 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
8550 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
8551 }
8552 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
8553 if (hw->debug_mask & I40E_DEBUG_AQ)
8554 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
8555 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
8556 pf->arq_overflows++;
8557 }
8558 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
8559 if (hw->debug_mask & I40E_DEBUG_AQ)
8560 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
8561 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
8562 }
8563 if (oldval != val)
8564 wr32(&pf->hw, pf->hw.aq.arq.len, val);
8565
8566 val = rd32(&pf->hw, pf->hw.aq.asq.len);
8567 oldval = val;
8568 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
8569 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8570 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
8571 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
8572 }
8573 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
8574 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8575 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
8576 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
8577 }
8578 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
8579 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8580 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
8581 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
8582 }
8583 if (oldval != val)
8584 wr32(&pf->hw, pf->hw.aq.asq.len, val);
8585
8586 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
8587 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
8588 if (!event.msg_buf)
8589 return;
8590
8591 do {
8592 ret = i40e_clean_arq_element(hw, &event, &pending);
8593 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
8594 break;
8595 else if (ret) {
8596 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
8597 break;
8598 }
8599
8600 opcode = le16_to_cpu(event.desc.opcode);
8601 switch (opcode) {
8602
8603 case i40e_aqc_opc_get_link_status:
8604 i40e_handle_link_event(pf, &event);
8605 break;
8606 case i40e_aqc_opc_send_msg_to_pf:
8607 ret = i40e_vc_process_vf_msg(pf,
8608 le16_to_cpu(event.desc.retval),
8609 le32_to_cpu(event.desc.cookie_high),
8610 le32_to_cpu(event.desc.cookie_low),
8611 event.msg_buf,
8612 event.msg_len);
8613 break;
8614 case i40e_aqc_opc_lldp_update_mib:
8615 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
8616 #ifdef CONFIG_I40E_DCB
8617 rtnl_lock();
8618 ret = i40e_handle_lldp_event(pf, &event);
8619 rtnl_unlock();
8620 #endif /* CONFIG_I40E_DCB */
8621 break;
8622 case i40e_aqc_opc_event_lan_overflow:
8623 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
8624 i40e_handle_lan_overflow_event(pf, &event);
8625 break;
8626 case i40e_aqc_opc_send_msg_to_peer:
8627 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
8628 break;
8629 case i40e_aqc_opc_nvm_erase:
8630 case i40e_aqc_opc_nvm_update:
8631 case i40e_aqc_opc_oem_post_update:
8632 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
8633 "ARQ NVM operation 0x%04x completed\n",
8634 opcode);
8635 break;
8636 default:
8637 dev_info(&pf->pdev->dev,
8638 "ARQ: Unknown event 0x%04x ignored\n",
8639 opcode);
8640 break;
8641 }
8642 } while (i++ < pf->adminq_work_limit);
8643
8644 if (i < pf->adminq_work_limit)
8645 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
8646
8647 /* re-enable Admin queue interrupt cause */
8648 val = rd32(hw, I40E_PFINT_ICR0_ENA);
8649 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
8650 wr32(hw, I40E_PFINT_ICR0_ENA, val);
8651 i40e_flush(hw);
8652
8653 kfree(event.msg_buf);
8654 }
8655
8656 /**
8657 * i40e_verify_eeprom - make sure eeprom is good to use
8658 * @pf: board private structure
8659 **/
8660 static void i40e_verify_eeprom(struct i40e_pf *pf)
8661 {
8662 int err;
8663
8664 err = i40e_diag_eeprom_test(&pf->hw);
8665 if (err) {
8666 /* retry in case of garbage read */
8667 err = i40e_diag_eeprom_test(&pf->hw);
8668 if (err) {
8669 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
8670 err);
8671 set_bit(__I40E_BAD_EEPROM, pf->state);
8672 }
8673 }
8674
8675 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
8676 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
8677 clear_bit(__I40E_BAD_EEPROM, pf->state);
8678 }
8679 }
8680
8681 /**
8682 * i40e_enable_pf_switch_lb - enable PF switch loopback
8683 * @pf: pointer to the PF structure
8684 *
8685 * enable switch loopback or die - no point in a return value
8686 **/
8687 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
8688 {
8689 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8690 struct i40e_vsi_context ctxt;
8691 int ret;
8692
8693 ctxt.seid = pf->main_vsi_seid;
8694 ctxt.pf_num = pf->hw.pf_id;
8695 ctxt.vf_num = 0;
8696 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8697 if (ret) {
8698 dev_info(&pf->pdev->dev,
8699 "couldn't get PF vsi config, err %s aq_err %s\n",
8700 i40e_stat_str(&pf->hw, ret),
8701 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8702 return;
8703 }
8704 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8705 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8706 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8707
8708 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
8709 if (ret) {
8710 dev_info(&pf->pdev->dev,
8711 "update vsi switch failed, err %s aq_err %s\n",
8712 i40e_stat_str(&pf->hw, ret),
8713 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8714 }
8715 }
8716
8717 /**
8718 * i40e_disable_pf_switch_lb - disable PF switch loopback
8719 * @pf: pointer to the PF structure
8720 *
8721 * disable switch loopback or die - no point in a return value
8722 **/
8723 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
8724 {
8725 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8726 struct i40e_vsi_context ctxt;
8727 int ret;
8728
8729 ctxt.seid = pf->main_vsi_seid;
8730 ctxt.pf_num = pf->hw.pf_id;
8731 ctxt.vf_num = 0;
8732 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8733 if (ret) {
8734 dev_info(&pf->pdev->dev,
8735 "couldn't get PF vsi config, err %s aq_err %s\n",
8736 i40e_stat_str(&pf->hw, ret),
8737 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8738 return;
8739 }
8740 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8741 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8742 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8743
8744 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
8745 if (ret) {
8746 dev_info(&pf->pdev->dev,
8747 "update vsi switch failed, err %s aq_err %s\n",
8748 i40e_stat_str(&pf->hw, ret),
8749 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8750 }
8751 }
8752
8753 /**
8754 * i40e_config_bridge_mode - Configure the HW bridge mode
8755 * @veb: pointer to the bridge instance
8756 *
8757 * Configure the loop back mode for the LAN VSI that is downlink to the
8758 * specified HW bridge instance. It is expected this function is called
8759 * when a new HW bridge is instantiated.
8760 **/
8761 static void i40e_config_bridge_mode(struct i40e_veb *veb)
8762 {
8763 struct i40e_pf *pf = veb->pf;
8764
8765 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
8766 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
8767 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
8768 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
8769 i40e_disable_pf_switch_lb(pf);
8770 else
8771 i40e_enable_pf_switch_lb(pf);
8772 }
8773
8774 /**
8775 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
8776 * @veb: pointer to the VEB instance
8777 *
8778 * This is a recursive function that first builds the attached VSIs then
8779 * recurses in to build the next layer of VEB. We track the connections
8780 * through our own index numbers because the seid's from the HW could
8781 * change across the reset.
8782 **/
8783 static int i40e_reconstitute_veb(struct i40e_veb *veb)
8784 {
8785 struct i40e_vsi *ctl_vsi = NULL;
8786 struct i40e_pf *pf = veb->pf;
8787 int v, veb_idx;
8788 int ret;
8789
8790 /* build VSI that owns this VEB, temporarily attached to base VEB */
8791 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
8792 if (pf->vsi[v] &&
8793 pf->vsi[v]->veb_idx == veb->idx &&
8794 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
8795 ctl_vsi = pf->vsi[v];
8796 break;
8797 }
8798 }
8799 if (!ctl_vsi) {
8800 dev_info(&pf->pdev->dev,
8801 "missing owner VSI for veb_idx %d\n", veb->idx);
8802 ret = -ENOENT;
8803 goto end_reconstitute;
8804 }
8805 if (ctl_vsi != pf->vsi[pf->lan_vsi])
8806 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
8807 ret = i40e_add_vsi(ctl_vsi);
8808 if (ret) {
8809 dev_info(&pf->pdev->dev,
8810 "rebuild of veb_idx %d owner VSI failed: %d\n",
8811 veb->idx, ret);
8812 goto end_reconstitute;
8813 }
8814 i40e_vsi_reset_stats(ctl_vsi);
8815
8816 /* create the VEB in the switch and move the VSI onto the VEB */
8817 ret = i40e_add_veb(veb, ctl_vsi);
8818 if (ret)
8819 goto end_reconstitute;
8820
8821 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
8822 veb->bridge_mode = BRIDGE_MODE_VEB;
8823 else
8824 veb->bridge_mode = BRIDGE_MODE_VEPA;
8825 i40e_config_bridge_mode(veb);
8826
8827 /* create the remaining VSIs attached to this VEB */
8828 for (v = 0; v < pf->num_alloc_vsi; v++) {
8829 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
8830 continue;
8831
8832 if (pf->vsi[v]->veb_idx == veb->idx) {
8833 struct i40e_vsi *vsi = pf->vsi[v];
8834
8835 vsi->uplink_seid = veb->seid;
8836 ret = i40e_add_vsi(vsi);
8837 if (ret) {
8838 dev_info(&pf->pdev->dev,
8839 "rebuild of vsi_idx %d failed: %d\n",
8840 v, ret);
8841 goto end_reconstitute;
8842 }
8843 i40e_vsi_reset_stats(vsi);
8844 }
8845 }
8846
8847 /* create any VEBs attached to this VEB - RECURSION */
8848 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
8849 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
8850 pf->veb[veb_idx]->uplink_seid = veb->seid;
8851 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
8852 if (ret)
8853 break;
8854 }
8855 }
8856
8857 end_reconstitute:
8858 return ret;
8859 }
8860
8861 /**
8862 * i40e_get_capabilities - get info about the HW
8863 * @pf: the PF struct
8864 **/
8865 static int i40e_get_capabilities(struct i40e_pf *pf,
8866 enum i40e_admin_queue_opc list_type)
8867 {
8868 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
8869 u16 data_size;
8870 int buf_len;
8871 int err;
8872
8873 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
8874 do {
8875 cap_buf = kzalloc(buf_len, GFP_KERNEL);
8876 if (!cap_buf)
8877 return -ENOMEM;
8878
8879 /* this loads the data into the hw struct for us */
8880 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
8881 &data_size, list_type,
8882 NULL);
8883 /* data loaded, buffer no longer needed */
8884 kfree(cap_buf);
8885
8886 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
8887 /* retry with a larger buffer */
8888 buf_len = data_size;
8889 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
8890 dev_info(&pf->pdev->dev,
8891 "capability discovery failed, err %s aq_err %s\n",
8892 i40e_stat_str(&pf->hw, err),
8893 i40e_aq_str(&pf->hw,
8894 pf->hw.aq.asq_last_status));
8895 return -ENODEV;
8896 }
8897 } while (err);
8898
8899 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
8900 if (list_type == i40e_aqc_opc_list_func_capabilities) {
8901 dev_info(&pf->pdev->dev,
8902 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
8903 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
8904 pf->hw.func_caps.num_msix_vectors,
8905 pf->hw.func_caps.num_msix_vectors_vf,
8906 pf->hw.func_caps.fd_filters_guaranteed,
8907 pf->hw.func_caps.fd_filters_best_effort,
8908 pf->hw.func_caps.num_tx_qp,
8909 pf->hw.func_caps.num_vsis);
8910 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
8911 dev_info(&pf->pdev->dev,
8912 "switch_mode=0x%04x, function_valid=0x%08x\n",
8913 pf->hw.dev_caps.switch_mode,
8914 pf->hw.dev_caps.valid_functions);
8915 dev_info(&pf->pdev->dev,
8916 "SR-IOV=%d, num_vfs for all function=%u\n",
8917 pf->hw.dev_caps.sr_iov_1_1,
8918 pf->hw.dev_caps.num_vfs);
8919 dev_info(&pf->pdev->dev,
8920 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
8921 pf->hw.dev_caps.num_vsis,
8922 pf->hw.dev_caps.num_rx_qp,
8923 pf->hw.dev_caps.num_tx_qp);
8924 }
8925 }
8926 if (list_type == i40e_aqc_opc_list_func_capabilities) {
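/* one main LAN VSI, plus one FCoE VSI if supported, plus one VSI per VF */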
8927 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
8928 + pf->hw.func_caps.num_vfs)
8929 if (pf->hw.revision_id == 0 &&
8930 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
8931 dev_info(&pf->pdev->dev,
8932 "got num_vsis %d, setting num_vsis to %d\n",
8933 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
8934 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
8935 }
8936 }
8937 return 0;
8938 }
8939
8940 static int i40e_vsi_clear(struct i40e_vsi *vsi);
8941
8942 /**
8943 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
8944 * @pf: board private structure
8945 **/
8946 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
8947 {
8948 struct i40e_vsi *vsi;
8949
8950 /* quick workaround for an NVM issue that leaves a critical register
8951 * uninitialized
8952 */
8953 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
8954 static const u32 hkey[] = {
8955 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
8956 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
8957 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
8958 0x95b3a76d};
8959 int i;
8960
8961 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
8962 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
8963 }
8964
8965 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
8966 return;
8967
8968 /* find existing VSI and see if it needs configuring */
8969 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
8970
8971 /* create a new VSI if none exists */
8972 if (!vsi) {
8973 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
8974 pf->vsi[pf->lan_vsi]->seid, 0);
8975 if (!vsi) {
8976 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
8977 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8978 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
8979 return;
8980 }
8981 }
8982
8983 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
8984 }
8985
8986 /**
8987 * i40e_fdir_teardown - release the Flow Director resources
8988 * @pf: board private structure
8989 **/
8990 static void i40e_fdir_teardown(struct i40e_pf *pf)
8991 {
8992 struct i40e_vsi *vsi;
8993
8994 i40e_fdir_filter_exit(pf);
8995 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
8996 if (vsi)
8997 i40e_vsi_release(vsi);
8998 }
8999
9000 /**
9001 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
9002 * @vsi: PF main vsi
9003 * @seid: seid of main or channel VSIs
9004 *
9005 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
9006 * existed before reset
9007 **/
9008 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
9009 {
9010 struct i40e_cloud_filter *cfilter;
9011 struct i40e_pf *pf = vsi->back;
9012 struct hlist_node *node;
9013 i40e_status ret;
9014
9015 /* Add cloud filters back if they exist */
9016 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
9017 cloud_node) {
9018 if (cfilter->seid != seid)
9019 continue;
9020
9021 if (cfilter->dst_port)
9022 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
9023 true);
9024 else
9025 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
9026
9027 if (ret) {
9028 dev_dbg(&pf->pdev->dev,
9029 "Failed to rebuild cloud filter, err %s aq_err %s\n",
9030 i40e_stat_str(&pf->hw, ret),
9031 i40e_aq_str(&pf->hw,
9032 pf->hw.aq.asq_last_status));
9033 return ret;
9034 }
9035 }
9036 return 0;
9037 }
9038
9039 /**
9040 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
9041 * @vsi: PF main vsi
9042 *
9043 * Rebuilds channel VSIs if they existed before reset
9044 **/
9045 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
9046 {
9047 struct i40e_channel *ch, *ch_tmp;
9048 i40e_status ret;
9049
9050 if (list_empty(&vsi->ch_list))
9051 return 0;
9052
9053 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
9054 if (!ch->initialized)
9055 break;
9056 /* Proceed with creation of channel (VMDq2) VSI */
9057 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
9058 if (ret) {
9059 dev_info(&vsi->back->pdev->dev,
9060 "failed to rebuild channels using uplink_seid %u\n",
9061 vsi->uplink_seid);
9062 return ret;
9063 }
9064 if (ch->max_tx_rate) {
9065 u64 credits = ch->max_tx_rate;
9066
9067 if (i40e_set_bw_limit(vsi, ch->seid,
9068 ch->max_tx_rate))
9069 return -EINVAL;
9070
9071 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9072 dev_dbg(&vsi->back->pdev->dev,
9073 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9074 ch->max_tx_rate,
9075 credits,
9076 ch->seid);
9077 }
9078 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
9079 if (ret) {
9080 dev_dbg(&vsi->back->pdev->dev,
9081 "Failed to rebuild cloud filters for channel VSI %u\n",
9082 ch->seid);
9083 return ret;
9084 }
9085 }
9086 return 0;
9087 }
9088
9089 /**
9090 * i40e_prep_for_reset - prep for the core to reset
9091 * @pf: board private structure
9092 * @lock_acquired: indicates whether or not the lock has been acquired
9093 * before this function was called.
9094 *
9095 * Close up the VFs and other things in prep for PF Reset.
9096 **/
9097 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
9098 {
9099 struct i40e_hw *hw = &pf->hw;
9100 i40e_status ret = 0;
9101 u32 v;
9102
9103 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
9104 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9105 return;
9106 if (i40e_check_asq_alive(&pf->hw))
9107 i40e_vc_notify_reset(pf);
9108
9109 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
9110
9111 /* quiesce the VSIs and their queues that are not already DOWN */
9112 /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
9113 if (!lock_acquired)
9114 rtnl_lock();
9115 i40e_pf_quiesce_all_vsi(pf);
9116 if (!lock_acquired)
9117 rtnl_unlock();
9118
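/* clear out the cached VSI SEIDs, which become stale across the reset */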
9119 for (v = 0; v < pf->num_alloc_vsi; v++) {
9120 if (pf->vsi[v])
9121 pf->vsi[v]->seid = 0;
9122 }
9123
9124 i40e_shutdown_adminq(&pf->hw);
9125
9126 /* call shutdown HMC */
9127 if (hw->hmc.hmc_obj) {
9128 ret = i40e_shutdown_lan_hmc(hw);
9129 if (ret)
9130 dev_warn(&pf->pdev->dev,
9131 "shutdown_lan_hmc failed: %d\n", ret);
9132 }
9133 }
9134
9135 /**
9136 * i40e_send_version - update firmware with driver version
9137 * @pf: PF struct
9138 */
9139 static void i40e_send_version(struct i40e_pf *pf)
9140 {
9141 struct i40e_driver_version dv;
9142
9143 dv.major_version = DRV_VERSION_MAJOR;
9144 dv.minor_version = DRV_VERSION_MINOR;
9145 dv.build_version = DRV_VERSION_BUILD;
9146 dv.subbuild_version = 0;
9147 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
9148 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
9149 }
9150
9151 /**
9152 * i40e_get_oem_version - get OEM specific version information
9153 * @hw: pointer to the hardware structure
9154 **/
9155 static void i40e_get_oem_version(struct i40e_hw *hw)
9156 {
9157 u16 block_offset = 0xffff;
9158 u16 block_length = 0;
9159 u16 capabilities = 0;
9160 u16 gen_snap = 0;
9161 u16 release = 0;
9162
9163 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
9164 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00
9165 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
9166 #define I40E_NVM_OEM_GEN_OFFSET 0x02
9167 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03
9168 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
9169 #define I40E_NVM_OEM_LENGTH 3
9170
9171 /* Check if pointer to OEM version block is valid. */
9172 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
9173 if (block_offset == 0xffff)
9174 return;
9175
9176 /* Check if OEM version block has correct length. */
9177 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
9178 &block_length);
9179 if (block_length < I40E_NVM_OEM_LENGTH)
9180 return;
9181
9182 /* Check if OEM version format is as expected. */
9183 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
9184 &capabilities);
9185 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
9186 return;
9187
9188 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
9189 &gen_snap);
9190 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
9191 &release);
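	/* Pack the OEM version: the gen/snap word is shifted into the upper
	 * bits and the release word occupies the low bits below
	 * I40E_OEM_SNAP_SHIFT (the exact field widths are defined in the
	 * driver headers, not shown here).
	 */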
9192 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
9193 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
9194 }
9195
9196 /**
9197 * i40e_reset - wait for core reset to finish; reset the PF if a CORER is not seen
9198 * @pf: board private structure
9199 **/
9200 static int i40e_reset(struct i40e_pf *pf)
9201 {
9202 struct i40e_hw *hw = &pf->hw;
9203 i40e_status ret;
9204
9205 ret = i40e_pf_reset(hw);
9206 if (ret) {
9207 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
9208 set_bit(__I40E_RESET_FAILED, pf->state);
9209 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9210 } else {
9211 pf->pfr_count++;
9212 }
9213 return ret;
9214 }
9215
9216 /**
9217 * i40e_rebuild - rebuild using a saved config
9218 * @pf: board private structure
9219 * @reinit: if the Main VSI needs to be re-initialized.
9220 * @lock_acquired: indicates whether or not the lock has been acquired
9221 * before this function was called.
9222 **/
9223 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
9224 {
9225 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9226 struct i40e_hw *hw = &pf->hw;
9227 u8 set_fc_aq_fail = 0;
9228 i40e_status ret;
9229 u32 val;
9230 int v;
9231
9232 if (test_bit(__I40E_DOWN, pf->state))
9233 goto clear_recovery;
9234 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
9235
9236 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
9237 ret = i40e_init_adminq(&pf->hw);
9238 if (ret) {
9239 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
9240 i40e_stat_str(&pf->hw, ret),
9241 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9242 goto clear_recovery;
9243 }
9244 i40e_get_oem_version(&pf->hw);
9245
9246 /* re-verify the eeprom if we just had an EMP reset */
9247 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
9248 i40e_verify_eeprom(pf);
9249
9250 i40e_clear_pxe_mode(hw);
9251 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
9252 if (ret)
9253 goto end_core_reset;
9254
9255 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9256 hw->func_caps.num_rx_qp, 0, 0);
9257 if (ret) {
9258 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
9259 goto end_core_reset;
9260 }
9261 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9262 if (ret) {
9263 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
9264 goto end_core_reset;
9265 }
9266
9267 #ifdef CONFIG_I40E_DCB
9268 ret = i40e_init_pf_dcb(pf);
9269 if (ret) {
9270 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
9271 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9272 /* Continue without DCB enabled */
9273 }
9274 #endif /* CONFIG_I40E_DCB */
9275 /* do basic switch setup */
9276 if (!lock_acquired)
9277 rtnl_lock();
9278 ret = i40e_setup_pf_switch(pf, reinit);
9279 if (ret)
9280 goto end_unlock;
9281
9282 /* The driver only wants link up/down and module qualification
9283 * reports from firmware. Note the negative logic.
9284 */
9285 ret = i40e_aq_set_phy_int_mask(&pf->hw,
9286 ~(I40E_AQ_EVENT_LINK_UPDOWN |
9287 I40E_AQ_EVENT_MEDIA_NA |
9288 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
9289 if (ret)
9290 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
9291 i40e_stat_str(&pf->hw, ret),
9292 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9293
9294 /* make sure our flow control settings are restored */
9295 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
9296 if (ret)
9297 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
9298 i40e_stat_str(&pf->hw, ret),
9299 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9300
9301 /* Rebuild the VSIs and VEBs that existed before reset.
9302 * They are still in our local switch element arrays, so only
9303 * need to rebuild the switch model in the HW.
9304 *
9305 * If there were VEBs but the reconstitution failed, we'll try
9306 * to recover minimal use by getting the basic PF VSI working.
9307 */
9308 if (vsi->uplink_seid != pf->mac_seid) {
9309 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
9310 /* find the one VEB connected to the MAC, and find orphans */
9311 for (v = 0; v < I40E_MAX_VEB; v++) {
9312 if (!pf->veb[v])
9313 continue;
9314
9315 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
9316 pf->veb[v]->uplink_seid == 0) {
9317 ret = i40e_reconstitute_veb(pf->veb[v]);
9318
9319 if (!ret)
9320 continue;
9321
9322 /* If Main VEB failed, we're in deep doodoo,
9323 * so give up rebuilding the switch and set up
9324 * for minimal rebuild of PF VSI.
9325 * If orphan failed, we'll report the error
9326 * but try to keep going.
9327 */
9328 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
9329 dev_info(&pf->pdev->dev,
9330 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
9331 ret);
9332 vsi->uplink_seid = pf->mac_seid;
9333 break;
9334 } else if (pf->veb[v]->uplink_seid == 0) {
9335 dev_info(&pf->pdev->dev,
9336 "rebuild of orphan VEB failed: %d\n",
9337 ret);
9338 }
9339 }
9340 }
9341 }
9342
9343 if (vsi->uplink_seid == pf->mac_seid) {
9344 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
9345 /* no VEB, so rebuild only the Main VSI */
9346 ret = i40e_add_vsi(vsi);
9347 if (ret) {
9348 dev_info(&pf->pdev->dev,
9349 "rebuild of Main VSI failed: %d\n", ret);
9350 goto end_unlock;
9351 }
9352 }
9353
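	/* Restore the TC0 Tx rate limit configured through mqprio, if any.
	 * The first do_div() converts the stored rate to Mbps (assuming
	 * I40E_BW_MBPS_DIVISOR expresses 1 Mbps in the stored unit) and the
	 * second converts it to 50 Mbps credits for the debug message.
	 */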
9354 if (vsi->mqprio_qopt.max_rate[0]) {
9355 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
9356 u64 credits = 0;
9357
9358 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
9359 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
9360 if (ret)
9361 goto end_unlock;
9362
9363 credits = max_tx_rate;
9364 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9365 dev_dbg(&vsi->back->pdev->dev,
9366 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9367 max_tx_rate,
9368 credits,
9369 vsi->seid);
9370 }
9371
9372 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
9373 if (ret)
9374 goto end_unlock;
9375
9376 /* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs
9377 * for this main VSI if they exist
9378 */
9379 ret = i40e_rebuild_channels(vsi);
9380 if (ret)
9381 goto end_unlock;
9382
9383 /* Reconfigure hardware for allowing smaller MSS in the case
9384 * of TSO, so that we avoid the MDD being fired and causing
9385 * a reset in the case of small MSS+TSO.
9386 */
9387 #define I40E_REG_MSS 0x000E64DC
9388 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
9389 #define I40E_64BYTE_MSS 0x400000
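	/* The minimum-MSS field lives in bits 25:16 of I40E_REG_MSS
	 * (mask 0x3FF0000); 0x400000 in that field is 0x40 = 64 bytes.
	 * The code below therefore lowers the hardware's minimum MSS to
	 * 64 bytes if it is currently set to anything larger.
	 */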
9390 val = rd32(hw, I40E_REG_MSS);
9391 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
9392 val &= ~I40E_REG_MSS_MIN_MASK;
9393 val |= I40E_64BYTE_MSS;
9394 wr32(hw, I40E_REG_MSS, val);
9395 }
9396
9397 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
9398 msleep(75);
9399 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
9400 if (ret)
9401 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
9402 i40e_stat_str(&pf->hw, ret),
9403 i40e_aq_str(&pf->hw,
9404 pf->hw.aq.asq_last_status));
9405 }
9406 /* reinit the misc interrupt */
9407 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
9408 ret = i40e_setup_misc_vector(pf);
9409
9410 /* Add a filter to keep any VSI from transmitting flow control frames.
9411 * By doing so we stop a malicious VF from sending out
9412 * PAUSE or PFC frames and potentially controlling traffic for other
9413 * PF/VF VSIs.
9414 * The FW can still send Flow control frames if enabled.
9415 */
9416 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
9417 pf->main_vsi_seid);
9418
9419 /* restart the VSIs that were rebuilt and running before the reset */
9420 i40e_pf_unquiesce_all_vsi(pf);
9421
9422 /* Release the RTNL lock before we start resetting VFs */
9423 if (!lock_acquired)
9424 rtnl_unlock();
9425
9426 /* Restore promiscuous settings */
9427 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
9428 if (ret)
9429 dev_warn(&pf->pdev->dev,
9430 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
9431 pf->cur_promisc ? "on" : "off",
9432 i40e_stat_str(&pf->hw, ret),
9433 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9434
9435 i40e_reset_all_vfs(pf, true);
9436
9437 /* tell the firmware that we're starting */
9438 i40e_send_version(pf);
9439
9440 /* We've already released the lock, so don't do it again */
9441 goto end_core_reset;
9442
9443 end_unlock:
9444 if (!lock_acquired)
9445 rtnl_unlock();
9446 end_core_reset:
9447 clear_bit(__I40E_RESET_FAILED, pf->state);
9448 clear_recovery:
9449 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9450 }
9451
9452 /**
9453 * i40e_reset_and_rebuild - reset and rebuild using a saved config
9454 * @pf: board private structure
9455 * @reinit: if the Main VSI needs to be re-initialized.
9456 * @lock_acquired: indicates whether or not the lock has been acquired
9457 * before this function was called.
9458 **/
9459 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
9460 bool lock_acquired)
9461 {
9462 int ret;
9463 /* Now we wait for GRST to settle out.
9464 * We don't have to delete the VEBs or VSIs from the hw switch
9465 * because the reset will make them disappear.
9466 */
9467 ret = i40e_reset(pf);
9468 if (!ret)
9469 i40e_rebuild(pf, reinit, lock_acquired);
9470 }
9471
9472 /**
9473 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
9474 * @pf: board private structure
9475 * @lock_acquired: indicates whether or not the lock has been acquired
9476 * before this function was called.
9477 *
9478 * Close up the VFs and other things in prep for a Core Reset,
9479 * then get ready to rebuild the world.
9480 **/
9481 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
9482 {
9483 i40e_prep_for_reset(pf, lock_acquired);
9484 i40e_reset_and_rebuild(pf, false, lock_acquired);
9485 }
9486
9487 /**
9488 * i40e_handle_mdd_event
9489 * @pf: pointer to the PF structure
9490 *
9491 * Called from the MDD irq handler to identify possibly malicious vfs
9492 **/
9493 static void i40e_handle_mdd_event(struct i40e_pf *pf)
9494 {
9495 struct i40e_hw *hw = &pf->hw;
9496 bool mdd_detected = false;
9497 bool pf_mdd_detected = false;
9498 struct i40e_vf *vf;
9499 u32 reg;
9500 int i;
9501
9502 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
9503 return;
9504
9505 /* find what triggered the MDD event */
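	/* The global MDET registers report absolute queue numbers, so the
	 * extraction below subtracts this PF's base_queue to log a
	 * PF-relative queue index.
	 */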
9506 reg = rd32(hw, I40E_GL_MDET_TX);
9507 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
9508 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
9509 I40E_GL_MDET_TX_PF_NUM_SHIFT;
9510 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
9511 I40E_GL_MDET_TX_VF_NUM_SHIFT;
9512 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
9513 I40E_GL_MDET_TX_EVENT_SHIFT;
9514 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
9515 I40E_GL_MDET_TX_QUEUE_SHIFT) -
9516 pf->hw.func_caps.base_queue;
9517 if (netif_msg_tx_err(pf))
9518 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
9519 event, queue, pf_num, vf_num);
9520 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
9521 mdd_detected = true;
9522 }
9523 reg = rd32(hw, I40E_GL_MDET_RX);
9524 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
9525 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
9526 I40E_GL_MDET_RX_FUNCTION_SHIFT;
9527 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
9528 I40E_GL_MDET_RX_EVENT_SHIFT;
9529 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
9530 I40E_GL_MDET_RX_QUEUE_SHIFT) -
9531 pf->hw.func_caps.base_queue;
9532 if (netif_msg_rx_err(pf))
9533 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
9534 event, queue, func);
9535 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
9536 mdd_detected = true;
9537 }
9538
9539 if (mdd_detected) {
9540 reg = rd32(hw, I40E_PF_MDET_TX);
9541 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
9542 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
9543 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
9544 pf_mdd_detected = true;
9545 }
9546 reg = rd32(hw, I40E_PF_MDET_RX);
9547 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
9548 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
9549 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
9550 pf_mdd_detected = true;
9551 }
9552 /* Queue belongs to the PF, initiate a reset */
9553 if (pf_mdd_detected) {
9554 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9555 i40e_service_event_schedule(pf);
9556 }
9557 }
9558
9559 /* see if one of the VFs needs its hand slapped */
9560 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
9561 vf = &(pf->vf[i]);
9562 reg = rd32(hw, I40E_VP_MDET_TX(i));
9563 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
9564 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
9565 vf->num_mdd_events++;
9566 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
9567 i);
9568 }
9569
9570 reg = rd32(hw, I40E_VP_MDET_RX(i));
9571 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
9572 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
9573 vf->num_mdd_events++;
9574 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
9575 i);
9576 }
9577
9578 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
9579 dev_info(&pf->pdev->dev,
9580 "Too many MDD events on VF %d, disabled\n", i);
9581 dev_info(&pf->pdev->dev,
9582 "Use PF Control I/F to re-enable the VF\n");
9583 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
9584 }
9585 }
9586
9587 /* re-enable mdd interrupt cause */
9588 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
9589 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
9590 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
9591 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
9592 i40e_flush(hw);
9593 }
9594
9595 static const char *i40e_tunnel_name(struct i40e_udp_port_config *port)
9596 {
9597 switch (port->type) {
9598 case UDP_TUNNEL_TYPE_VXLAN:
9599 return "vxlan";
9600 case UDP_TUNNEL_TYPE_GENEVE:
9601 return "geneve";
9602 default:
9603 return "unknown";
9604 }
9605 }
9606
9607 /**
9608 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
9609 * @pf: board private structure
9610 **/
9611 static void i40e_sync_udp_filters(struct i40e_pf *pf)
9612 {
9613 int i;
9614
9615 /* loop through and set pending bit for all active UDP filters */
9616 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9617 if (pf->udp_ports[i].port)
9618 pf->pending_udp_bitmap |= BIT_ULL(i);
9619 }
9620
9621 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
9622 }
9623
9624 /**
9625 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
9626 * @pf: board private structure
9627 **/
9628 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
9629 {
9630 struct i40e_hw *hw = &pf->hw;
9631 i40e_status ret;
9632 u16 port;
9633 int i;
9634
9635 if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
9636 return;
9637
9638 pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;
9639
9640 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9641 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
9642 pf->pending_udp_bitmap &= ~BIT_ULL(i);
9643 port = pf->udp_ports[i].port;
9644 if (port)
9645 ret = i40e_aq_add_udp_tunnel(hw, port,
9646 pf->udp_ports[i].type,
9647 NULL, NULL);
9648 else
9649 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
9650
9651 if (ret) {
9652 dev_info(&pf->pdev->dev,
9653 "%s %s port %d, index %d failed, err %s aq_err %s\n",
9654 i40e_tunnel_name(&pf->udp_ports[i]),
9655 port ? "add" : "delete",
9656 port, i,
9657 i40e_stat_str(&pf->hw, ret),
9658 i40e_aq_str(&pf->hw,
9659 pf->hw.aq.asq_last_status));
9660 pf->udp_ports[i].port = 0;
9661 }
9662 }
9663 }
9664 }
9665
9666 /**
9667 * i40e_service_task - Run the driver's async subtasks
9668 * @work: pointer to work_struct containing our data
9669 **/
9670 static void i40e_service_task(struct work_struct *work)
9671 {
9672 struct i40e_pf *pf = container_of(work,
9673 struct i40e_pf,
9674 service_task);
9675 unsigned long start_time = jiffies;
9676
9677 /* don't bother with service tasks if a reset is in progress */
9678 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9679 return;
9680
9681 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
9682 return;
9683
9684 i40e_detect_recover_hung(pf);
9685 i40e_sync_filters_subtask(pf);
9686 i40e_reset_subtask(pf);
9687 i40e_handle_mdd_event(pf);
9688 i40e_vc_process_vflr_event(pf);
9689 i40e_watchdog_subtask(pf);
9690 i40e_fdir_reinit_subtask(pf);
9691 if (pf->flags & I40E_FLAG_CLIENT_RESET) {
9692 /* Client subtask will reopen next time through. */
9693 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
9694 pf->flags &= ~I40E_FLAG_CLIENT_RESET;
9695 } else {
9696 i40e_client_subtask(pf);
9697 if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
9698 i40e_notify_client_of_l2_param_changes(
9699 pf->vsi[pf->lan_vsi]);
9700 pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
9701 }
9702 }
9703 i40e_sync_filters_subtask(pf);
9704 i40e_sync_udp_filters_subtask(pf);
9705 i40e_clean_adminq_subtask(pf);
9706
9707 /* flush memory to make sure state is correct before next watchdog */
9708 smp_mb__before_atomic();
9709 clear_bit(__I40E_SERVICE_SCHED, pf->state);
9710
9711 /* If the tasks have taken longer than one timer cycle or there
9712 * is more work to be done, reschedule the service task now
9713 * rather than wait for the timer to tick again.
9714 */
9715 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
9716 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
9717 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
9718 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
9719 i40e_service_event_schedule(pf);
9720 }
9721
9722 /**
9723 * i40e_service_timer - timer callback
9724 * @t: pointer to the timer_list embedded in the PF struct
9725 **/
9726 static void i40e_service_timer(struct timer_list *t)
9727 {
9728 struct i40e_pf *pf = from_timer(pf, t, service_timer);
9729
9730 mod_timer(&pf->service_timer,
9731 round_jiffies(jiffies + pf->service_timer_period));
9732 i40e_service_event_schedule(pf);
9733 }
9734
9735 /**
9736 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
9737 * @vsi: the VSI being configured
9738 **/
9739 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
9740 {
9741 struct i40e_pf *pf = vsi->back;
9742
9743 switch (vsi->type) {
9744 case I40E_VSI_MAIN:
9745 vsi->alloc_queue_pairs = pf->num_lan_qps;
9746 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
9747 I40E_REQ_DESCRIPTOR_MULTIPLE);
9748 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
9749 vsi->num_q_vectors = pf->num_lan_msix;
9750 else
9751 vsi->num_q_vectors = 1;
9752
9753 break;
9754
9755 case I40E_VSI_FDIR:
9756 vsi->alloc_queue_pairs = 1;
9757 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
9758 I40E_REQ_DESCRIPTOR_MULTIPLE);
9759 vsi->num_q_vectors = pf->num_fdsb_msix;
9760 break;
9761
9762 case I40E_VSI_VMDQ2:
9763 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
9764 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
9765 I40E_REQ_DESCRIPTOR_MULTIPLE);
9766 vsi->num_q_vectors = pf->num_vmdq_msix;
9767 break;
9768
9769 case I40E_VSI_SRIOV:
9770 vsi->alloc_queue_pairs = pf->num_vf_qps;
9771 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
9772 I40E_REQ_DESCRIPTOR_MULTIPLE);
9773 break;
9774
9775 default:
9776 WARN_ON(1);
9777 return -ENODATA;
9778 }
9779
9780 return 0;
9781 }
9782
9783 /**
9784 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
9785 * @vsi: VSI pointer
9786 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
9787 *
9788 * On error: returns error code (negative)
9789 * On success: returns 0
9790 **/
9791 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
9792 {
9793 struct i40e_ring **next_rings;
9794 int size;
9795 int ret = 0;
9796
9797 /* allocate memory for the Tx, XDP Tx and Rx ring pointers */
9798 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
9799 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
9800 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
9801 if (!vsi->tx_rings)
9802 return -ENOMEM;
9803 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
9804 if (i40e_enabled_xdp_vsi(vsi)) {
9805 vsi->xdp_rings = next_rings;
9806 next_rings += vsi->alloc_queue_pairs;
9807 }
9808 vsi->rx_rings = next_rings;
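	/* The pointer arrays all live in the single allocation above, laid
	 * out as [ Tx | XDP Tx (if enabled) | Rx ]. For example, with 4
	 * queue pairs and XDP enabled this is 12 ring pointers, with
	 * rx_rings starting at offset 8. (Example numbers for illustration.)
	 */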
9809
9810 if (alloc_qvectors) {
9811 /* allocate memory for q_vector pointers */
9812 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
9813 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
9814 if (!vsi->q_vectors) {
9815 ret = -ENOMEM;
9816 goto err_vectors;
9817 }
9818 }
9819 return ret;
9820
9821 err_vectors:
9822 kfree(vsi->tx_rings);
9823 return ret;
9824 }
9825
9826 /**
9827 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
9828 * @pf: board private structure
9829 * @type: type of VSI
9830 *
9831 * On error: returns error code (negative)
9832 * On success: returns vsi index in PF (positive)
9833 **/
9834 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
9835 {
9836 int ret = -ENODEV;
9837 struct i40e_vsi *vsi;
9838 int vsi_idx;
9839 int i;
9840
9841 /* Need to protect the allocation of the VSIs at the PF level */
9842 mutex_lock(&pf->switch_mutex);
9843
9844 /* VSI list may be fragmented if VSI creation/destruction has
9845 * been happening. We can afford to do a quick scan to look
9846 * for any free VSIs in the list.
9847 *
9848 * find next empty vsi slot, looping back around if necessary
9849 */
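	/* For example (hypothetical values): with next_vsi = 5 and slots
	 * 5..7 occupied, the first loop stops at slot 8; if the scan runs
	 * off the end of the array it wraps to 0 and searches up to
	 * next_vsi.
	 */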
9850 i = pf->next_vsi;
9851 while (i < pf->num_alloc_vsi && pf->vsi[i])
9852 i++;
9853 if (i >= pf->num_alloc_vsi) {
9854 i = 0;
9855 while (i < pf->next_vsi && pf->vsi[i])
9856 i++;
9857 }
9858
9859 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
9860 vsi_idx = i; /* Found one! */
9861 } else {
9862 ret = -ENODEV;
9863 goto unlock_pf; /* out of VSI slots! */
9864 }
9865 pf->next_vsi = ++i;
9866
9867 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
9868 if (!vsi) {
9869 ret = -ENOMEM;
9870 goto unlock_pf;
9871 }
9872 vsi->type = type;
9873 vsi->back = pf;
9874 set_bit(__I40E_VSI_DOWN, vsi->state);
9875 vsi->flags = 0;
9876 vsi->idx = vsi_idx;
9877 vsi->int_rate_limit = 0;
9878 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
9879 pf->rss_table_size : 64;
9880 vsi->netdev_registered = false;
9881 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
9882 hash_init(vsi->mac_filter_hash);
9883 vsi->irqs_ready = false;
9884
9885 ret = i40e_set_num_rings_in_vsi(vsi);
9886 if (ret)
9887 goto err_rings;
9888
9889 ret = i40e_vsi_alloc_arrays(vsi, true);
9890 if (ret)
9891 goto err_rings;
9892
9893 /* Setup default MSIX irq handler for VSI */
9894 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
9895
9896 /* Initialize VSI lock */
9897 spin_lock_init(&vsi->mac_filter_hash_lock);
9898 pf->vsi[vsi_idx] = vsi;
9899 ret = vsi_idx;
9900 goto unlock_pf;
9901
9902 err_rings:
9903 pf->next_vsi = i - 1;
9904 kfree(vsi);
9905 unlock_pf:
9906 mutex_unlock(&pf->switch_mutex);
9907 return ret;
9908 }
9909
9910 /**
9911 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
9912 * @vsi: VSI pointer
9913 * @free_qvectors: a bool to specify if q_vectors need to be freed.
9914 *
9915 * This function frees the ring and q_vector pointer arrays and
9916 * returns nothing.
9917 **/
9918 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
9919 {
9920 /* free the ring and vector containers */
9921 if (free_qvectors) {
9922 kfree(vsi->q_vectors);
9923 vsi->q_vectors = NULL;
9924 }
9925 kfree(vsi->tx_rings);
9926 vsi->tx_rings = NULL;
9927 vsi->rx_rings = NULL;
9928 vsi->xdp_rings = NULL;
9929 }
9930
9931 /**
9932 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
9933 * and lookup table
9934 * @vsi: Pointer to VSI structure
9935 */
9936 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
9937 {
9938 if (!vsi)
9939 return;
9940
9941 kfree(vsi->rss_hkey_user);
9942 vsi->rss_hkey_user = NULL;
9943
9944 kfree(vsi->rss_lut_user);
9945 vsi->rss_lut_user = NULL;
9946 }
9947
9948 /**
9949 * i40e_vsi_clear - Deallocate the VSI provided
9950 * @vsi: the VSI being un-configured
9951 **/
9952 static int i40e_vsi_clear(struct i40e_vsi *vsi)
9953 {
9954 struct i40e_pf *pf;
9955
9956 if (!vsi)
9957 return 0;
9958
9959 if (!vsi->back)
9960 goto free_vsi;
9961 pf = vsi->back;
9962
9963 mutex_lock(&pf->switch_mutex);
9964 if (!pf->vsi[vsi->idx]) {
9965 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
9966 vsi->idx, vsi->idx, vsi, vsi->type);
9967 goto unlock_vsi;
9968 }
9969
9970 if (pf->vsi[vsi->idx] != vsi) {
9971 dev_err(&pf->pdev->dev,
9972 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
9973 pf->vsi[vsi->idx]->idx,
9974 pf->vsi[vsi->idx],
9975 pf->vsi[vsi->idx]->type,
9976 vsi->idx, vsi, vsi->type);
9977 goto unlock_vsi;
9978 }
9979
9980 /* updates the PF for this cleared vsi */
9981 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
9982 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
9983
9984 i40e_vsi_free_arrays(vsi, true);
9985 i40e_clear_rss_config_user(vsi);
9986
9987 pf->vsi[vsi->idx] = NULL;
9988 if (vsi->idx < pf->next_vsi)
9989 pf->next_vsi = vsi->idx;
9990
9991 unlock_vsi:
9992 mutex_unlock(&pf->switch_mutex);
9993 free_vsi:
9994 kfree(vsi);
9995
9996 return 0;
9997 }
9998
9999 /**
10000 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
10001 * @vsi: the VSI being cleaned
10002 **/
10003 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
10004 {
10005 int i;
10006
10007 if (vsi->tx_rings && vsi->tx_rings[0]) {
10008 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10009 kfree_rcu(vsi->tx_rings[i], rcu);
10010 vsi->tx_rings[i] = NULL;
10011 vsi->rx_rings[i] = NULL;
10012 if (vsi->xdp_rings)
10013 vsi->xdp_rings[i] = NULL;
10014 }
10015 }
10016 }
10017
10018 /**
10019 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
10020 * @vsi: the VSI being configured
10021 **/
10022 static int i40e_alloc_rings(struct i40e_vsi *vsi)
10023 {
10024 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
10025 struct i40e_pf *pf = vsi->back;
10026 struct i40e_ring *ring;
10027
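	/* qpv is the number of rings allocated per queue pair: a Tx ring and
	 * an Rx ring, plus an XDP Tx ring when XDP is enabled (3 vs 2).
	 */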
10028 /* Set basic values in the rings to be used later during open() */
10029 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10030 /* allocate space for Tx, Rx and (if enabled) XDP Tx in one shot */
10031 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
10032 if (!ring)
10033 goto err_out;
10034
10035 ring->queue_index = i;
10036 ring->reg_idx = vsi->base_queue + i;
10037 ring->ring_active = false;
10038 ring->vsi = vsi;
10039 ring->netdev = vsi->netdev;
10040 ring->dev = &pf->pdev->dev;
10041 ring->count = vsi->num_desc;
10042 ring->size = 0;
10043 ring->dcb_tc = 0;
10044 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10045 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10046 ring->tx_itr_setting = pf->tx_itr_default;
10047 vsi->tx_rings[i] = ring++;
10048
10049 if (!i40e_enabled_xdp_vsi(vsi))
10050 goto setup_rx;
10051
10052 ring->queue_index = vsi->alloc_queue_pairs + i;
10053 ring->reg_idx = vsi->base_queue + ring->queue_index;
10054 ring->ring_active = false;
10055 ring->vsi = vsi;
10056 ring->netdev = NULL;
10057 ring->dev = &pf->pdev->dev;
10058 ring->count = vsi->num_desc;
10059 ring->size = 0;
10060 ring->dcb_tc = 0;
10061 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10062 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10063 set_ring_xdp(ring);
10064 ring->tx_itr_setting = pf->tx_itr_default;
10065 vsi->xdp_rings[i] = ring++;
10066
10067 setup_rx:
10068 ring->queue_index = i;
10069 ring->reg_idx = vsi->base_queue + i;
10070 ring->ring_active = false;
10071 ring->vsi = vsi;
10072 ring->netdev = vsi->netdev;
10073 ring->dev = &pf->pdev->dev;
10074 ring->count = vsi->num_desc;
10075 ring->size = 0;
10076 ring->dcb_tc = 0;
10077 ring->rx_itr_setting = pf->rx_itr_default;
10078 vsi->rx_rings[i] = ring;
10079 }
10080
10081 return 0;
10082
10083 err_out:
10084 i40e_vsi_clear_rings(vsi);
10085 return -ENOMEM;
10086 }
10087
10088 /**
10089 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
10090 * @pf: board private structure
10091 * @vectors: the number of MSI-X vectors to request
10092 *
10093 * Returns the number of vectors reserved, or error
10094 **/
10095 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
10096 {
10097 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
10098 I40E_MIN_MSIX, vectors);
10099 if (vectors < 0) {
10100 dev_info(&pf->pdev->dev,
10101 "MSI-X vector reservation failed: %d\n", vectors);
10102 vectors = 0;
10103 }
10104
10105 return vectors;
10106 }
10107
10108 /**
10109 * i40e_init_msix - Setup the MSIX capability
10110 * @pf: board private structure
10111 *
10112 * Work with the OS to set up the MSIX vectors needed.
10113 *
10114 * Returns the number of vectors reserved or negative on failure
10115 **/
10116 static int i40e_init_msix(struct i40e_pf *pf)
10117 {
10118 struct i40e_hw *hw = &pf->hw;
10119 int cpus, extra_vectors;
10120 int vectors_left;
10121 int v_budget, i;
10122 int v_actual;
10123 int iwarp_requested = 0;
10124
10125 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10126 return -ENODEV;
10127
10128 /* The number of vectors we'll request will be comprised of:
10129 * - Add 1 for "other" cause for Admin Queue events, etc.
10130 * - The number of LAN queue pairs
10131 * - Queues being used for RSS.
10132 * We don't need as many as max_rss_size vectors.
10133 * use rss_size instead in the calculation since that
10134 * is governed by number of cpus in the system.
10135 * - assumes symmetric Tx/Rx pairing
10136 * - The number of VMDq pairs
10137 * - The CPU count within the NUMA node if iWARP is enabled
10138 * Once we count this up, try the request.
10139 *
10140 * If we can't get what we want, we'll simplify to nearly nothing
10141 * and try again. If that still fails, we punt.
10142 */
10143 vectors_left = hw->func_caps.num_msix_vectors;
10144 v_budget = 0;
10145
10146 /* reserve one vector for miscellaneous handler */
10147 if (vectors_left) {
10148 v_budget++;
10149 vectors_left--;
10150 }
10151
10152 /* reserve some vectors for the main PF traffic queues. Initially we
10153 * only reserve at most 50% of the available vectors, in the case that
10154 * the number of online CPUs is large. This ensures that we can enable
10155 * extra features as well. Once we've enabled the other features, we
10156 * will use any remaining vectors to reach as close as we can to the
10157 * number of online CPUs.
10158 */
10159 cpus = num_online_cpus();
10160 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
10161 vectors_left -= pf->num_lan_msix;
10162
10163 /* reserve one vector for sideband flow director */
10164 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10165 if (vectors_left) {
10166 pf->num_fdsb_msix = 1;
10167 v_budget++;
10168 vectors_left--;
10169 } else {
10170 pf->num_fdsb_msix = 0;
10171 }
10172 }
10173
10174 /* can we reserve enough for iWARP? */
10175 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10176 iwarp_requested = pf->num_iwarp_msix;
10177
10178 if (!vectors_left)
10179 pf->num_iwarp_msix = 0;
10180 else if (vectors_left < pf->num_iwarp_msix)
10181 pf->num_iwarp_msix = 1;
10182 v_budget += pf->num_iwarp_msix;
10183 vectors_left -= pf->num_iwarp_msix;
10184 }
10185
10186 /* any vectors left over go for VMDq support */
10187 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
10188 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
10189 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
10190
10191 if (!vectors_left) {
10192 pf->num_vmdq_msix = 0;
10193 pf->num_vmdq_qps = 0;
10194 } else {
10195 /* if we're short on vectors for what's desired, we limit
10196 * the queues per vmdq. If this is still more than are
10197 * available, the user will need to change the number of
10198 * queues/vectors used by the PF later with the ethtool
10199 * channels command
10200 */
10201 if (vmdq_vecs < vmdq_vecs_wanted)
10202 pf->num_vmdq_qps = 1;
10203 pf->num_vmdq_msix = pf->num_vmdq_qps;
10204
10205 v_budget += vmdq_vecs;
10206 vectors_left -= vmdq_vecs;
10207 }
10208 }
10209
10210 /* On systems with a large number of SMP cores, we previously limited
10211 * the number of vectors for num_lan_msix to be at most 50% of the
10212 * available vectors, to allow for other features. Now, we add back
10213 * the remaining vectors. However, we ensure that the total
10214 * num_lan_msix will not exceed num_online_cpus(). To do this, we
10215 * calculate the number of vectors we can add without going over the
10216 * cap of CPUs. For systems with a small number of CPUs this will be
10217 * zero.
10218 */
10219 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
10220 pf->num_lan_msix += extra_vectors;
10221 vectors_left -= extra_vectors;
10222
10223 WARN(vectors_left < 0,
10224 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
10225
10226 v_budget += pf->num_lan_msix;
10227 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
10228 GFP_KERNEL);
10229 if (!pf->msix_entries)
10230 return -ENOMEM;
10231
10232 for (i = 0; i < v_budget; i++)
10233 pf->msix_entries[i].entry = i;
10234 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
10235
10236 if (v_actual < I40E_MIN_MSIX) {
10237 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
10238 kfree(pf->msix_entries);
10239 pf->msix_entries = NULL;
10240 pci_disable_msix(pf->pdev);
10241 return -ENODEV;
10242
10243 } else if (v_actual == I40E_MIN_MSIX) {
10244 /* Adjust for minimal MSIX use */
10245 pf->num_vmdq_vsis = 0;
10246 pf->num_vmdq_qps = 0;
10247 pf->num_lan_qps = 1;
10248 pf->num_lan_msix = 1;
10249
10250 } else if (v_actual != v_budget) {
10251 /* If we have limited resources, we will start with no vectors
10252 * for the special features and then allocate vectors to some
10253 * of these features based on the policy and at the end disable
10254 * the features that did not get any vectors.
10255 */
10256 int vec;
10257
10258 dev_info(&pf->pdev->dev,
10259 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
10260 v_actual, v_budget);
10261 /* reserve the misc vector */
10262 vec = v_actual - 1;
10263
10264 /* Scale vector usage down */
10265 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
10266 pf->num_vmdq_vsis = 1;
10267 pf->num_vmdq_qps = 1;
10268
10269 /* partition out the remaining vectors */
10270 switch (vec) {
10271 case 2:
10272 pf->num_lan_msix = 1;
10273 break;
10274 case 3:
10275 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10276 pf->num_lan_msix = 1;
10277 pf->num_iwarp_msix = 1;
10278 } else {
10279 pf->num_lan_msix = 2;
10280 }
10281 break;
10282 default:
10283 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10284 pf->num_iwarp_msix = min_t(int, (vec / 3),
10285 iwarp_requested);
10286 pf->num_vmdq_vsis = min_t(int, (vec / 3),
10287 I40E_DEFAULT_NUM_VMDQ_VSI);
10288 } else {
10289 pf->num_vmdq_vsis = min_t(int, (vec / 2),
10290 I40E_DEFAULT_NUM_VMDQ_VSI);
10291 }
10292 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10293 pf->num_fdsb_msix = 1;
10294 vec--;
10295 }
10296 pf->num_lan_msix = min_t(int,
10297 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
10298 pf->num_lan_msix);
10299 pf->num_lan_qps = pf->num_lan_msix;
10300 break;
10301 }
10302 }
10303
10304 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
10305 (pf->num_fdsb_msix == 0)) {
10306 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
10307 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10308 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10309 }
10310 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10311 (pf->num_vmdq_msix == 0)) {
10312 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
10313 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
10314 }
10315
10316 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
10317 (pf->num_iwarp_msix == 0)) {
10318 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
10319 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
10320 }
10321 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
10322 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
10323 pf->num_lan_msix,
10324 pf->num_vmdq_msix * pf->num_vmdq_vsis,
10325 pf->num_fdsb_msix,
10326 pf->num_iwarp_msix);
10327
10328 return v_actual;
10329 }
10330
10331 /**
10332 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
10333 * @vsi: the VSI being configured
10334 * @v_idx: index of the vector in the vsi struct
10335 * @cpu: cpu to be used on affinity_mask
10336 *
10337 * We allocate one q_vector. If allocation fails we return -ENOMEM.
10338 **/
10339 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
10340 {
10341 struct i40e_q_vector *q_vector;
10342
10343 /* allocate q_vector */
10344 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
10345 if (!q_vector)
10346 return -ENOMEM;
10347
10348 q_vector->vsi = vsi;
10349 q_vector->v_idx = v_idx;
10350 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
10351
10352 if (vsi->netdev)
10353 netif_napi_add(vsi->netdev, &q_vector->napi,
10354 i40e_napi_poll, NAPI_POLL_WEIGHT);
10355
10356 q_vector->rx.latency_range = I40E_LOW_LATENCY;
10357 q_vector->tx.latency_range = I40E_LOW_LATENCY;
10358
10359 /* tie q_vector and vsi together */
10360 vsi->q_vectors[v_idx] = q_vector;
10361
10362 return 0;
10363 }
10364
10365 /**
10366 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
10367 * @vsi: the VSI being configured
10368 *
10369 * We allocate one q_vector per queue interrupt. If allocation fails we
10370 * return -ENOMEM.
10371 **/
10372 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
10373 {
10374 struct i40e_pf *pf = vsi->back;
10375 int err, v_idx, num_q_vectors, current_cpu;
10376
10377 /* if not MSIX, give the one vector only to the LAN VSI */
10378 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10379 num_q_vectors = vsi->num_q_vectors;
10380 else if (vsi == pf->vsi[pf->lan_vsi])
10381 num_q_vectors = 1;
10382 else
10383 return -EINVAL;
10384
10385 current_cpu = cpumask_first(cpu_online_mask);
10386
10387 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
10388 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
10389 if (err)
10390 goto err_out;
10391 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
10392 if (unlikely(current_cpu >= nr_cpu_ids))
10393 current_cpu = cpumask_first(cpu_online_mask);
10394 }
10395
10396 return 0;
10397
10398 err_out:
10399 while (v_idx--)
10400 i40e_free_q_vector(vsi, v_idx);
10401
10402 return err;
10403 }
10404
10405 /**
10406 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
10407 * @pf: board private structure to initialize
10408 **/
10409 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
10410 {
10411 int vectors = 0;
10412 ssize_t size;
10413
10414 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
10415 vectors = i40e_init_msix(pf);
10416 if (vectors < 0) {
10417 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
10418 I40E_FLAG_IWARP_ENABLED |
10419 I40E_FLAG_RSS_ENABLED |
10420 I40E_FLAG_DCB_CAPABLE |
10421 I40E_FLAG_DCB_ENABLED |
10422 I40E_FLAG_SRIOV_ENABLED |
10423 I40E_FLAG_FD_SB_ENABLED |
10424 I40E_FLAG_FD_ATR_ENABLED |
10425 I40E_FLAG_VMDQ_ENABLED);
10426 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10427
10428 /* rework the queue expectations without MSIX */
10429 i40e_determine_queue_usage(pf);
10430 }
10431 }
10432
10433 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10434 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
10435 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
10436 vectors = pci_enable_msi(pf->pdev);
10437 if (vectors < 0) {
10438 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
10439 vectors);
10440 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
10441 }
10442 vectors = 1; /* one MSI or Legacy vector */
10443 }
10444
10445 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
10446 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
10447
10448 /* set up vector assignment tracking */
10449 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
10450 pf->irq_pile = kzalloc(size, GFP_KERNEL);
10451 if (!pf->irq_pile) {
10452 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
10453 return -ENOMEM;
10454 }
10455 pf->irq_pile->num_entries = vectors;
10456 pf->irq_pile->search_hint = 0;
10457
10458 /* track first vector for misc interrupts, ignore return */
10459 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
10460
10461 return 0;
10462 }
10463
10464 /**
10465 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
10466 * @pf: private board data structure
10467 *
10468 * Restore the interrupt scheme that was cleared when we suspended the
10469 * device. This should be called during resume to re-allocate the q_vectors
10470 * and reacquire IRQs.
10471 */
10472 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
10473 {
10474 int err, i;
10475
10476 /* We cleared the MSI and MSI-X flags when disabling the old interrupt
10477 * scheme. We need to re-enable them here in order to attempt to
10478 * re-acquire the MSI or MSI-X vectors
10479 */
10480 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
10481
10482 err = i40e_init_interrupt_scheme(pf);
10483 if (err)
10484 return err;
10485
10486 /* Now that we've re-acquired IRQs, we need to remap the vectors and
10487 * rings together again.
10488 */
10489 for (i = 0; i < pf->num_alloc_vsi; i++) {
10490 if (pf->vsi[i]) {
10491 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
10492 if (err)
10493 goto err_unwind;
10494 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
10495 }
10496 }
10497
10498 err = i40e_setup_misc_vector(pf);
10499 if (err)
10500 goto err_unwind;
10501
10502 return 0;
10503
10504 err_unwind:
10505 while (i--) {
10506 if (pf->vsi[i])
10507 i40e_vsi_free_q_vectors(pf->vsi[i]);
10508 }
10509
10510 return err;
10511 }
10512
10513 /**
10514 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
10515 * @pf: board private structure
10516 *
10517 * This sets up the handler for MSIX 0, which is used to manage the
10518 * non-queue interrupts, e.g. AdminQ and errors. This is not used
10519 * when in MSI or Legacy interrupt mode.
10520 **/
10521 static int i40e_setup_misc_vector(struct i40e_pf *pf)
10522 {
10523 struct i40e_hw *hw = &pf->hw;
10524 int err = 0;
10525
10526 /* Only request the IRQ once, the first time through. */
10527 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
10528 err = request_irq(pf->msix_entries[0].vector,
10529 i40e_intr, 0, pf->int_name, pf);
10530 if (err) {
10531 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
10532 dev_info(&pf->pdev->dev,
10533 "request_irq for %s failed: %d\n",
10534 pf->int_name, err);
10535 return -EFAULT;
10536 }
10537 }
10538
10539 i40e_enable_misc_int_causes(pf);
10540
10541 /* associate no queues to the misc vector */
10542 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
10543 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
10544
10545 i40e_flush(hw);
10546
10547 i40e_irq_dynamic_enable_icr0(pf);
10548
10549 return err;
10550 }
10551
10552 /**
10553 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
10554 * @vsi: Pointer to vsi structure
10555 * @seed: Buffer to store the hash keys
10556 * @lut: Buffer to store the lookup table entries
10557 * @lut_size: Size of buffer to store the lookup table entries
10558 *
10559 * Return 0 on success, negative on failure
10560 */
10561 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
10562 u8 *lut, u16 lut_size)
10563 {
10564 struct i40e_pf *pf = vsi->back;
10565 struct i40e_hw *hw = &pf->hw;
10566 int ret = 0;
10567
10568 if (seed) {
10569 ret = i40e_aq_get_rss_key(hw, vsi->id,
10570 (struct i40e_aqc_get_set_rss_key_data *)seed);
10571 if (ret) {
10572 dev_info(&pf->pdev->dev,
10573 "Cannot get RSS key, err %s aq_err %s\n",
10574 i40e_stat_str(&pf->hw, ret),
10575 i40e_aq_str(&pf->hw,
10576 pf->hw.aq.asq_last_status));
10577 return ret;
10578 }
10579 }
10580
10581 if (lut) {
10582 bool pf_lut = vsi->type == I40E_VSI_MAIN;
10583
10584 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
10585 if (ret) {
10586 dev_info(&pf->pdev->dev,
10587 "Cannot get RSS lut, err %s aq_err %s\n",
10588 i40e_stat_str(&pf->hw, ret),
10589 i40e_aq_str(&pf->hw,
10590 pf->hw.aq.asq_last_status));
10591 return ret;
10592 }
10593 }
10594
10595 return ret;
10596 }
10597
10598 /**
10599 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
10600 * @vsi: Pointer to vsi structure
10601 * @seed: RSS hash seed
10602 * @lut: Lookup table
10603 * @lut_size: Lookup table size
10604 *
10605 * Returns 0 on success, negative on failure
10606 **/
10607 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
10608 const u8 *lut, u16 lut_size)
10609 {
10610 struct i40e_pf *pf = vsi->back;
10611 struct i40e_hw *hw = &pf->hw;
10612 u16 vf_id = vsi->vf_id;
10613 u8 i;
10614
10615 /* Fill out hash function seed */
10616 if (seed) {
10617 u32 *seed_dw = (u32 *)seed;
10618
10619 if (vsi->type == I40E_VSI_MAIN) {
10620 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
10621 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
10622 } else if (vsi->type == I40E_VSI_SRIOV) {
10623 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
10624 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
10625 } else {
10626 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
10627 }
10628 }
10629
10630 if (lut) {
10631 u32 *lut_dw = (u32 *)lut;
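		/* Each 32-bit HLUT register carries four 8-bit LUT entries, so
		 * the byte-wise lookup table is programmed four entries per
		 * register write below.
		 */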
10632
10633 if (vsi->type == I40E_VSI_MAIN) {
10634 if (lut_size != I40E_HLUT_ARRAY_SIZE)
10635 return -EINVAL;
10636 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
10637 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
10638 } else if (vsi->type == I40E_VSI_SRIOV) {
10639 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
10640 return -EINVAL;
10641 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
10642 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
10643 } else {
10644 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
10645 }
10646 }
10647 i40e_flush(hw);
10648
10649 return 0;
10650 }
10651
10652 /**
10653 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
10654 * @vsi: Pointer to VSI structure
10655 * @seed: Buffer to store the keys
10656 * @lut: Buffer to store the lookup table entries
10657 * @lut_size: Size of buffer to store the lookup table entries
10658 *
10659 * Returns 0 on success, negative on failure
10660 */
10661 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
10662 u8 *lut, u16 lut_size)
10663 {
10664 struct i40e_pf *pf = vsi->back;
10665 struct i40e_hw *hw = &pf->hw;
10666 u16 i;
10667
10668 if (seed) {
10669 u32 *seed_dw = (u32 *)seed;
10670
10671 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
10672 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
10673 }
10674 if (lut) {
10675 u32 *lut_dw = (u32 *)lut;
10676
10677 if (lut_size != I40E_HLUT_ARRAY_SIZE)
10678 return -EINVAL;
10679 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
10680 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
10681 }
10682
10683 return 0;
10684 }
10685
10686 /**
10687 * i40e_config_rss - Configure RSS keys and lut
10688 * @vsi: Pointer to VSI structure
10689 * @seed: RSS hash seed
10690 * @lut: Lookup table
10691 * @lut_size: Lookup table size
10692 *
10693 * Returns 0 on success, negative on failure
10694 */
10695 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
10696 {
10697 struct i40e_pf *pf = vsi->back;
10698
10699 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
10700 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
10701 else
10702 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
10703 }
10704
10705 /**
10706 * i40e_get_rss - Get RSS keys and lut
10707 * @vsi: Pointer to VSI structure
10708 * @seed: Buffer to store the keys
10709 * @lut: Buffer to store the lookup table entries
10710 * @lut_size: Size of buffer to store the lookup table entries
10711 *
10712 * Returns 0 on success, negative on failure
10713 */
10714 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
10715 {
10716 struct i40e_pf *pf = vsi->back;
10717
10718 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
10719 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
10720 else
10721 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
10722 }
10723
10724 /**
10725 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
10726 * @pf: Pointer to board private structure
10727 * @lut: Lookup table
10728 * @rss_table_size: Lookup table size
10729 * @rss_size: Range of queue number for hashing
10730 */
10731 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
10732 u16 rss_table_size, u16 rss_size)
10733 {
10734 u16 i;
10735
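	/* Spread the queues round-robin across the table; e.g. with
	 * rss_size = 4 the table becomes 0,1,2,3,0,1,2,3,... (example
	 * values for illustration).
	 */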
10736 for (i = 0; i < rss_table_size; i++)
10737 lut[i] = i % rss_size;
10738 }
10739
10740 /**
10741 * i40e_pf_config_rss - Prepare for RSS if used
10742 * @pf: board private structure
10743 **/
10744 static int i40e_pf_config_rss(struct i40e_pf *pf)
10745 {
10746 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10747 u8 seed[I40E_HKEY_ARRAY_SIZE];
10748 u8 *lut;
10749 struct i40e_hw *hw = &pf->hw;
10750 u32 reg_val;
10751 u64 hena;
10752 int ret;
10753
10754 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
10755 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
10756 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
10757 hena |= i40e_pf_get_default_rss_hena(pf);
10758
10759 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
10760 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
10761
10762 /* Determine the RSS table size based on the hardware capabilities */
10763 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
10764 reg_val = (pf->rss_table_size == 512) ?
10765 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
10766 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
10767 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
10768
10769 /* Determine the RSS size of the VSI */
10770 if (!vsi->rss_size) {
10771 u16 qcount;
10772
10773 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
10774 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
10775 }
10776 if (!vsi->rss_size)
10777 return -EINVAL;
10778
10779 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
10780 if (!lut)
10781 return -ENOMEM;
10782
10783 /* Use user configured lut if there is one, otherwise use default */
10784 if (vsi->rss_lut_user)
10785 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
10786 else
10787 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
10788
10789 /* Use user configured hash key if there is one, otherwise
10790 * use default.
10791 */
10792 if (vsi->rss_hkey_user)
10793 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
10794 else
10795 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
10796 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
10797 kfree(lut);
10798
10799 return ret;
10800 }
10801
10802 /**
10803 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
10804 * @pf: board private structure
10805 * @queue_count: the requested queue count for rss.
10806 *
10807 * returns 0 if rss is not enabled, if enabled returns the final rss queue
10808 * count which may be different from the requested queue count.
10809 * Note: expects to be called while under rtnl_lock()
10810 **/
10811 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
10812 {
10813 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10814 int new_rss_size;
10815
10816 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
10817 return 0;
10818
10819 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
10820
10821 if (queue_count != vsi->num_queue_pairs) {
10822 u16 qcount;
10823
10824 vsi->req_queue_pairs = queue_count;
10825 i40e_prep_for_reset(pf, true);
10826
10827 pf->alloc_rss_size = new_rss_size;
10828
10829 i40e_reset_and_rebuild(pf, true, true);
10830
10831 /* Discard the user configured hash keys and lut, if less
10832 * queues are enabled.
10833 */
10834 if (queue_count < vsi->rss_size) {
10835 i40e_clear_rss_config_user(vsi);
10836 dev_dbg(&pf->pdev->dev,
10837 "discard user configured hash keys and lut\n");
10838 }
10839
10840 /* Reset vsi->rss_size, as number of enabled queues changed */
10841 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
10842 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
10843
10844 i40e_pf_config_rss(pf);
10845 }
10846 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
10847 vsi->req_queue_pairs, pf->rss_size_max);
10848 return pf->alloc_rss_size;
10849 }
10850
10851 /**
10852 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
10853 * @pf: board private structure
10854 **/
10855 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
10856 {
10857 i40e_status status;
10858 bool min_valid, max_valid;
10859 u32 max_bw, min_bw;
10860
10861 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
10862 &min_valid, &max_valid);
10863
10864 if (!status) {
10865 if (min_valid)
10866 pf->min_bw = min_bw;
10867 if (max_valid)
10868 pf->max_bw = max_bw;
10869 }
10870
10871 return status;
10872 }
10873
10874 /**
10875 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
10876 * @pf: board private structure
10877 **/
10878 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
10879 {
10880 struct i40e_aqc_configure_partition_bw_data bw_data;
10881 i40e_status status;
10882
10883 /* Set the valid bit for this PF */
10884 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
10885 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
10886 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
10887
10888 /* Set the new bandwidths */
10889 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
10890
10891 return status;
10892 }
10893
10894 /**
10895 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
10896 * @pf: board private structure
10897 **/
10898 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
10899 {
10900 /* Commit temporary BW setting to permanent NVM image */
10901 enum i40e_admin_queue_err last_aq_status;
10902 i40e_status ret;
10903 u16 nvm_word;
10904
10905 if (pf->hw.partition_id != 1) {
10906 dev_info(&pf->pdev->dev,
10907 "Commit BW only works on partition 1! This is partition %d",
10908 pf->hw.partition_id);
10909 ret = I40E_NOT_SUPPORTED;
10910 goto bw_commit_out;
10911 }
10912
10913 /* Acquire NVM for read access */
10914 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
10915 last_aq_status = pf->hw.aq.asq_last_status;
10916 if (ret) {
10917 dev_info(&pf->pdev->dev,
10918 "Cannot acquire NVM for read access, err %s aq_err %s\n",
10919 i40e_stat_str(&pf->hw, ret),
10920 i40e_aq_str(&pf->hw, last_aq_status));
10921 goto bw_commit_out;
10922 }
10923
10924 /* Read word 0x10 of NVM - SW compatibility word 1 */
10925 ret = i40e_aq_read_nvm(&pf->hw,
10926 I40E_SR_NVM_CONTROL_WORD,
10927 0x10, sizeof(nvm_word), &nvm_word,
10928 false, NULL);
10929 /* Save off last admin queue command status before releasing
10930 * the NVM
10931 */
10932 last_aq_status = pf->hw.aq.asq_last_status;
10933 i40e_release_nvm(&pf->hw);
10934 if (ret) {
10935 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
10936 i40e_stat_str(&pf->hw, ret),
10937 i40e_aq_str(&pf->hw, last_aq_status));
10938 goto bw_commit_out;
10939 }
10940
10941 /* Wait a bit for NVM release to complete */
10942 msleep(50);
10943
10944 /* Acquire NVM for write access */
10945 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
10946 last_aq_status = pf->hw.aq.asq_last_status;
10947 if (ret) {
10948 dev_info(&pf->pdev->dev,
10949 "Cannot acquire NVM for write access, err %s aq_err %s\n",
10950 i40e_stat_str(&pf->hw, ret),
10951 i40e_aq_str(&pf->hw, last_aq_status));
10952 goto bw_commit_out;
10953 }
10954 /* Write it back out unchanged to initiate an NVM update,
10955 * which will force a write of the shadow (alt) RAM to
10956 * the NVM - thus storing the bandwidth values permanently.
10957 */
10958 ret = i40e_aq_update_nvm(&pf->hw,
10959 I40E_SR_NVM_CONTROL_WORD,
10960 0x10, sizeof(nvm_word),
10961 &nvm_word, true, NULL);
10962 /* Save off last admin queue command status before releasing
10963 * the NVM
10964 */
10965 last_aq_status = pf->hw.aq.asq_last_status;
10966 i40e_release_nvm(&pf->hw);
10967 if (ret)
10968 dev_info(&pf->pdev->dev,
10969 "BW settings NOT SAVED, err %s aq_err %s\n",
10970 i40e_stat_str(&pf->hw, ret),
10971 i40e_aq_str(&pf->hw, last_aq_status));
10972 bw_commit_out:
10973
10974 return ret;
10975 }
10976
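/* Note on the commit sequence above: the NVM is acquired twice, once
 * for a read of the SW compatibility word and once for the write-back.
 * Writing the word back unchanged is what asks the firmware to dump
 * the shadow (alt) RAM, and with it the partition BW values, to flash.
 */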
10977 /**
10978 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
10979 * @pf: board private structure to initialize
10980 *
10981 * i40e_sw_init initializes the Adapter private data structure.
10982 * Fields are initialized based on PCI device information and
10983 * OS network device settings (MTU size).
10984 **/
10985 static int i40e_sw_init(struct i40e_pf *pf)
10986 {
10987 int err = 0;
10988 int size;
10989
10990 /* Set default capability flags */
10991 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
10992 I40E_FLAG_MSI_ENABLED |
10993 I40E_FLAG_MSIX_ENABLED;
10994
10995 /* Set default ITR */
10996 pf->rx_itr_default = I40E_ITR_RX_DEF;
10997 pf->tx_itr_default = I40E_ITR_TX_DEF;
10998
10999 /* Depending on PF configurations, it is possible that the RSS
11000 * maximum might end up larger than the available queues
11001 */
11002 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
11003 pf->alloc_rss_size = 1;
11004 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
11005 pf->rss_size_max = min_t(int, pf->rss_size_max,
11006 pf->hw.func_caps.num_tx_qp);
11007 if (pf->hw.func_caps.rss) {
11008 pf->flags |= I40E_FLAG_RSS_ENABLED;
11009 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
11010 num_online_cpus());
11011 }
11012
11013 /* MFP mode enabled */
11014 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
11015 pf->flags |= I40E_FLAG_MFP_ENABLED;
11016 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
11017 if (i40e_get_partition_bw_setting(pf)) {
11018 dev_warn(&pf->pdev->dev,
11019 "Could not get partition bw settings\n");
11020 } else {
11021 dev_info(&pf->pdev->dev,
11022 "Partition BW Min = %8.8x, Max = %8.8x\n",
11023 pf->min_bw, pf->max_bw);
11024
11025 /* nudge the Tx scheduler */
11026 i40e_set_partition_bw_setting(pf);
11027 }
11028 }
11029
11030 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
11031 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
11032 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
11033 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
11034 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
11035 pf->hw.num_partitions > 1)
11036 dev_info(&pf->pdev->dev,
11037 "Flow Director Sideband mode Disabled in MFP mode\n");
11038 else
11039 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11040 pf->fdir_pf_filter_count =
11041 pf->hw.func_caps.fd_filters_guaranteed;
11042 pf->hw.fdir_shared_filter_count =
11043 pf->hw.func_caps.fd_filters_best_effort;
11044 }
11045
11046 if (pf->hw.mac.type == I40E_MAC_X722) {
11047 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
11048 I40E_HW_128_QP_RSS_CAPABLE |
11049 I40E_HW_ATR_EVICT_CAPABLE |
11050 I40E_HW_WB_ON_ITR_CAPABLE |
11051 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
11052 I40E_HW_NO_PCI_LINK_CHECK |
11053 I40E_HW_USE_SET_LLDP_MIB |
11054 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
11055 I40E_HW_PTP_L4_CAPABLE |
11056 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
11057 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
11058
11059 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
11060 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
11061 I40E_FDEVICT_PCTYPE_DEFAULT) {
11062 dev_warn(&pf->pdev->dev,
11063 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
11064 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
11065 }
11066 } else if ((pf->hw.aq.api_maj_ver > 1) ||
11067 ((pf->hw.aq.api_maj_ver == 1) &&
11068 (pf->hw.aq.api_min_ver > 4))) {
11069 /* Supported in FW API version higher than 1.4 */
11070 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
11071 }
11072
11073 /* Enable HW ATR eviction if possible */
11074 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
11075 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
11076
11077 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11078 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
11079 (pf->hw.aq.fw_maj_ver < 4))) {
11080 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
11081 /* No DCB support for FW < v4.33 */
11082 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
11083 }
11084
11085 /* Disable FW LLDP if FW < v4.3 */
11086 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11087 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
11088 (pf->hw.aq.fw_maj_ver < 4)))
11089 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
11090
11091 /* Use the FW Set LLDP MIB API if FW >= v4.40 */
11092 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11093 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
11094 (pf->hw.aq.fw_maj_ver >= 5)))
11095 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
11096
11097 /* Enable PTP L4 if FW >= v6.0 */
11098 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11099 pf->hw.aq.fw_maj_ver >= 6)
11100 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
11101
11102 if (pf->hw.func_caps.vmdq) {
11103 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
11104 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
11105 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
11106 }
11107
11108 if (pf->hw.func_caps.iwarp) {
11109 pf->flags |= I40E_FLAG_IWARP_ENABLED;
11110 /* IWARP needs one extra vector for CQP, just like MISC. */
11111 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
11112 }
11113
11114 #ifdef CONFIG_PCI_IOV
11115 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
11116 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
11117 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
11118 pf->num_req_vfs = min_t(int,
11119 pf->hw.func_caps.num_vfs,
11120 I40E_MAX_VF_COUNT);
11121 }
11122 #endif /* CONFIG_PCI_IOV */
11123 pf->eeprom_version = 0xDEAD;
11124 pf->lan_veb = I40E_NO_VEB;
11125 pf->lan_vsi = I40E_NO_VSI;
11126
11127 /* By default FW has this off for performance reasons */
11128 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
11129
11130 /* set up queue assignment tracking */
11131 size = sizeof(struct i40e_lump_tracking)
11132 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
11133 pf->qp_pile = kzalloc(size, GFP_KERNEL);
11134 if (!pf->qp_pile) {
11135 err = -ENOMEM;
11136 goto sw_init_done;
11137 }
11138 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
11139 pf->qp_pile->search_hint = 0;
11140
11141 pf->tx_timeout_recovery_level = 1;
11142
11143 mutex_init(&pf->switch_mutex);
11144
11145 sw_init_done:
11146 return err;
11147 }
11148
11149 /**
11150 * i40e_set_ntuple - set the ntuple feature flag and take action
11151 * @pf: board private structure to initialize
11152 * @features: the feature set that the stack is suggesting
11153 *
11154 * returns a bool to indicate if reset needs to happen
11155 **/
11156 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
11157 {
11158 bool need_reset = false;
11159
11160 /* Check if Flow Director n-tuple support was enabled or disabled. If
11161 * the state changed, we need to reset.
11162 */
11163 if (features & NETIF_F_NTUPLE) {
11164 /* Enable filters and mark for reset */
11165 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
11166 need_reset = true;
11167 /* enable FD_SB only if there is an MSI-X vector and no cloud
11168 * filters exist
11169 */
11170 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
11171 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11172 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
11173 }
11174 } else {
11175 /* turn off filters, mark for reset and clear SW filter list */
11176 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11177 need_reset = true;
11178 i40e_fdir_filter_exit(pf);
11179 }
11180 pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
11181 I40E_FLAG_FD_SB_AUTO_DISABLED);
11182 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11183
11184 /* reset fd counters */
11185 pf->fd_add_err = 0;
11186 pf->fd_atr_cnt = 0;
11187 /* if ATR was auto disabled it can be re-enabled. */
11188 if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
11189 pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
11190 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
11191 (I40E_DEBUG_FD & pf->hw.debug_mask))
11192 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
11193 }
11194 }
11195 return need_reset;
11196 }
11197
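/* Illustrative note: NETIF_F_NTUPLE is usually toggled from userspace
 * with something like
 *     ethtool -K <ifname> ntuple on
 * (interface name is a placeholder), which reaches this helper through
 * the ndo_set_features path below.
 */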
11198 /**
11199 * i40e_clear_rss_lut - clear the rx hash lookup table
11200 * @vsi: the VSI being configured
11201 **/
11202 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
11203 {
11204 struct i40e_pf *pf = vsi->back;
11205 struct i40e_hw *hw = &pf->hw;
11206 u16 vf_id = vsi->vf_id;
11207 u8 i;
11208
11209 if (vsi->type == I40E_VSI_MAIN) {
11210 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11211 wr32(hw, I40E_PFQF_HLUT(i), 0);
11212 } else if (vsi->type == I40E_VSI_SRIOV) {
11213 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11214 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
11215 } else {
11216 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
11217 }
11218 }
11219
11220 /**
11221 * i40e_set_features - set the netdev feature flags
11222 * @netdev: ptr to the netdev being adjusted
11223 * @features: the feature set that the stack is suggesting
11224 * Note: expects to be called while under rtnl_lock()
11225 **/
11226 static int i40e_set_features(struct net_device *netdev,
11227 netdev_features_t features)
11228 {
11229 struct i40e_netdev_priv *np = netdev_priv(netdev);
11230 struct i40e_vsi *vsi = np->vsi;
11231 struct i40e_pf *pf = vsi->back;
11232 bool need_reset;
11233
11234 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
11235 i40e_pf_config_rss(pf);
11236 else if (!(features & NETIF_F_RXHASH) &&
11237 netdev->features & NETIF_F_RXHASH)
11238 i40e_clear_rss_lut(vsi);
11239
11240 if (features & NETIF_F_HW_VLAN_CTAG_RX)
11241 i40e_vlan_stripping_enable(vsi);
11242 else
11243 i40e_vlan_stripping_disable(vsi);
11244
11245 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
11246 dev_err(&pf->pdev->dev,
11247 "Offloaded tc filters active, can't turn hw_tc_offload off");
11248 return -EINVAL;
11249 }
11250
11251 need_reset = i40e_set_ntuple(pf, features);
11252
11253 if (need_reset)
11254 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
11255
11256 return 0;
11257 }
11258
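/* Illustrative note: the feature bits handled above map to ethtool
 * offload names, roughly (interface name is a placeholder):
 *     ethtool -K <ifname> rxhash off   -> clears NETIF_F_RXHASH
 *     ethtool -K <ifname> rxvlan off   -> clears NETIF_F_HW_VLAN_CTAG_RX
 */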
11259 /**
11260 * i40e_get_udp_port_idx - Look up a possibly offloaded Rx UDP port
11261 * @pf: board private structure
11262 * @port: The UDP port to look up
11263 *
11264 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
11265 **/
11266 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
11267 {
11268 u8 i;
11269
11270 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
11271 if (pf->udp_ports[i].port == port)
11272 return i;
11273 }
11274
11275 return i;
11276 }
11277
11278 /**
11279 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
11280 * @netdev: This physical port's netdev
11281 * @ti: Tunnel endpoint information
11282 **/
11283 static void i40e_udp_tunnel_add(struct net_device *netdev,
11284 struct udp_tunnel_info *ti)
11285 {
11286 struct i40e_netdev_priv *np = netdev_priv(netdev);
11287 struct i40e_vsi *vsi = np->vsi;
11288 struct i40e_pf *pf = vsi->back;
11289 u16 port = ntohs(ti->port);
11290 u8 next_idx;
11291 u8 idx;
11292
11293 idx = i40e_get_udp_port_idx(pf, port);
11294
11295 /* Check if port already exists */
11296 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11297 netdev_info(netdev, "port %d already offloaded\n", port);
11298 return;
11299 }
11300
11301 /* Now check if there is space to add the new port */
11302 next_idx = i40e_get_udp_port_idx(pf, 0);
11303
11304 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11305 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
11306 port);
11307 return;
11308 }
11309
11310 switch (ti->type) {
11311 case UDP_TUNNEL_TYPE_VXLAN:
11312 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
11313 break;
11314 case UDP_TUNNEL_TYPE_GENEVE:
11315 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
11316 return;
11317 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
11318 break;
11319 default:
11320 return;
11321 }
11322
11323 /* New port: add it and mark its index in the bitmap */
11324 pf->udp_ports[next_idx].port = port;
11325 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
11326 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
11327 }
11328
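/* Illustrative note: these notifications typically originate from the
 * udp_tunnel core when a tunnel device is created and brought up, e.g.
 *     ip link add vxlan0 type vxlan id 42 dstport 4789 dev <ifname>
 * (names and IDs are placeholders); the port is then pushed to the
 * firmware by the UDP filter sync subtask.
 */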
11329 /**
11330 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
11331 * @netdev: This physical port's netdev
11332 * @ti: Tunnel endpoint information
11333 **/
11334 static void i40e_udp_tunnel_del(struct net_device *netdev,
11335 struct udp_tunnel_info *ti)
11336 {
11337 struct i40e_netdev_priv *np = netdev_priv(netdev);
11338 struct i40e_vsi *vsi = np->vsi;
11339 struct i40e_pf *pf = vsi->back;
11340 u16 port = ntohs(ti->port);
11341 u8 idx;
11342
11343 idx = i40e_get_udp_port_idx(pf, port);
11344
11345 /* Check if port already exists */
11346 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
11347 goto not_found;
11348
11349 switch (ti->type) {
11350 case UDP_TUNNEL_TYPE_VXLAN:
11351 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
11352 goto not_found;
11353 break;
11354 case UDP_TUNNEL_TYPE_GENEVE:
11355 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
11356 goto not_found;
11357 break;
11358 default:
11359 goto not_found;
11360 }
11361
11362 /* if port exists, set it to 0 (mark for deletion)
11363 * and make it pending
11364 */
11365 pf->udp_ports[idx].port = 0;
11366 pf->pending_udp_bitmap |= BIT_ULL(idx);
11367 pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
11368
11369 return;
11370 not_found:
11371 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
11372 port);
11373 }
11374
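/**
 * i40e_get_phys_port_id - Get the port's MAC-based physical port identifier
 * @netdev: the netdevice being queried
 * @ppid: physical port id structure to fill in
 *
 * Returns 0 on success, -EOPNOTSUPP if the port address is not valid.
 **/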
11375 static int i40e_get_phys_port_id(struct net_device *netdev,
11376 struct netdev_phys_item_id *ppid)
11377 {
11378 struct i40e_netdev_priv *np = netdev_priv(netdev);
11379 struct i40e_pf *pf = np->vsi->back;
11380 struct i40e_hw *hw = &pf->hw;
11381
11382 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
11383 return -EOPNOTSUPP;
11384
11385 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
11386 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
11387
11388 return 0;
11389 }
11390
11391 /**
11392 * i40e_ndo_fdb_add - add an entry to the hardware database
11393 * @ndm: the input from the stack
11394 * @tb: pointer to array of nladdr (unused)
11395 * @dev: the net device pointer
11396 * @addr: the MAC address entry being added
11397 * @flags: instructions from stack about fdb operation
11398 */
11399 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
11400 struct net_device *dev,
11401 const unsigned char *addr, u16 vid,
11402 u16 flags)
11403 {
11404 struct i40e_netdev_priv *np = netdev_priv(dev);
11405 struct i40e_pf *pf = np->vsi->back;
11406 int err = 0;
11407
11408 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
11409 return -EOPNOTSUPP;
11410
11411 if (vid) {
11412 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
11413 return -EINVAL;
11414 }
11415
11416 /* Hardware does not support aging addresses, so if an
11417 * ndm_state is given, only allow permanent addresses
11418 */
11419 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
11420 netdev_info(dev, "FDB only supports static addresses\n");
11421 return -EINVAL;
11422 }
11423
11424 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
11425 err = dev_uc_add_excl(dev, addr);
11426 else if (is_multicast_ether_addr(addr))
11427 err = dev_mc_add_excl(dev, addr);
11428 else
11429 err = -EINVAL;
11430
11431 /* Only return duplicate errors if NLM_F_EXCL is set */
11432 if (err == -EEXIST && !(flags & NLM_F_EXCL))
11433 err = 0;
11434
11435 return err;
11436 }
11437
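/* Illustrative note: this hook is exercised by the bridge fdb netlink
 * interface, e.g. (MAC and device names are placeholders)
 *     bridge fdb add 02:11:22:33:44:55 dev <ifname>
 * and, per the checks above, only with SR-IOV enabled and for static,
 * non-VLAN entries.
 */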
11438 /**
11439 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
11440 * @dev: the netdev being configured
11441 * @nlh: RTNL message
11442 *
11443 * Inserts a new hardware bridge if not already created and
11444 * enables the bridging mode requested (VEB or VEPA). If the
11445 * hardware bridge has already been inserted and the request
11446 * is to change the mode then that requires a PF reset to
11447 * allow rebuild of the components with required hardware
11448 * bridge mode enabled.
11449 *
11450 * Note: expects to be called while under rtnl_lock()
11451 **/
11452 static int i40e_ndo_bridge_setlink(struct net_device *dev,
11453 struct nlmsghdr *nlh,
11454 u16 flags)
11455 {
11456 struct i40e_netdev_priv *np = netdev_priv(dev);
11457 struct i40e_vsi *vsi = np->vsi;
11458 struct i40e_pf *pf = vsi->back;
11459 struct i40e_veb *veb = NULL;
11460 struct nlattr *attr, *br_spec;
11461 int i, rem;
11462
11463 /* Only for PF VSI for now */
11464 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11465 return -EOPNOTSUPP;
11466
11467 /* Find the HW bridge for PF VSI */
11468 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11469 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11470 veb = pf->veb[i];
11471 }
11472
11473 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
11474
11475 nla_for_each_nested(attr, br_spec, rem) {
11476 __u16 mode;
11477
11478 if (nla_type(attr) != IFLA_BRIDGE_MODE)
11479 continue;
11480
11481 mode = nla_get_u16(attr);
11482 if ((mode != BRIDGE_MODE_VEPA) &&
11483 (mode != BRIDGE_MODE_VEB))
11484 return -EINVAL;
11485
11486 /* Insert a new HW bridge */
11487 if (!veb) {
11488 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
11489 vsi->tc_config.enabled_tc);
11490 if (veb) {
11491 veb->bridge_mode = mode;
11492 i40e_config_bridge_mode(veb);
11493 } else {
11494 /* No Bridge HW offload available */
11495 return -ENOENT;
11496 }
11497 break;
11498 } else if (mode != veb->bridge_mode) {
11499 /* Existing HW bridge but different mode needs reset */
11500 veb->bridge_mode = mode;
11501 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
11502 if (mode == BRIDGE_MODE_VEB)
11503 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11504 else
11505 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
11506 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
11507 break;
11508 }
11509 }
11510
11511 return 0;
11512 }
11513
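/* Illustrative note: the bridge mode is normally selected with the
 * iproute2 bridge tool, e.g. (device name is a placeholder)
 *     bridge link set dev <ifname> hwmode veb
 * Switching an existing bridge between VEB and VEPA triggers the PF
 * reset requested above.
 */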
11514 /**
11515 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
11516 * @skb: skb buff
11517 * @pid: process id
11518 * @seq: RTNL message seq #
11519 * @dev: the netdev being configured
11520 * @filter_mask: unused
11521 * @nlflags: netlink flags passed in
11522 *
11523 * Return the mode in which the hardware bridge is operating,
11524 * i.e. VEB or VEPA.
11525 **/
11526 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11527 struct net_device *dev,
11528 u32 __always_unused filter_mask,
11529 int nlflags)
11530 {
11531 struct i40e_netdev_priv *np = netdev_priv(dev);
11532 struct i40e_vsi *vsi = np->vsi;
11533 struct i40e_pf *pf = vsi->back;
11534 struct i40e_veb *veb = NULL;
11535 int i;
11536
11537 /* Only for PF VSI for now */
11538 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11539 return -EOPNOTSUPP;
11540
11541 /* Find the HW bridge for the PF VSI */
11542 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11543 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11544 veb = pf->veb[i];
11545 }
11546
11547 if (!veb)
11548 return 0;
11549
11550 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
11551 0, 0, nlflags, filter_mask, NULL);
11552 }
11553
11554 /**
11555 * i40e_features_check - Validate encapsulated packet conforms to limits
11556 * @skb: skb buff
11557 * @dev: This physical port's netdev
11558 * @features: Offload features that the stack believes apply
11559 **/
11560 static netdev_features_t i40e_features_check(struct sk_buff *skb,
11561 struct net_device *dev,
11562 netdev_features_t features)
11563 {
11564 size_t len;
11565
11566 /* No point in doing any of this if neither checksum nor GSO are
11567 * being requested for this frame. We can rule out both by just
11568 * checking for CHECKSUM_PARTIAL
11569 */
11570 if (skb->ip_summed != CHECKSUM_PARTIAL)
11571 return features;
11572
11573 /* We cannot support GSO if the MSS is going to be less than
11574 * 64 bytes. If it is then we need to drop support for GSO.
11575 */
11576 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
11577 features &= ~NETIF_F_GSO_MASK;
11578
11579 /* MACLEN can support at most 63 words */
11580 len = skb_network_header(skb) - skb->data;
11581 if (len & ~(63 * 2))
11582 goto out_err;
11583
11584 /* IPLEN and EIPLEN can support at most 127 dwords */
11585 len = skb_transport_header(skb) - skb_network_header(skb);
11586 if (len & ~(127 * 4))
11587 goto out_err;
11588
11589 if (skb->encapsulation) {
11590 /* L4TUNLEN can support 127 words */
11591 len = skb_inner_network_header(skb) - skb_transport_header(skb);
11592 if (len & ~(127 * 2))
11593 goto out_err;
11594
11595 /* IPLEN can support at most 127 dwords */
11596 len = skb_inner_transport_header(skb) -
11597 skb_inner_network_header(skb);
11598 if (len & ~(127 * 4))
11599 goto out_err;
11600 }
11601
11602 /* No need to validate L4LEN as TCP is the only protocol with a
11603 * flexible value and we support all possible values supported
11604 * by TCP, which is at most 15 dwords
11605 */
11606
11607 return features;
11608 out_err:
11609 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11610 }
11611
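/* Note on the length checks above: "len & ~(63 * 2)" accepts only an
 * even MAC header length of at most 126 bytes (63 two-byte words), and
 * "len & ~(127 * 4)" accepts only a 4-byte aligned IP header length of
 * at most 508 bytes (127 dwords); anything else falls through to
 * out_err and loses checksum/GSO offload for that frame.
 */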
11612 /**
11613 * i40e_xdp_setup - add/remove an XDP program
11614 * @vsi: the VSI to be changed
11615 * @prog: XDP program
11616 **/
11617 static int i40e_xdp_setup(struct i40e_vsi *vsi,
11618 struct bpf_prog *prog)
11619 {
11620 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
11621 struct i40e_pf *pf = vsi->back;
11622 struct bpf_prog *old_prog;
11623 bool need_reset;
11624 int i;
11625
11626 /* Don't allow frames that span over multiple buffers */
11627 if (frame_size > vsi->rx_buf_len)
11628 return -EINVAL;
11629
11630 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
11631 return 0;
11632
11633 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
11634 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
11635
11636 if (need_reset)
11637 i40e_prep_for_reset(pf, true);
11638
11639 old_prog = xchg(&vsi->xdp_prog, prog);
11640
11641 if (need_reset)
11642 i40e_reset_and_rebuild(pf, true, true);
11643
11644 for (i = 0; i < vsi->num_queue_pairs; i++)
11645 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
11646
11647 if (old_prog)
11648 bpf_prog_put(old_prog);
11649
11650 return 0;
11651 }
11652
11653 /**
11654 * i40e_xdp - implements ndo_bpf for i40e
11655 * @dev: netdevice
11656 * @xdp: XDP command
11657 **/
11658 static int i40e_xdp(struct net_device *dev,
11659 struct netdev_bpf *xdp)
11660 {
11661 struct i40e_netdev_priv *np = netdev_priv(dev);
11662 struct i40e_vsi *vsi = np->vsi;
11663
11664 if (vsi->type != I40E_VSI_MAIN)
11665 return -EINVAL;
11666
11667 switch (xdp->command) {
11668 case XDP_SETUP_PROG:
11669 return i40e_xdp_setup(vsi, xdp->prog);
11670 case XDP_QUERY_PROG:
11671 xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
11672 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
11673 return 0;
11674 default:
11675 return -EINVAL;
11676 }
11677 }
11678
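/* Illustrative note: XDP programs are typically attached with iproute2,
 * e.g. (object file and device names are placeholders)
 *     ip link set dev <ifname> xdp obj prog.o sec xdp
 * which reaches i40e_xdp_setup() above through the .ndo_bpf hook; the
 * frame-size check there means the MTU must fit in a single Rx buffer.
 */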
11679 static const struct net_device_ops i40e_netdev_ops = {
11680 .ndo_open = i40e_open,
11681 .ndo_stop = i40e_close,
11682 .ndo_start_xmit = i40e_lan_xmit_frame,
11683 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
11684 .ndo_set_rx_mode = i40e_set_rx_mode,
11685 .ndo_validate_addr = eth_validate_addr,
11686 .ndo_set_mac_address = i40e_set_mac,
11687 .ndo_change_mtu = i40e_change_mtu,
11688 .ndo_do_ioctl = i40e_ioctl,
11689 .ndo_tx_timeout = i40e_tx_timeout,
11690 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
11691 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
11692 #ifdef CONFIG_NET_POLL_CONTROLLER
11693 .ndo_poll_controller = i40e_netpoll,
11694 #endif
11695 .ndo_setup_tc = __i40e_setup_tc,
11696 .ndo_set_features = i40e_set_features,
11697 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
11698 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
11699 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
11700 .ndo_get_vf_config = i40e_ndo_get_vf_config,
11701 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
11702 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
11703 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
11704 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
11705 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
11706 .ndo_get_phys_port_id = i40e_get_phys_port_id,
11707 .ndo_fdb_add = i40e_ndo_fdb_add,
11708 .ndo_features_check = i40e_features_check,
11709 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
11710 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
11711 .ndo_bpf = i40e_xdp,
11712 };
11713
11714 /**
11715 * i40e_config_netdev - Setup the netdev flags
11716 * @vsi: the VSI being configured
11717 *
11718 * Returns 0 on success, negative value on failure
11719 **/
11720 static int i40e_config_netdev(struct i40e_vsi *vsi)
11721 {
11722 struct i40e_pf *pf = vsi->back;
11723 struct i40e_hw *hw = &pf->hw;
11724 struct i40e_netdev_priv *np;
11725 struct net_device *netdev;
11726 u8 broadcast[ETH_ALEN];
11727 u8 mac_addr[ETH_ALEN];
11728 int etherdev_size;
11729 netdev_features_t hw_enc_features;
11730 netdev_features_t hw_features;
11731
11732 etherdev_size = sizeof(struct i40e_netdev_priv);
11733 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
11734 if (!netdev)
11735 return -ENOMEM;
11736
11737 vsi->netdev = netdev;
11738 np = netdev_priv(netdev);
11739 np->vsi = vsi;
11740
11741 hw_enc_features = NETIF_F_SG |
11742 NETIF_F_IP_CSUM |
11743 NETIF_F_IPV6_CSUM |
11744 NETIF_F_HIGHDMA |
11745 NETIF_F_SOFT_FEATURES |
11746 NETIF_F_TSO |
11747 NETIF_F_TSO_ECN |
11748 NETIF_F_TSO6 |
11749 NETIF_F_GSO_GRE |
11750 NETIF_F_GSO_GRE_CSUM |
11751 NETIF_F_GSO_PARTIAL |
11752 NETIF_F_GSO_UDP_TUNNEL |
11753 NETIF_F_GSO_UDP_TUNNEL_CSUM |
11754 NETIF_F_SCTP_CRC |
11755 NETIF_F_RXHASH |
11756 NETIF_F_RXCSUM |
11757 0;
11758
11759 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
11760 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
11761
11762 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
11763
11764 netdev->hw_enc_features |= hw_enc_features;
11765
11766 /* record features VLANs can make use of */
11767 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
11768
11769 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
11770 netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
11771
11772 hw_features = hw_enc_features |
11773 NETIF_F_HW_VLAN_CTAG_TX |
11774 NETIF_F_HW_VLAN_CTAG_RX;
11775
11776 netdev->hw_features |= hw_features;
11777
11778 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
11779 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
11780
11781 if (vsi->type == I40E_VSI_MAIN) {
11782 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
11783 ether_addr_copy(mac_addr, hw->mac.perm_addr);
11784 /* The following steps are necessary for two reasons. First,
11785 * some older NVM configurations load a default MAC-VLAN
11786 * filter that will accept any tagged packet, and we want to
11787 * replace this with a normal filter. Additionally, it is
11788 * possible our MAC address was provided by the platform using
11789 * Open Firmware or similar.
11790 *
11791 * Thus, we need to remove the default filter and install one
11792 * specific to the MAC address.
11793 */
11794 i40e_rm_default_mac_filter(vsi, mac_addr);
11795 spin_lock_bh(&vsi->mac_filter_hash_lock);
11796 i40e_add_mac_filter(vsi, mac_addr);
11797 spin_unlock_bh(&vsi->mac_filter_hash_lock);
11798 } else {
11799 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
11800 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
11801 * the end, which is 4 bytes long, so force truncation of the
11802 * original name by IFNAMSIZ - 4
11803 */
11804 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
11805 IFNAMSIZ - 4,
11806 pf->vsi[pf->lan_vsi]->netdev->name);
11807 random_ether_addr(mac_addr);
11808
11809 spin_lock_bh(&vsi->mac_filter_hash_lock);
11810 i40e_add_mac_filter(vsi, mac_addr);
11811 spin_unlock_bh(&vsi->mac_filter_hash_lock);
11812 }
11813
11814 /* Add the broadcast filter so that we initially will receive
11815 * broadcast packets. Note that when a new VLAN is first added the
11816 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
11817 * specific filters as part of transitioning into "vlan" operation.
11818 * When more VLANs are added, the driver will copy each existing MAC
11819 * filter and add it for the new VLAN.
11820 *
11821 * Broadcast filters are handled specially by
11822 * i40e_sync_filters_subtask, as the driver must set the broadcast
11823 * promiscuous bit instead of adding this directly as a MAC/VLAN
11824 * filter. The subtask will update the correct broadcast promiscuous
11825 * bits as VLANs become active or inactive.
11826 */
11827 eth_broadcast_addr(broadcast);
11828 spin_lock_bh(&vsi->mac_filter_hash_lock);
11829 i40e_add_mac_filter(vsi, broadcast);
11830 spin_unlock_bh(&vsi->mac_filter_hash_lock);
11831
11832 ether_addr_copy(netdev->dev_addr, mac_addr);
11833 ether_addr_copy(netdev->perm_addr, mac_addr);
11834
11835 netdev->priv_flags |= IFF_UNICAST_FLT;
11836 netdev->priv_flags |= IFF_SUPP_NOFCS;
11837 /* Setup netdev TC information */
11838 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
11839
11840 netdev->netdev_ops = &i40e_netdev_ops;
11841 netdev->watchdog_timeo = 5 * HZ;
11842 i40e_set_ethtool_ops(netdev);
11843
11844 /* MTU range: 68 - 9706 */
11845 netdev->min_mtu = ETH_MIN_MTU;
11846 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
11847
11848 return 0;
11849 }
11850
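/* Illustrative note: for a non-MAIN VSI the "%.*sv%%d" template above
 * produces a name request such as "eth2v%d" (the base name is a
 * placeholder), which the kernel expands to eth2v0, eth2v1, ... when
 * the netdev is registered.
 */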
11851 /**
11852 * i40e_vsi_delete - Delete a VSI from the switch
11853 * @vsi: the VSI being removed
11854 *
11855 * Returns 0 on success, negative value on failure
11856 **/
11857 static void i40e_vsi_delete(struct i40e_vsi *vsi)
11858 {
11859 /* remove default VSI is not allowed */
11860 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
11861 return;
11862
11863 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
11864 }
11865
11866 /**
11867 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
11868 * @vsi: the VSI being queried
11869 *
11870 * Returns 1 if the HW bridge mode is VEB, 0 in case of VEPA mode, negative on error
11871 **/
11872 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
11873 {
11874 struct i40e_veb *veb;
11875 struct i40e_pf *pf = vsi->back;
11876
11877 /* Uplink is not a bridge so default to VEB */
11878 if (vsi->veb_idx == I40E_NO_VEB)
11879 return 1;
11880
11881 veb = pf->veb[vsi->veb_idx];
11882 if (!veb) {
11883 dev_info(&pf->pdev->dev,
11884 "There is no veb associated with the bridge\n");
11885 return -ENOENT;
11886 }
11887
11888 /* Uplink is a bridge in VEPA mode */
11889 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
11890 return 0;
11891 } else {
11892 /* Uplink is a bridge in VEB mode */
11893 return 1;
11894 }
11898 }
11899
11900 /**
11901 * i40e_add_vsi - Add a VSI to the switch
11902 * @vsi: the VSI being configured
11903 *
11904 * This initializes a VSI context depending on the VSI type to be added and
11905 * passes it down to the add_vsi aq command.
11906 **/
11907 static int i40e_add_vsi(struct i40e_vsi *vsi)
11908 {
11909 int ret = -ENODEV;
11910 struct i40e_pf *pf = vsi->back;
11911 struct i40e_hw *hw = &pf->hw;
11912 struct i40e_vsi_context ctxt;
11913 struct i40e_mac_filter *f;
11914 struct hlist_node *h;
11915 int bkt;
11916
11917 u8 enabled_tc = 0x1; /* TC0 enabled */
11918 int f_count = 0;
11919
11920 memset(&ctxt, 0, sizeof(ctxt));
11921 switch (vsi->type) {
11922 case I40E_VSI_MAIN:
11923 /* The PF's main VSI is already setup as part of the
11924 * device initialization, so we'll not bother with
11925 * the add_vsi call, but we will retrieve the current
11926 * VSI context.
11927 */
11928 ctxt.seid = pf->main_vsi_seid;
11929 ctxt.pf_num = pf->hw.pf_id;
11930 ctxt.vf_num = 0;
11931 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
11932 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
11933 if (ret) {
11934 dev_info(&pf->pdev->dev,
11935 "couldn't get PF vsi config, err %s aq_err %s\n",
11936 i40e_stat_str(&pf->hw, ret),
11937 i40e_aq_str(&pf->hw,
11938 pf->hw.aq.asq_last_status));
11939 return -ENOENT;
11940 }
11941 vsi->info = ctxt.info;
11942 vsi->info.valid_sections = 0;
11943
11944 vsi->seid = ctxt.seid;
11945 vsi->id = ctxt.vsi_number;
11946
11947 enabled_tc = i40e_pf_get_tc_map(pf);
11948
11949 /* Source pruning is enabled by default, so the flag is
11950 * negative logic - if it's set, we need to fiddle with
11951 * the VSI to disable source pruning.
11952 */
11953 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
11954 memset(&ctxt, 0, sizeof(ctxt));
11955 ctxt.seid = pf->main_vsi_seid;
11956 ctxt.pf_num = pf->hw.pf_id;
11957 ctxt.vf_num = 0;
11958 ctxt.info.valid_sections |=
11959 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
11960 ctxt.info.switch_id =
11961 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
11962 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11963 if (ret) {
11964 dev_info(&pf->pdev->dev,
11965 "update vsi failed, err %s aq_err %s\n",
11966 i40e_stat_str(&pf->hw, ret),
11967 i40e_aq_str(&pf->hw,
11968 pf->hw.aq.asq_last_status));
11969 ret = -ENOENT;
11970 goto err;
11971 }
11972 }
11973
11974 /* MFP mode setup queue map and update VSI */
11975 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
11976 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
11977 memset(&ctxt, 0, sizeof(ctxt));
11978 ctxt.seid = pf->main_vsi_seid;
11979 ctxt.pf_num = pf->hw.pf_id;
11980 ctxt.vf_num = 0;
11981 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
11982 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11983 if (ret) {
11984 dev_info(&pf->pdev->dev,
11985 "update vsi failed, err %s aq_err %s\n",
11986 i40e_stat_str(&pf->hw, ret),
11987 i40e_aq_str(&pf->hw,
11988 pf->hw.aq.asq_last_status));
11989 ret = -ENOENT;
11990 goto err;
11991 }
11992 /* update the local VSI info queue map */
11993 i40e_vsi_update_queue_map(vsi, &ctxt);
11994 vsi->info.valid_sections = 0;
11995 } else {
11996 /* Default/Main VSI is only enabled for TC0;
11997 * reconfigure it to enable all TCs that are
11998 * available on the port in SFP mode.
11999 * For MFP case the iSCSI PF would use this
12000 * flow to enable LAN+iSCSI TC.
12001 */
12002 ret = i40e_vsi_config_tc(vsi, enabled_tc);
12003 if (ret) {
12004 /* A single-TC condition is not fatal,
12005 * log a message and continue
12006 */
12007 dev_info(&pf->pdev->dev,
12008 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
12009 enabled_tc,
12010 i40e_stat_str(&pf->hw, ret),
12011 i40e_aq_str(&pf->hw,
12012 pf->hw.aq.asq_last_status));
12013 }
12014 }
12015 break;
12016
12017 case I40E_VSI_FDIR:
12018 ctxt.pf_num = hw->pf_id;
12019 ctxt.vf_num = 0;
12020 ctxt.uplink_seid = vsi->uplink_seid;
12021 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12022 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
12023 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
12024 (i40e_is_vsi_uplink_mode_veb(vsi))) {
12025 ctxt.info.valid_sections |=
12026 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12027 ctxt.info.switch_id =
12028 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12029 }
12030 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12031 break;
12032
12033 case I40E_VSI_VMDQ2:
12034 ctxt.pf_num = hw->pf_id;
12035 ctxt.vf_num = 0;
12036 ctxt.uplink_seid = vsi->uplink_seid;
12037 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12038 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
12039
12040 /* This VSI is connected to VEB so the switch_id
12041 * should be set to zero by default.
12042 */
12043 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12044 ctxt.info.valid_sections |=
12045 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12046 ctxt.info.switch_id =
12047 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12048 }
12049
12050 /* Setup the VSI tx/rx queue map for TC0 only for now */
12051 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12052 break;
12053
12054 case I40E_VSI_SRIOV:
12055 ctxt.pf_num = hw->pf_id;
12056 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
12057 ctxt.uplink_seid = vsi->uplink_seid;
12058 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12059 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
12060
12061 /* This VSI is connected to VEB so the switch_id
12062 * should be set to zero by default.
12063 */
12064 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12065 ctxt.info.valid_sections |=
12066 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12067 ctxt.info.switch_id =
12068 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12069 }
12070
12071 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
12072 ctxt.info.valid_sections |=
12073 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
12074 ctxt.info.queueing_opt_flags |=
12075 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
12076 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
12077 }
12078
12079 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
12080 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
12081 if (pf->vf[vsi->vf_id].spoofchk) {
12082 ctxt.info.valid_sections |=
12083 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
12084 ctxt.info.sec_flags |=
12085 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
12086 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
12087 }
12088 /* Setup the VSI tx/rx queue map for TC0 only for now */
12089 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12090 break;
12091
12092 case I40E_VSI_IWARP:
12093 /* send down message to iWARP */
12094 break;
12095
12096 default:
12097 return -ENODEV;
12098 }
12099
12100 if (vsi->type != I40E_VSI_MAIN) {
12101 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
12102 if (ret) {
12103 dev_info(&vsi->back->pdev->dev,
12104 "add vsi failed, err %s aq_err %s\n",
12105 i40e_stat_str(&pf->hw, ret),
12106 i40e_aq_str(&pf->hw,
12107 pf->hw.aq.asq_last_status));
12108 ret = -ENOENT;
12109 goto err;
12110 }
12111 vsi->info = ctxt.info;
12112 vsi->info.valid_sections = 0;
12113 vsi->seid = ctxt.seid;
12114 vsi->id = ctxt.vsi_number;
12115 }
12116
12117 vsi->active_filters = 0;
12118 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
12119 spin_lock_bh(&vsi->mac_filter_hash_lock);
12120 /* If macvlan filters already exist, force them to get loaded */
12121 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
12122 f->state = I40E_FILTER_NEW;
12123 f_count++;
12124 }
12125 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12126
12127 if (f_count) {
12128 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
12129 pf->flags |= I40E_FLAG_FILTER_SYNC;
12130 }
12131
12132 /* Update VSI BW information */
12133 ret = i40e_vsi_get_bw_info(vsi);
12134 if (ret) {
12135 dev_info(&pf->pdev->dev,
12136 "couldn't get vsi bw info, err %s aq_err %s\n",
12137 i40e_stat_str(&pf->hw, ret),
12138 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
12139 /* VSI is already added so not tearing that up */
12140 ret = 0;
12141 }
12142
12143 err:
12144 return ret;
12145 }
12146
12147 /**
12148 * i40e_vsi_release - Delete a VSI and free its resources
12149 * @vsi: the VSI being removed
12150 *
12151 * Returns 0 on success or < 0 on error
12152 **/
12153 int i40e_vsi_release(struct i40e_vsi *vsi)
12154 {
12155 struct i40e_mac_filter *f;
12156 struct hlist_node *h;
12157 struct i40e_veb *veb = NULL;
12158 struct i40e_pf *pf;
12159 u16 uplink_seid;
12160 int i, n, bkt;
12161
12162 pf = vsi->back;
12163
12164 /* release of a VEB-owner or last VSI is not allowed */
12165 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
12166 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
12167 vsi->seid, vsi->uplink_seid);
12168 return -ENODEV;
12169 }
12170 if (vsi == pf->vsi[pf->lan_vsi] &&
12171 !test_bit(__I40E_DOWN, pf->state)) {
12172 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
12173 return -ENODEV;
12174 }
12175
12176 uplink_seid = vsi->uplink_seid;
12177 if (vsi->type != I40E_VSI_SRIOV) {
12178 if (vsi->netdev_registered) {
12179 vsi->netdev_registered = false;
12180 if (vsi->netdev) {
12181 /* results in a call to i40e_close() */
12182 unregister_netdev(vsi->netdev);
12183 }
12184 } else {
12185 i40e_vsi_close(vsi);
12186 }
12187 i40e_vsi_disable_irq(vsi);
12188 }
12189
12190 spin_lock_bh(&vsi->mac_filter_hash_lock);
12191
12192 /* clear the sync flag on all filters */
12193 if (vsi->netdev) {
12194 __dev_uc_unsync(vsi->netdev, NULL);
12195 __dev_mc_unsync(vsi->netdev, NULL);
12196 }
12197
12198 /* make sure any remaining filters are marked for deletion */
12199 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
12200 __i40e_del_filter(vsi, f);
12201
12202 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12203
12204 i40e_sync_vsi_filters(vsi);
12205
12206 i40e_vsi_delete(vsi);
12207 i40e_vsi_free_q_vectors(vsi);
12208 if (vsi->netdev) {
12209 free_netdev(vsi->netdev);
12210 vsi->netdev = NULL;
12211 }
12212 i40e_vsi_clear_rings(vsi);
12213 i40e_vsi_clear(vsi);
12214
12215 /* If this was the last thing on the VEB, except for the
12216 * controlling VSI, remove the VEB, which puts the controlling
12217 * VSI onto the next level down in the switch.
12218 *
12219 * Well, okay, there's one more exception here: don't remove
12220 * the orphan VEBs yet. We'll wait for an explicit remove request
12221 * from up the network stack.
12222 */
12223 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
12224 if (pf->vsi[i] &&
12225 pf->vsi[i]->uplink_seid == uplink_seid &&
12226 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
12227 n++; /* count the VSIs */
12228 }
12229 }
12230 for (i = 0; i < I40E_MAX_VEB; i++) {
12231 if (!pf->veb[i])
12232 continue;
12233 if (pf->veb[i]->uplink_seid == uplink_seid)
12234 n++; /* count the VEBs */
12235 if (pf->veb[i]->seid == uplink_seid)
12236 veb = pf->veb[i];
12237 }
12238 if (n == 0 && veb && veb->uplink_seid != 0)
12239 i40e_veb_release(veb);
12240
12241 return 0;
12242 }
12243
12244 /**
12245 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
12246 * @vsi: ptr to the VSI
12247 *
12248 * This should only be called after i40e_vsi_mem_alloc() which allocates the
12249 * corresponding SW VSI structure and initializes num_queue_pairs for the
12250 * newly allocated VSI.
12251 *
12252 * Returns 0 on success or negative on failure
12253 **/
12254 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
12255 {
12256 int ret = -ENOENT;
12257 struct i40e_pf *pf = vsi->back;
12258
12259 if (vsi->q_vectors[0]) {
12260 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
12261 vsi->seid);
12262 return -EEXIST;
12263 }
12264
12265 if (vsi->base_vector) {
12266 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
12267 vsi->seid, vsi->base_vector);
12268 return -EEXIST;
12269 }
12270
12271 ret = i40e_vsi_alloc_q_vectors(vsi);
12272 if (ret) {
12273 dev_info(&pf->pdev->dev,
12274 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
12275 vsi->num_q_vectors, vsi->seid, ret);
12276 vsi->num_q_vectors = 0;
12277 goto vector_setup_out;
12278 }
12279
12280 /* In Legacy mode, we do not have to get any other vector since we
12281 * piggyback on the misc/ICR0 for queue interrupts.
12282 */
12283 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
12284 return ret;
12285 if (vsi->num_q_vectors)
12286 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
12287 vsi->num_q_vectors, vsi->idx);
12288 if (vsi->base_vector < 0) {
12289 dev_info(&pf->pdev->dev,
12290 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
12291 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
12292 i40e_vsi_free_q_vectors(vsi);
12293 ret = -ENOENT;
12294 goto vector_setup_out;
12295 }
12296
12297 vector_setup_out:
12298 return ret;
12299 }
12300
12301 /**
12302 * i40e_vsi_reinit_setup - release and reallocate resources for a VSI
12303 * @vsi: pointer to the vsi.
12304 *
12305 * This re-allocates a vsi's queue resources.
12306 *
12307 * Returns pointer to the successfully allocated and configured VSI sw struct
12308 * on success, otherwise returns NULL on failure.
12309 **/
12310 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
12311 {
12312 u16 alloc_queue_pairs;
12313 struct i40e_pf *pf;
12314 u8 enabled_tc;
12315 int ret;
12316
12317 if (!vsi)
12318 return NULL;
12319
12320 pf = vsi->back;
12321
12322 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
12323 i40e_vsi_clear_rings(vsi);
12324
12325 i40e_vsi_free_arrays(vsi, false);
12326 i40e_set_num_rings_in_vsi(vsi);
12327 ret = i40e_vsi_alloc_arrays(vsi, false);
12328 if (ret)
12329 goto err_vsi;
12330
12331 alloc_queue_pairs = vsi->alloc_queue_pairs *
12332 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
12333
12334 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
12335 if (ret < 0) {
12336 dev_info(&pf->pdev->dev,
12337 "failed to get tracking for %d queues for VSI %d err %d\n",
12338 alloc_queue_pairs, vsi->seid, ret);
12339 goto err_vsi;
12340 }
12341 vsi->base_queue = ret;
12342
12343 /* Update the FW view of the VSI. Force a reset of TC and queue
12344 * layout configurations.
12345 */
12346 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
12347 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
12348 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
12349 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
12350 if (vsi->type == I40E_VSI_MAIN)
12351 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
12352
12353 /* assign it some queues */
12354 ret = i40e_alloc_rings(vsi);
12355 if (ret)
12356 goto err_rings;
12357
12358 /* map all of the rings to the q_vectors */
12359 i40e_vsi_map_rings_to_vectors(vsi);
12360 return vsi;
12361
12362 err_rings:
12363 i40e_vsi_free_q_vectors(vsi);
12364 if (vsi->netdev_registered) {
12365 vsi->netdev_registered = false;
12366 unregister_netdev(vsi->netdev);
12367 free_netdev(vsi->netdev);
12368 vsi->netdev = NULL;
12369 }
12370 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
12371 err_vsi:
12372 i40e_vsi_clear(vsi);
12373 return NULL;
12374 }
12375
12376 /**
12377 * i40e_vsi_setup - Set up a VSI by a given type
12378 * @pf: board private structure
12379 * @type: VSI type
12380 * @uplink_seid: the switch element to link to
12381 * @param1: usage depends upon VSI type. For VF types, indicates VF id
12382 *
12383 * This allocates the sw VSI structure and its queue resources, then adds a VSI
12384 * to the identified VEB.
12385 *
12386 * Returns pointer to the successfully allocated and configured VSI sw struct on
12387 * success, otherwise returns NULL on failure.
12388 **/
12389 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
12390 u16 uplink_seid, u32 param1)
12391 {
12392 struct i40e_vsi *vsi = NULL;
12393 struct i40e_veb *veb = NULL;
12394 u16 alloc_queue_pairs;
12395 int ret, i;
12396 int v_idx;
12397
12398 /* The requested uplink_seid must be either
12399 * - the PF's port seid
12400 * no VEB is needed because this is the PF
12401 * or this is a Flow Director special case VSI
12402 * - seid of an existing VEB
12403 * - seid of a VSI that owns an existing VEB
12404 * - seid of a VSI that doesn't own a VEB
12405 * a new VEB is created and the VSI becomes the owner
12406 * - seid of the PF VSI, which is what creates the first VEB
12407 * this is a special case of the previous
12408 *
12409 * Find which uplink_seid we were given and create a new VEB if needed
12410 */
12411 for (i = 0; i < I40E_MAX_VEB; i++) {
12412 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
12413 veb = pf->veb[i];
12414 break;
12415 }
12416 }
12417
12418 if (!veb && uplink_seid != pf->mac_seid) {
12419
12420 for (i = 0; i < pf->num_alloc_vsi; i++) {
12421 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
12422 vsi = pf->vsi[i];
12423 break;
12424 }
12425 }
12426 if (!vsi) {
12427 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
12428 uplink_seid);
12429 return NULL;
12430 }
12431
12432 if (vsi->uplink_seid == pf->mac_seid)
12433 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
12434 vsi->tc_config.enabled_tc);
12435 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
12436 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12437 vsi->tc_config.enabled_tc);
12438 if (veb) {
12439 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
12440 dev_info(&vsi->back->pdev->dev,
12441 "New VSI creation error, uplink seid of LAN VSI expected.\n");
12442 return NULL;
12443 }
12444 /* We come up by default in VEPA mode if SRIOV is not
12445 * already enabled, in which case we can't force VEPA
12446 * mode.
12447 */
12448 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
12449 veb->bridge_mode = BRIDGE_MODE_VEPA;
12450 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12451 }
12452 i40e_config_bridge_mode(veb);
12453 }
12454 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12455 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12456 veb = pf->veb[i];
12457 }
12458 if (!veb) {
12459 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
12460 return NULL;
12461 }
12462
12463 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
12464 uplink_seid = veb->seid;
12465 }
12466
12467 /* get vsi sw struct */
12468 v_idx = i40e_vsi_mem_alloc(pf, type);
12469 if (v_idx < 0)
12470 goto err_alloc;
12471 vsi = pf->vsi[v_idx];
12472 if (!vsi)
12473 goto err_alloc;
12474 vsi->type = type;
12475 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
12476
12477 if (type == I40E_VSI_MAIN)
12478 pf->lan_vsi = v_idx;
12479 else if (type == I40E_VSI_SRIOV)
12480 vsi->vf_id = param1;
12481 /* assign it some queues */
12482 alloc_queue_pairs = vsi->alloc_queue_pairs *
12483 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
12484
12485 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
12486 if (ret < 0) {
12487 dev_info(&pf->pdev->dev,
12488 "failed to get tracking for %d queues for VSI %d err=%d\n",
12489 alloc_queue_pairs, vsi->seid, ret);
12490 goto err_vsi;
12491 }
12492 vsi->base_queue = ret;
12493
12494 /* get a VSI from the hardware */
12495 vsi->uplink_seid = uplink_seid;
12496 ret = i40e_add_vsi(vsi);
12497 if (ret)
12498 goto err_vsi;
12499
12500 switch (vsi->type) {
12501 /* setup the netdev if needed */
12502 case I40E_VSI_MAIN:
12503 case I40E_VSI_VMDQ2:
12504 ret = i40e_config_netdev(vsi);
12505 if (ret)
12506 goto err_netdev;
12507 ret = register_netdev(vsi->netdev);
12508 if (ret)
12509 goto err_netdev;
12510 vsi->netdev_registered = true;
12511 netif_carrier_off(vsi->netdev);
12512 #ifdef CONFIG_I40E_DCB
12513 /* Setup DCB netlink interface */
12514 i40e_dcbnl_setup(vsi);
12515 #endif /* CONFIG_I40E_DCB */
12516 /* fall through */
12517
12518 case I40E_VSI_FDIR:
12519 /* set up vectors and rings if needed */
12520 ret = i40e_vsi_setup_vectors(vsi);
12521 if (ret)
12522 goto err_msix;
12523
12524 ret = i40e_alloc_rings(vsi);
12525 if (ret)
12526 goto err_rings;
12527
12528 /* map all of the rings to the q_vectors */
12529 i40e_vsi_map_rings_to_vectors(vsi);
12530
12531 i40e_vsi_reset_stats(vsi);
12532 break;
12533
12534 default:
12535 /* no netdev or rings for the other VSI types */
12536 break;
12537 }
12538
12539 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
12540 (vsi->type == I40E_VSI_VMDQ2)) {
12541 ret = i40e_vsi_config_rss(vsi);
12542 }
12543 return vsi;
12544
12545 err_rings:
12546 i40e_vsi_free_q_vectors(vsi);
12547 err_msix:
12548 if (vsi->netdev_registered) {
12549 vsi->netdev_registered = false;
12550 unregister_netdev(vsi->netdev);
12551 free_netdev(vsi->netdev);
12552 vsi->netdev = NULL;
12553 }
12554 err_netdev:
12555 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
12556 err_vsi:
12557 i40e_vsi_clear(vsi);
12558 err_alloc:
12559 return NULL;
12560 }
12561
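/* Illustrative usage sketch (the call itself lives elsewhere in the
 * driver): the Flow Director sideband VSI is created along the lines of
 *     vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, main_vsi->seid, 0);
 * i.e. uplinked to the main VSI so that i40e_add_vsi() takes the
 * I40E_VSI_FDIR branch above.
 */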
12562 /**
12563 * i40e_veb_get_bw_info - Query VEB BW information
12564 * @veb: the veb to query
12565 *
12566 * Query the Tx scheduler BW configuration data for given VEB
12567 **/
12568 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
12569 {
12570 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
12571 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
12572 struct i40e_pf *pf = veb->pf;
12573 struct i40e_hw *hw = &pf->hw;
12574 u32 tc_bw_max;
12575 int ret = 0;
12576 int i;
12577
12578 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
12579 &bw_data, NULL);
12580 if (ret) {
12581 dev_info(&pf->pdev->dev,
12582 "query veb bw config failed, err %s aq_err %s\n",
12583 i40e_stat_str(&pf->hw, ret),
12584 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
12585 goto out;
12586 }
12587
12588 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
12589 &ets_data, NULL);
12590 if (ret) {
12591 dev_info(&pf->pdev->dev,
12592 "query veb bw ets config failed, err %s aq_err %s\n",
12593 i40e_stat_str(&pf->hw, ret),
12594 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
12595 goto out;
12596 }
12597
12598 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
12599 veb->bw_max_quanta = ets_data.tc_bw_max;
12600 veb->is_abs_credits = bw_data.absolute_credits_enable;
12601 veb->enabled_tc = ets_data.tc_valid_bits;
12602 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
12603 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
12604 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
12605 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
12606 veb->bw_tc_limit_credits[i] =
12607 le16_to_cpu(bw_data.tc_bw_limits[i]);
12608 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
12609 }
12610
12611 out:
12612 return ret;
12613 }
12614
12615 /**
12616 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
12617 * @pf: board private structure
12618 *
12619 * On error: returns error code (negative)
12620 * On success: returns veb index in PF (positive)
12621 **/
12622 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
12623 {
12624 int ret = -ENOENT;
12625 struct i40e_veb *veb;
12626 int i;
12627
12628 /* Need to protect the allocation of switch elements at the PF level */
12629 mutex_lock(&pf->switch_mutex);
12630
12631 /* VEB list may be fragmented if VEB creation/destruction has
12632 * been happening. We can afford to do a quick scan to look
12633 * for any free slots in the list.
12634 *
12635 * find the next empty veb slot
12636 */
12637 i = 0;
12638 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
12639 i++;
12640 if (i >= I40E_MAX_VEB) {
12641 ret = -ENOMEM;
12642 goto err_alloc_veb; /* out of VEB slots! */
12643 }
12644
12645 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
12646 if (!veb) {
12647 ret = -ENOMEM;
12648 goto err_alloc_veb;
12649 }
12650 veb->pf = pf;
12651 veb->idx = i;
12652 veb->enabled_tc = 1;
12653
12654 pf->veb[i] = veb;
12655 ret = i;
12656 err_alloc_veb:
12657 mutex_unlock(&pf->switch_mutex);
12658 return ret;
12659 }
12660
12661 /**
12662 * i40e_switch_branch_release - Delete a branch of the switch tree
12663 * @branch: where to start deleting
12664 *
12665 * This uses recursion to find the tips of the branch to be
12666 * removed, deleting until we get back to and can delete this VEB.
12667 **/
12668 static void i40e_switch_branch_release(struct i40e_veb *branch)
12669 {
12670 struct i40e_pf *pf = branch->pf;
12671 u16 branch_seid = branch->seid;
12672 u16 veb_idx = branch->idx;
12673 int i;
12674
12675 /* release any VEBs on this VEB - RECURSION */
12676 for (i = 0; i < I40E_MAX_VEB; i++) {
12677 if (!pf->veb[i])
12678 continue;
12679 if (pf->veb[i]->uplink_seid == branch->seid)
12680 i40e_switch_branch_release(pf->veb[i]);
12681 }
12682
12683 /* Release the VSIs on this VEB, but not the owner VSI.
12684 *
12685 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
12686 * the VEB itself, so don't use (*branch) after this loop.
12687 */
12688 for (i = 0; i < pf->num_alloc_vsi; i++) {
12689 if (!pf->vsi[i])
12690 continue;
12691 if (pf->vsi[i]->uplink_seid == branch_seid &&
12692 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
12693 i40e_vsi_release(pf->vsi[i]);
12694 }
12695 }
12696
12697 /* There's one corner case where the VEB might not have been
12698 * removed, so double check it here and remove it if needed.
12699 * This case happens if the veb was created from the debugfs
12700 * commands and no VSIs were added to it.
12701 */
12702 if (pf->veb[veb_idx])
12703 i40e_veb_release(pf->veb[veb_idx]);
12704 }
12705
12706 /**
12707 * i40e_veb_clear - remove veb struct
12708 * @veb: the veb to remove
12709 **/
12710 static void i40e_veb_clear(struct i40e_veb *veb)
12711 {
12712 if (!veb)
12713 return;
12714
12715 if (veb->pf) {
12716 struct i40e_pf *pf = veb->pf;
12717
12718 mutex_lock(&pf->switch_mutex);
12719 if (pf->veb[veb->idx] == veb)
12720 pf->veb[veb->idx] = NULL;
12721 mutex_unlock(&pf->switch_mutex);
12722 }
12723
12724 kfree(veb);
12725 }
12726
12727 /**
12728 * i40e_veb_release - Delete a VEB and free its resources
12729 * @veb: the VEB being removed
12730 **/
12731 void i40e_veb_release(struct i40e_veb *veb)
12732 {
12733 struct i40e_vsi *vsi = NULL;
12734 struct i40e_pf *pf;
12735 int i, n = 0;
12736
12737 pf = veb->pf;
12738
12739 /* find the remaining VSI and check for extras */
12740 for (i = 0; i < pf->num_alloc_vsi; i++) {
12741 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
12742 n++;
12743 vsi = pf->vsi[i];
12744 }
12745 }
12746 if (n != 1) {
12747 dev_info(&pf->pdev->dev,
12748 "can't remove VEB %d with %d VSIs left\n",
12749 veb->seid, n);
12750 return;
12751 }
12752
12753 /* move the remaining VSI to uplink veb */
12754 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
12755 if (veb->uplink_seid) {
12756 vsi->uplink_seid = veb->uplink_seid;
12757 if (veb->uplink_seid == pf->mac_seid)
12758 vsi->veb_idx = I40E_NO_VEB;
12759 else
12760 vsi->veb_idx = veb->veb_idx;
12761 } else {
12762 /* floating VEB */
12763 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
12764 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
12765 }
12766
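/* delete the element from the FW switch first, then drop the sw struct */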
12767 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
12768 i40e_veb_clear(veb);
12769 }
12770
12771 /**
12772 * i40e_add_veb - create the VEB in the switch
12773 * @veb: the VEB to be instantiated
12774 * @vsi: the controlling VSI
12775 **/
12776 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
12777 {
12778 struct i40e_pf *pf = veb->pf;
12779 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
12780 int ret;
12781
12782 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
12783 veb->enabled_tc, false,
12784 &veb->seid, enable_stats, NULL);
12785
12786 /* get a VEB from the hardware */
12787 if (ret) {
12788 dev_info(&pf->pdev->dev,
12789 "couldn't add VEB, err %s aq_err %s\n",
12790 i40e_stat_str(&pf->hw, ret),
12791 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
12792 return -EPERM;
12793 }
12794
12795 /* get statistics counter */
12796 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
12797 &veb->stats_idx, NULL, NULL, NULL);
12798 if (ret) {
12799 dev_info(&pf->pdev->dev,
12800 "couldn't get VEB statistics idx, err %s aq_err %s\n",
12801 i40e_stat_str(&pf->hw, ret),
12802 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
12803 return -EPERM;
12804 }
12805 ret = i40e_veb_get_bw_info(veb);
12806 if (ret) {
12807 dev_info(&pf->pdev->dev,
12808 "couldn't get VEB bw info, err %s aq_err %s\n",
12809 i40e_stat_str(&pf->hw, ret),
12810 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
12811 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
12812 return -ENOENT;
12813 }
12814
12815 vsi->uplink_seid = veb->seid;
12816 vsi->veb_idx = veb->idx;
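/* mark the VSI as this VEB's owner so that i40e_switch_branch_release()
 * leaves it in place when tearing the branch down
 */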
12817 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
12818
12819 return 0;
12820 }
12821
12822 /**
12823 * i40e_veb_setup - Set up a VEB
12824 * @pf: board private structure
12825 * @flags: VEB setup flags
12826 * @uplink_seid: the switch element to link to
12827 * @vsi_seid: the initial VSI seid
12828 * @enabled_tc: Enabled TC bit-map
12829 *
12830 * This allocates the sw VEB structure and links it into the switch.
12831 * It is possible and legal for this to be a duplicate of an already
12832 * existing VEB. It is also possible for both uplink and vsi seids
12833 * to be zero, in order to create a floating VEB.
12834 *
12835 * Returns pointer to the successfully allocated VEB sw struct on
12836 * success, otherwise returns NULL on failure.
12837 **/
12838 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
12839 u16 uplink_seid, u16 vsi_seid,
12840 u8 enabled_tc)
12841 {
12842 struct i40e_veb *veb, *uplink_veb = NULL;
12843 int vsi_idx, veb_idx;
12844 int ret;
12845
12846 /* if one seid is 0, the other must be 0 to create a floating relay */
12847 if ((uplink_seid == 0 || vsi_seid == 0) &&
12848 (uplink_seid + vsi_seid != 0)) {
12849 dev_info(&pf->pdev->dev,
12850 "one, not both seid's are 0: uplink=%d vsi=%d\n",
12851 uplink_seid, vsi_seid);
12852 return NULL;
12853 }
12854
12855 /* make sure there is such a vsi and uplink */
12856 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
12857 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
12858 break;
12859 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
12860 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
12861 vsi_seid);
12862 return NULL;
12863 }
12864
12865 if (uplink_seid && uplink_seid != pf->mac_seid) {
12866 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
12867 if (pf->veb[veb_idx] &&
12868 pf->veb[veb_idx]->seid == uplink_seid) {
12869 uplink_veb = pf->veb[veb_idx];
12870 break;
12871 }
12872 }
12873 if (!uplink_veb) {
12874 dev_info(&pf->pdev->dev,
12875 "uplink seid %d not found\n", uplink_seid);
12876 return NULL;
12877 }
12878 }
12879
12880 /* get veb sw struct */
12881 veb_idx = i40e_veb_mem_alloc(pf);
12882 if (veb_idx < 0)
12883 goto err_alloc;
12884 veb = pf->veb[veb_idx];
12885 veb->flags = flags;
12886 veb->uplink_seid = uplink_seid;
12887 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
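/* default to TC0 if the caller didn't pass a TC bitmap */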
12888 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
12889
12890 /* create the VEB in the switch */
12891 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
12892 if (ret)
12893 goto err_veb;
12894 if (vsi_idx == pf->lan_vsi)
12895 pf->lan_veb = veb->idx;
12896
12897 return veb;
12898
12899 err_veb:
12900 i40e_veb_clear(veb);
12901 err_alloc:
12902 return NULL;
12903 }
12904
12905 /**
12906 * i40e_setup_pf_switch_element - set PF vars based on switch type
12907 * @pf: board private structure
12908 * @ele: element we are building info from
12909 * @num_reported: total number of elements
12910 * @printconfig: should we print the contents
12911 *
12912 * helper function to assist in extracting a few useful SEID values.
12913 **/
12914 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
12915 struct i40e_aqc_switch_config_element_resp *ele,
12916 u16 num_reported, bool printconfig)
12917 {
12918 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
12919 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
12920 u8 element_type = ele->element_type;
12921 u16 seid = le16_to_cpu(ele->seid);
12922
12923 if (printconfig)
12924 dev_info(&pf->pdev->dev,
12925 "type=%d seid=%d uplink=%d downlink=%d\n",
12926 element_type, seid, uplink_seid, downlink_seid);
12927
12928 switch (element_type) {
12929 case I40E_SWITCH_ELEMENT_TYPE_MAC:
12930 pf->mac_seid = seid;
12931 break;
12932 case I40E_SWITCH_ELEMENT_TYPE_VEB:
12933 /* Main VEB? */
12934 if (uplink_seid != pf->mac_seid)
12935 break;
12936 if (pf->lan_veb == I40E_NO_VEB) {
12937 int v;
12938
12939 /* find existing or else empty VEB */
12940 for (v = 0; v < I40E_MAX_VEB; v++) {
12941 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
12942 pf->lan_veb = v;
12943 break;
12944 }
12945 }
12946 if (pf->lan_veb == I40E_NO_VEB) {
12947 v = i40e_veb_mem_alloc(pf);
12948 if (v < 0)
12949 break;
12950 pf->lan_veb = v;
12951 }
12952 }
12953
12954 pf->veb[pf->lan_veb]->seid = seid;
12955 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
12956 pf->veb[pf->lan_veb]->pf = pf;
12957 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
12958 break;
12959 case I40E_SWITCH_ELEMENT_TYPE_VSI:
12960 if (num_reported != 1)
12961 break;
12962 /* This is immediately after a reset so we can assume this is
12963 * the PF's VSI
12964 */
12965 pf->mac_seid = uplink_seid;
12966 pf->pf_seid = downlink_seid;
12967 pf->main_vsi_seid = seid;
12968 if (printconfig)
12969 dev_info(&pf->pdev->dev,
12970 "pf_seid=%d main_vsi_seid=%d\n",
12971 pf->pf_seid, pf->main_vsi_seid);
12972 break;
12973 case I40E_SWITCH_ELEMENT_TYPE_PF:
12974 case I40E_SWITCH_ELEMENT_TYPE_VF:
12975 case I40E_SWITCH_ELEMENT_TYPE_EMP:
12976 case I40E_SWITCH_ELEMENT_TYPE_BMC:
12977 case I40E_SWITCH_ELEMENT_TYPE_PE:
12978 case I40E_SWITCH_ELEMENT_TYPE_PA:
12979 /* ignore these for now */
12980 break;
12981 default:
12982 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
12983 element_type, seid);
12984 break;
12985 }
12986 }
12987
12988 /**
12989 * i40e_fetch_switch_configuration - Get switch config from firmware
12990 * @pf: board private structure
12991 * @printconfig: should we print the contents
12992 *
12993 * Get the current switch configuration from the device and
12994 * extract a few useful SEID values.
12995 **/
12996 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
12997 {
12998 struct i40e_aqc_get_switch_config_resp *sw_config;
12999 u16 next_seid = 0;
13000 int ret = 0;
13001 u8 *aq_buf;
13002 int i;
13003
13004 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
13005 if (!aq_buf)
13006 return -ENOMEM;
13007
13008 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
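/* The FW returns the switch configuration in chunks of up to
 * I40E_AQ_LARGE_BUF bytes; next_seid acts as the continuation cookie
 * and reads back as zero once the last chunk has been delivered,
 * which ends the loop below.
 */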
13009 do {
13010 u16 num_reported, num_total;
13011
13012 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
13013 I40E_AQ_LARGE_BUF,
13014 &next_seid, NULL);
13015 if (ret) {
13016 dev_info(&pf->pdev->dev,
13017 "get switch config failed err %s aq_err %s\n",
13018 i40e_stat_str(&pf->hw, ret),
13019 i40e_aq_str(&pf->hw,
13020 pf->hw.aq.asq_last_status));
13021 kfree(aq_buf);
13022 return -ENOENT;
13023 }
13024
13025 num_reported = le16_to_cpu(sw_config->header.num_reported);
13026 num_total = le16_to_cpu(sw_config->header.num_total);
13027
13028 if (printconfig)
13029 dev_info(&pf->pdev->dev,
13030 "header: %d reported %d total\n",
13031 num_reported, num_total);
13032
13033 for (i = 0; i < num_reported; i++) {
13034 struct i40e_aqc_switch_config_element_resp *ele =
13035 &sw_config->element[i];
13036
13037 i40e_setup_pf_switch_element(pf, ele, num_reported,
13038 printconfig);
13039 }
13040 } while (next_seid != 0);
13041
13042 kfree(aq_buf);
13043 return ret;
13044 }
13045
13046 /**
13047 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
13048 * @pf: board private structure
13049 * @reinit: if the Main VSI needs to be re-initialized.
13050 *
13051 * Returns 0 on success, negative value on failure
13052 **/
13053 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
13054 {
13055 u16 flags = 0;
13056 int ret;
13057
13058 /* find out what's out there already */
13059 ret = i40e_fetch_switch_configuration(pf, false);
13060 if (ret) {
13061 dev_info(&pf->pdev->dev,
13062 "couldn't fetch switch config, err %s aq_err %s\n",
13063 i40e_stat_str(&pf->hw, ret),
13064 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13065 return ret;
13066 }
13067 i40e_pf_reset_stats(pf);
13068
13069 /* Set the switch config bit for the whole device so that
13070 * limited promisc or true promisc is supported when the
13071 * user requests promiscuous mode. The default is limited
13072 * promisc.
13073 */
13074
13075 if ((pf->hw.pf_id == 0) &&
13076 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
13077 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
13078 pf->last_sw_conf_flags = flags;
13079 }
13080
13081 if (pf->hw.pf_id == 0) {
13082 u16 valid_flags;
13083
13084 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
13085 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
13086 NULL);
13087 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
13088 dev_info(&pf->pdev->dev,
13089 "couldn't set switch config bits, err %s aq_err %s\n",
13090 i40e_stat_str(&pf->hw, ret),
13091 i40e_aq_str(&pf->hw,
13092 pf->hw.aq.asq_last_status));
13093 /* not a fatal problem, just keep going */
13094 }
13095 pf->last_sw_conf_valid_flags = valid_flags;
13096 }
13097
13098 /* first time setup */
13099 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
13100 struct i40e_vsi *vsi = NULL;
13101 u16 uplink_seid;
13102
13103 /* Set up the PF VSI associated with the PF's main VSI
13104 * that is already in the HW switch
13105 */
13106 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
13107 uplink_seid = pf->veb[pf->lan_veb]->seid;
13108 else
13109 uplink_seid = pf->mac_seid;
13110 if (pf->lan_vsi == I40E_NO_VSI)
13111 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
13112 else if (reinit)
13113 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
13114 if (!vsi) {
13115 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
13116 i40e_cloud_filter_exit(pf);
13117 i40e_fdir_teardown(pf);
13118 return -EAGAIN;
13119 }
13120 } else {
13121 /* force a reset of TC and queue layout configurations */
13122 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
13123
13124 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
13125 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
13126 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
13127 }
13128 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
13129
13130 i40e_fdir_sb_setup(pf);
13131
13132 /* Setup static PF queue filter control settings */
13133 ret = i40e_setup_pf_filter_control(pf);
13134 if (ret) {
13135 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
13136 ret);
13137 /* Failure here should not stop the remaining setup steps */
13138 }
13139
13140 /* enable RSS in the HW, even for only one queue, as the stack can use
13141 * the hash
13142 */
13143 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
13144 i40e_pf_config_rss(pf);
13145
13146 /* fill in link information and enable LSE reporting */
13147 i40e_link_event(pf);
13148
13149 /* Initialize user-specific link properties */
13150 pf->fc_autoneg_status = !!(pf->hw.phy.link_info.an_info &
13151 I40E_AQ_AN_COMPLETED);
13152
13153 i40e_ptp_init(pf);
13154
13155 /* repopulate tunnel port filters */
13156 i40e_sync_udp_filters(pf);
13157
13158 return ret;
13159 }
13160
13161 /**
13162 * i40e_determine_queue_usage - Work out queue distribution
13163 * @pf: board private structure
13164 **/
13165 static void i40e_determine_queue_usage(struct i40e_pf *pf)
13166 {
13167 int queues_left;
13168 int q_max;
13169
13170 pf->num_lan_qps = 0;
13171
13172 /* Find the max queues to be put into basic use. We'll always be
13173 * using TC0, whether or not DCB is running, and TC0 will get the
13174 * big RSS set.
13175 */
13176 queues_left = pf->hw.func_caps.num_tx_qp;
13177
13178 if ((queues_left == 1) ||
13179 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
13180 /* one qp for PF, no queues for anything else */
13181 queues_left = 0;
13182 pf->alloc_rss_size = pf->num_lan_qps = 1;
13183
13184 /* make sure all the fancies are disabled */
13185 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
13186 I40E_FLAG_IWARP_ENABLED |
13187 I40E_FLAG_FD_SB_ENABLED |
13188 I40E_FLAG_FD_ATR_ENABLED |
13189 I40E_FLAG_DCB_CAPABLE |
13190 I40E_FLAG_DCB_ENABLED |
13191 I40E_FLAG_SRIOV_ENABLED |
13192 I40E_FLAG_VMDQ_ENABLED);
13193 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13194 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
13195 I40E_FLAG_FD_SB_ENABLED |
13196 I40E_FLAG_FD_ATR_ENABLED |
13197 I40E_FLAG_DCB_CAPABLE))) {
13198 /* one qp for PF */
13199 pf->alloc_rss_size = pf->num_lan_qps = 1;
13200 queues_left -= pf->num_lan_qps;
13201
13202 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
13203 I40E_FLAG_IWARP_ENABLED |
13204 I40E_FLAG_FD_SB_ENABLED |
13205 I40E_FLAG_FD_ATR_ENABLED |
13206 I40E_FLAG_DCB_ENABLED |
13207 I40E_FLAG_VMDQ_ENABLED);
13208 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13209 } else {
13210 /* Not enough queues for all TCs */
13211 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
13212 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
13213 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
13214 I40E_FLAG_DCB_ENABLED);
13215 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
13216 }
13217
13218 /* size lan qps from the larger of rss_size_max and online CPUs, then cap by HW qps and MSI-X */
13219 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
13220 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
13221 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
13222 pf->num_lan_qps = q_max;
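/* e.g. (illustrative numbers only): rss_size_max = 64 with 16 online
 * CPUs gives max(64, 16) = 64; with 128 HW queue pairs and 96 MSI-X
 * vectors this stays min(64, 128) = min(64, 96) = 64 LAN queue pairs.
 */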
13223
13224 queues_left -= pf->num_lan_qps;
13225 }
13226
13227 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
13228 if (queues_left > 1) {
13229 queues_left -= 1; /* save 1 queue for FD */
13230 } else {
13231 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
13232 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13233 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
13234 }
13235 }
13236
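/* hand out whole blocks of num_vf_qps queues per VF, scaling back the
 * number of requested VFs if the remaining budget can't cover them all
 */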
13237 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
13238 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
13239 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
13240 (queues_left / pf->num_vf_qps));
13241 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
13242 }
13243
13244 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
13245 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
13246 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
13247 (queues_left / pf->num_vmdq_qps));
13248 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
13249 }
13250
13251 pf->queues_left = queues_left;
13252 dev_dbg(&pf->pdev->dev,
13253 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
13254 pf->hw.func_caps.num_tx_qp,
13255 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
13256 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
13257 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
13258 queues_left);
13259 }
13260
13261 /**
13262 * i40e_setup_pf_filter_control - Setup PF static filter control
13263 * @pf: PF to be setup
13264 *
13265 * i40e_setup_pf_filter_control sets up a PF's initial filter control
13266 * settings. If PE/FCoE are enabled then it will also set the per PF
13267 * based filter sizes required for them. It also enables Flow Director,
13268 * ethertype and macvlan type filter settings for the PF.
13269 *
13270 * Returns 0 on success, negative on failure
13271 **/
13272 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
13273 {
13274 struct i40e_filter_control_settings *settings = &pf->filter_settings;
13275
13276 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
13277
13278 /* Flow Director is enabled */
13279 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
13280 settings->enable_fdir = true;
13281
13282 /* Ethtype and MACVLAN filters enabled for PF */
13283 settings->enable_ethtype = true;
13284 settings->enable_macvlan = true;
13285
13286 if (i40e_set_filter_control(&pf->hw, settings))
13287 return -ENOENT;
13288
13289 return 0;
13290 }
13291
13292 #define INFO_STRING_LEN 255
13293 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
13294 static void i40e_print_features(struct i40e_pf *pf)
13295 {
13296 struct i40e_hw *hw = &pf->hw;
13297 char *buf;
13298 int i;
13299
13300 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
13301 if (!buf)
13302 return;
13303
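/* i counts the bytes written so far; REMAIN(i) bounds each following
 * snprintf() to the space left in buf
 */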
13304 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
13305 #ifdef CONFIG_PCI_IOV
13306 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
13307 #endif
13308 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
13309 pf->hw.func_caps.num_vsis,
13310 pf->vsi[pf->lan_vsi]->num_queue_pairs);
13311 if (pf->flags & I40E_FLAG_RSS_ENABLED)
13312 i += snprintf(&buf[i], REMAIN(i), " RSS");
13313 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
13314 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
13315 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
13316 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
13317 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
13318 }
13319 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
13320 i += snprintf(&buf[i], REMAIN(i), " DCB");
13321 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
13322 i += snprintf(&buf[i], REMAIN(i), " Geneve");
13323 if (pf->flags & I40E_FLAG_PTP)
13324 i += snprintf(&buf[i], REMAIN(i), " PTP");
13325 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
13326 i += snprintf(&buf[i], REMAIN(i), " VEB");
13327 else
13328 i += snprintf(&buf[i], REMAIN(i), " VEPA");
13329
13330 dev_info(&pf->pdev->dev, "%s\n", buf);
13331 kfree(buf);
13332 WARN_ON(i > INFO_STRING_LEN);
13333 }
13334
13335 /**
13336 * i40e_get_platform_mac_addr - get platform-specific MAC address
13337 * @pdev: PCI device information struct
13338 * @pf: board private structure
13339 *
13340 * Look up the MAC address for the device. First we'll try
13341 * eth_platform_get_mac_address, which will check Open Firmware or an
13342 * arch-specific fallback. Otherwise, we'll default to the value stored in
13343 * firmware.
13344 **/
13345 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
13346 {
13347 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
13348 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
13349 }
13350
13351 /**
13352 * i40e_probe - Device initialization routine
13353 * @pdev: PCI device information struct
13354 * @ent: entry in i40e_pci_tbl
13355 *
13356 * i40e_probe initializes a PF identified by a pci_dev structure.
13357 * The OS initialization, configuring of the PF private structure,
13358 * and a hardware reset occur.
13359 *
13360 * Returns 0 on success, negative on failure
13361 **/
13362 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
13363 {
13364 struct i40e_aq_get_phy_abilities_resp abilities;
13365 struct i40e_pf *pf;
13366 struct i40e_hw *hw;
13367 static u16 pfs_found;
13368 u16 wol_nvm_bits;
13369 u16 link_status;
13370 int err;
13371 u32 val;
13372 u32 i;
13373 u8 set_fc_aq_fail;
13374
13375 err = pci_enable_device_mem(pdev);
13376 if (err)
13377 return err;
13378
13379 /* set up for high or low dma */
13380 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
13381 if (err) {
13382 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
13383 if (err) {
13384 dev_err(&pdev->dev,
13385 "DMA configuration failed: 0x%x\n", err);
13386 goto err_dma;
13387 }
13388 }
13389
13390 /* set up pci connections */
13391 err = pci_request_mem_regions(pdev, i40e_driver_name);
13392 if (err) {
13393 dev_info(&pdev->dev,
13394 "pci_request_selected_regions failed %d\n", err);
13395 goto err_pci_reg;
13396 }
13397
13398 pci_enable_pcie_error_reporting(pdev);
13399 pci_set_master(pdev);
13400
13401 /* Now that we have a PCI connection, we need to do the
13402 * low level device setup. This is primarily setting up
13403 * the Admin Queue structures and then querying for the
13404 * device's current profile information.
13405 */
13406 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
13407 if (!pf) {
13408 err = -ENOMEM;
13409 goto err_pf_alloc;
13410 }
13411 pf->next_vsi = 0;
13412 pf->pdev = pdev;
13413 set_bit(__I40E_DOWN, pf->state);
13414
13415 hw = &pf->hw;
13416 hw->back = pf;
13417
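/* map no more than I40E_MAX_CSR_SPACE of BAR 0, even if the BAR itself
 * is larger
 */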
13418 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
13419 I40E_MAX_CSR_SPACE);
13420
13421 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
13422 if (!hw->hw_addr) {
13423 err = -EIO;
13424 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
13425 (unsigned int)pci_resource_start(pdev, 0),
13426 pf->ioremap_len, err);
13427 goto err_ioremap;
13428 }
13429 hw->vendor_id = pdev->vendor;
13430 hw->device_id = pdev->device;
13431 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
13432 hw->subsystem_vendor_id = pdev->subsystem_vendor;
13433 hw->subsystem_device_id = pdev->subsystem_device;
13434 hw->bus.device = PCI_SLOT(pdev->devfn);
13435 hw->bus.func = PCI_FUNC(pdev->devfn);
13436 hw->bus.bus_id = pdev->bus->number;
13437 pf->instance = pfs_found;
13438
13439 /* Select something other than the 802.1ad ethertype for the
13440 * switch to use internally and drop on ingress.
13441 */
13442 hw->switch_tag = 0xffff;
13443 hw->first_tag = ETH_P_8021AD;
13444 hw->second_tag = ETH_P_8021Q;
13445
13446 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
13447 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
13448
13449 /* set up the locks for the AQ, do this only once in probe
13450 * and destroy them only once in remove
13451 */
13452 mutex_init(&hw->aq.asq_mutex);
13453 mutex_init(&hw->aq.arq_mutex);
13454
13455 pf->msg_enable = netif_msg_init(debug,
13456 NETIF_MSG_DRV |
13457 NETIF_MSG_PROBE |
13458 NETIF_MSG_LINK);
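/* a debug value below -1 is taken as a raw i40e_hw debug mask rather
 * than a netif message level
 */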
13459 if (debug < -1)
13460 pf->hw.debug_mask = debug;
13461
13462 /* do a special CORER for clearing PXE mode once at init */
13463 if (hw->revision_id == 0 &&
13464 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
13465 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
13466 i40e_flush(hw);
13467 msleep(200);
13468 pf->corer_count++;
13469
13470 i40e_clear_pxe_mode(hw);
13471 }
13472
13473 /* Reset here to make sure all is clean and to define PF 'n' */
13474 i40e_clear_hw(hw);
13475 err = i40e_pf_reset(hw);
13476 if (err) {
13477 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
13478 goto err_pf_reset;
13479 }
13480 pf->pfr_count++;
13481
13482 hw->aq.num_arq_entries = I40E_AQ_LEN;
13483 hw->aq.num_asq_entries = I40E_AQ_LEN;
13484 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
13485 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
13486 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
13487
13488 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
13489 "%s-%s:misc",
13490 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
13491
13492 err = i40e_init_shared_code(hw);
13493 if (err) {
13494 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
13495 err);
13496 goto err_pf_reset;
13497 }
13498
13499 /* set up a default setting for link flow control */
13500 pf->hw.fc.requested_mode = I40E_FC_NONE;
13501
13502 err = i40e_init_adminq(hw);
13503 if (err) {
13504 if (err == I40E_ERR_FIRMWARE_API_VERSION)
13505 dev_info(&pdev->dev,
13506 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
13507 else
13508 dev_info(&pdev->dev,
13509 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
13510
13511 goto err_pf_reset;
13512 }
13513 i40e_get_oem_version(hw);
13514
13515 /* provide nvm, fw, api versions */
13516 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
13517 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
13518 hw->aq.api_maj_ver, hw->aq.api_min_ver,
13519 i40e_nvm_version_str(hw));
13520
13521 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
13522 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
13523 dev_info(&pdev->dev,
13524 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
13525 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
13526 dev_info(&pdev->dev,
13527 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
13528
13529 i40e_verify_eeprom(pf);
13530
13531 /* Rev 0 hardware was never productized */
13532 if (hw->revision_id < 1)
13533 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
13534
13535 i40e_clear_pxe_mode(hw);
13536 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
13537 if (err)
13538 goto err_adminq_setup;
13539
13540 err = i40e_sw_init(pf);
13541 if (err) {
13542 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
13543 goto err_sw_init;
13544 }
13545
13546 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
13547 hw->func_caps.num_rx_qp, 0, 0);
13548 if (err) {
13549 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
13550 goto err_init_lan_hmc;
13551 }
13552
13553 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
13554 if (err) {
13555 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
13556 err = -ENOENT;
13557 goto err_configure_lan_hmc;
13558 }
13559
13560 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
13561 * Ignore error return codes because if it was already disabled via
13562 * hardware settings this will fail
13563 */
13564 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
13565 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
13566 i40e_aq_stop_lldp(hw, true, NULL);
13567 }
13568
13569 /* allow a platform config to override the HW addr */
13570 i40e_get_platform_mac_addr(pdev, pf);
13571
13572 if (!is_valid_ether_addr(hw->mac.addr)) {
13573 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
13574 err = -EIO;
13575 goto err_mac_addr;
13576 }
13577 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
13578 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
13579 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
13580 if (is_valid_ether_addr(hw->mac.port_addr))
13581 pf->hw_features |= I40E_HW_PORT_ID_VALID;
13582
13583 pci_set_drvdata(pdev, pf);
13584 pci_save_state(pdev);
13585 #ifdef CONFIG_I40E_DCB
13586 err = i40e_init_pf_dcb(pf);
13587 if (err) {
13588 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
13589 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
13590 /* Continue without DCB enabled */
13591 }
13592 #endif /* CONFIG_I40E_DCB */
13593
13594 /* set up periodic task facility */
13595 timer_setup(&pf->service_timer, i40e_service_timer, 0);
13596 pf->service_timer_period = HZ;
13597
13598 INIT_WORK(&pf->service_task, i40e_service_task);
13599 clear_bit(__I40E_SERVICE_SCHED, pf->state);
13600
13601 /* NVM bit on means WoL disabled for the port */
13602 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
13603 if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
13604 pf->wol_en = false;
13605 else
13606 pf->wol_en = true;
13607 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
13608
13609 /* set up the main switch operations */
13610 i40e_determine_queue_usage(pf);
13611 err = i40e_init_interrupt_scheme(pf);
13612 if (err)
13613 goto err_switch_setup;
13614
13615 /* The number of VSIs reported by the FW is the minimum guaranteed
13616 * to us; HW supports far more and we share the remaining pool with
13617 * the other PFs. We allocate space for more than the guarantee with
13618 * the understanding that we might not get them all later.
13619 */
13620 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
13621 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
13622 else
13623 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
13624
13625 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
13626 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
13627 GFP_KERNEL);
13628 if (!pf->vsi) {
13629 err = -ENOMEM;
13630 goto err_switch_setup;
13631 }
13632
13633 #ifdef CONFIG_PCI_IOV
13634 /* prep for VF support */
13635 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
13636 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
13637 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
13638 if (pci_num_vf(pdev))
13639 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
13640 }
13641 #endif
13642 err = i40e_setup_pf_switch(pf, false);
13643 if (err) {
13644 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
13645 goto err_vsis;
13646 }
13647 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
13648
13649 /* Make sure flow control is set according to current settings */
13650 err = i40e_set_fc(hw, &set_fc_aq_fail, true);
13651 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
13652 dev_dbg(&pf->pdev->dev,
13653 "Set fc with err %s aq_err %s on get_phy_cap\n",
13654 i40e_stat_str(hw, err),
13655 i40e_aq_str(hw, hw->aq.asq_last_status));
13656 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
13657 dev_dbg(&pf->pdev->dev,
13658 "Set fc with err %s aq_err %s on set_phy_config\n",
13659 i40e_stat_str(hw, err),
13660 i40e_aq_str(hw, hw->aq.asq_last_status));
13661 if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
13662 dev_dbg(&pf->pdev->dev,
13663 "Set fc with err %s aq_err %s on get_link_info\n",
13664 i40e_stat_str(hw, err),
13665 i40e_aq_str(hw, hw->aq.asq_last_status));
13666
13667 /* if FDIR VSI was set up, start it now */
13668 for (i = 0; i < pf->num_alloc_vsi; i++) {
13669 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
13670 i40e_vsi_open(pf->vsi[i]);
13671 break;
13672 }
13673 }
13674
13675 /* The driver only wants link up/down and module qualification
13676 * reports from firmware. Note the negative logic.
13677 */
13678 err = i40e_aq_set_phy_int_mask(&pf->hw,
13679 ~(I40E_AQ_EVENT_LINK_UPDOWN |
13680 I40E_AQ_EVENT_MEDIA_NA |
13681 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
13682 if (err)
13683 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
13684 i40e_stat_str(&pf->hw, err),
13685 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13686
13687 /* Reconfigure hardware for allowing smaller MSS in the case
13688 * of TSO, so that we avoid the MDD being fired and causing
13689 * a reset in the case of small MSS+TSO.
13690 */
13691 val = rd32(hw, I40E_REG_MSS);
13692 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
13693 val &= ~I40E_REG_MSS_MIN_MASK;
13694 val |= I40E_64BYTE_MSS;
13695 wr32(hw, I40E_REG_MSS, val);
13696 }
13697
13698 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
13699 msleep(75);
13700 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
13701 if (err)
13702 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
13703 i40e_stat_str(&pf->hw, err),
13704 i40e_aq_str(&pf->hw,
13705 pf->hw.aq.asq_last_status));
13706 }
13707 /* The main driver is (mostly) up and happy. We need to set this state
13708 * before setting up the misc vector or we get a race and the vector
13709 * ends up disabled forever.
13710 */
13711 clear_bit(__I40E_DOWN, pf->state);
13712
13713 /* In case of MSIX we are going to setup the misc vector right here
13714 * to handle admin queue events etc. In case of legacy and MSI
13715 * the misc functionality and queue processing is combined in
13716 * the same vector and that gets setup at open.
13717 */
13718 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
13719 err = i40e_setup_misc_vector(pf);
13720 if (err) {
13721 dev_info(&pdev->dev,
13722 "setup of misc vector failed: %d\n", err);
13723 goto err_vsis;
13724 }
13725 }
13726
13727 #ifdef CONFIG_PCI_IOV
13728 /* prep for VF support */
13729 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
13730 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
13731 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
13732 /* disable link interrupts for VFs */
13733 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
13734 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
13735 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
13736 i40e_flush(hw);
13737
13738 if (pci_num_vf(pdev)) {
13739 dev_info(&pdev->dev,
13740 "Active VFs found, allocating resources.\n");
13741 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
13742 if (err)
13743 dev_info(&pdev->dev,
13744 "Error %d allocating resources for existing VFs\n",
13745 err);
13746 }
13747 }
13748 #endif /* CONFIG_PCI_IOV */
13749
13750 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
13751 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
13752 pf->num_iwarp_msix,
13753 I40E_IWARP_IRQ_PILE_ID);
13754 if (pf->iwarp_base_vector < 0) {
13755 dev_info(&pdev->dev,
13756 "failed to get tracking for %d vectors for IWARP err=%d\n",
13757 pf->num_iwarp_msix, pf->iwarp_base_vector);
13758 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
13759 }
13760 }
13761
13762 i40e_dbg_pf_init(pf);
13763
13764 /* tell the firmware that we're starting */
13765 i40e_send_version(pf);
13766
13767 /* since everything's happy, start the service_task timer */
13768 mod_timer(&pf->service_timer,
13769 round_jiffies(jiffies + pf->service_timer_period));
13770
13771 /* add this PF to client device list and launch a client service task */
13772 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
13773 err = i40e_lan_add_device(pf);
13774 if (err)
13775 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
13776 err);
13777 }
13778
13779 #define PCI_SPEED_SIZE 8
13780 #define PCI_WIDTH_SIZE 8
13781 /* Devices on the IOSF bus do not have this information
13782 * and will report PCI Gen 1 x 1 by default so don't bother
13783 * checking them.
13784 */
13785 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
13786 char speed[PCI_SPEED_SIZE] = "Unknown";
13787 char width[PCI_WIDTH_SIZE] = "Unknown";
13788
13789 /* Get the negotiated link width and speed from PCI config
13790 * space
13791 */
13792 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
13793 &link_status);
13794
13795 i40e_set_pci_config_data(hw, link_status);
13796
13797 switch (hw->bus.speed) {
13798 case i40e_bus_speed_8000:
13799 strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
13800 case i40e_bus_speed_5000:
13801 strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
13802 case i40e_bus_speed_2500:
13803 strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
13804 default:
13805 break;
13806 }
13807 switch (hw->bus.width) {
13808 case i40e_bus_width_pcie_x8:
13809 strncpy(width, "8", PCI_WIDTH_SIZE); break;
13810 case i40e_bus_width_pcie_x4:
13811 strncpy(width, "4", PCI_WIDTH_SIZE); break;
13812 case i40e_bus_width_pcie_x2:
13813 strncpy(width, "2", PCI_WIDTH_SIZE); break;
13814 case i40e_bus_width_pcie_x1:
13815 strncpy(width, "1", PCI_WIDTH_SIZE); break;
13816 default:
13817 break;
13818 }
13819
13820 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
13821 speed, width);
13822
13823 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
13824 hw->bus.speed < i40e_bus_speed_8000) {
13825 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
13826 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
13827 }
13828 }
13829
13830 /* get the requested speeds from the fw */
13831 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
13832 if (err)
13833 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
13834 i40e_stat_str(&pf->hw, err),
13835 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13836 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
13837
13838 /* get the supported phy types from the fw */
13839 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
13840 if (err)
13841 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
13842 i40e_stat_str(&pf->hw, err),
13843 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13844
13845 /* Add a filter that drops all flow control frames transmitted from any
13846 * VSI. By doing so we stop a malicious VF from sending out
13847 * PAUSE or PFC frames and potentially controlling traffic for other
13848 * PF/VF VSIs.
13849 * The FW can still send Flow control frames if enabled.
13850 */
13851 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
13852 pf->main_vsi_seid);
13853
13854 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
13855 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
13856 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
13857 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
13858 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
13859 /* print a string summarizing features */
13860 i40e_print_features(pf);
13861
13862 return 0;
13863
13864 /* Unwind what we've done if something failed in the setup */
13865 err_vsis:
13866 set_bit(__I40E_DOWN, pf->state);
13867 i40e_clear_interrupt_scheme(pf);
13868 kfree(pf->vsi);
13869 err_switch_setup:
13870 i40e_reset_interrupt_capability(pf);
13871 del_timer_sync(&pf->service_timer);
13872 err_mac_addr:
13873 err_configure_lan_hmc:
13874 (void)i40e_shutdown_lan_hmc(hw);
13875 err_init_lan_hmc:
13876 kfree(pf->qp_pile);
13877 err_sw_init:
13878 err_adminq_setup:
13879 err_pf_reset:
13880 iounmap(hw->hw_addr);
13881 err_ioremap:
13882 kfree(pf);
13883 err_pf_alloc:
13884 pci_disable_pcie_error_reporting(pdev);
13885 pci_release_mem_regions(pdev);
13886 err_pci_reg:
13887 err_dma:
13888 pci_disable_device(pdev);
13889 return err;
13890 }
13891
13892 /**
13893 * i40e_remove - Device removal routine
13894 * @pdev: PCI device information struct
13895 *
13896 * i40e_remove is called by the PCI subsystem to alert the driver
13897 * that it should release a PCI device. This could be caused by a
13898 * Hot-Plug event, or because the driver is going to be removed from
13899 * memory.
13900 **/
13901 static void i40e_remove(struct pci_dev *pdev)
13902 {
13903 struct i40e_pf *pf = pci_get_drvdata(pdev);
13904 struct i40e_hw *hw = &pf->hw;
13905 i40e_status ret_code;
13906 int i;
13907
13908 i40e_dbg_pf_exit(pf);
13909
13910 i40e_ptp_stop(pf);
13911
13912 /* Disable RSS in hw */
13913 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
13914 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
13915
13916 /* no more scheduling of any task */
13917 set_bit(__I40E_SUSPENDED, pf->state);
13918 set_bit(__I40E_DOWN, pf->state);
13919 if (pf->service_timer.function)
13920 del_timer_sync(&pf->service_timer);
13921 if (pf->service_task.func)
13922 cancel_work_sync(&pf->service_task);
13923
13924 /* Client close must be called explicitly here because the timer
13925 * has been stopped.
13926 */
13927 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
13928
13929 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
13930 i40e_free_vfs(pf);
13931 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
13932 }
13933
13934 i40e_fdir_teardown(pf);
13935
13936 /* If there is a switch structure or any orphans, remove them.
13937 * This will leave only the PF's VSI remaining.
13938 */
13939 for (i = 0; i < I40E_MAX_VEB; i++) {
13940 if (!pf->veb[i])
13941 continue;
13942
13943 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
13944 pf->veb[i]->uplink_seid == 0)
13945 i40e_switch_branch_release(pf->veb[i]);
13946 }
13947
13948 /* Now we can shutdown the PF's VSI, just before we kill
13949 * adminq and hmc.
13950 */
13951 if (pf->vsi[pf->lan_vsi])
13952 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
13953
13954 i40e_cloud_filter_exit(pf);
13955
13956 /* remove attached clients */
13957 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
13958 ret_code = i40e_lan_del_device(pf);
13959 if (ret_code)
13960 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
13961 ret_code);
13962 }
13963
13964 /* shutdown and destroy the HMC */
13965 if (hw->hmc.hmc_obj) {
13966 ret_code = i40e_shutdown_lan_hmc(hw);
13967 if (ret_code)
13968 dev_warn(&pdev->dev,
13969 "Failed to destroy the HMC resources: %d\n",
13970 ret_code);
13971 }
13972
13973 /* shutdown the adminq */
13974 i40e_shutdown_adminq(hw);
13975
13976 /* destroy the locks only once, here */
13977 mutex_destroy(&hw->aq.arq_mutex);
13978 mutex_destroy(&hw->aq.asq_mutex);
13979
13980 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
13981 i40e_clear_interrupt_scheme(pf);
13982 for (i = 0; i < pf->num_alloc_vsi; i++) {
13983 if (pf->vsi[i]) {
13984 i40e_vsi_clear_rings(pf->vsi[i]);
13985 i40e_vsi_clear(pf->vsi[i]);
13986 pf->vsi[i] = NULL;
13987 }
13988 }
13989
13990 for (i = 0; i < I40E_MAX_VEB; i++) {
13991 kfree(pf->veb[i]);
13992 pf->veb[i] = NULL;
13993 }
13994
13995 kfree(pf->qp_pile);
13996 kfree(pf->vsi);
13997
13998 iounmap(hw->hw_addr);
13999 kfree(pf);
14000 pci_release_mem_regions(pdev);
14001
14002 pci_disable_pcie_error_reporting(pdev);
14003 pci_disable_device(pdev);
14004 }
14005
14006 /**
14007 * i40e_pci_error_detected - warning that something funky happened in PCI land
14008 * @pdev: PCI device information struct
14009 *
14010 * Called to warn that something happened and the error handling steps
14011 * are in progress. Allows the driver to quiesce things and be ready for
14012 * remediation.
14013 **/
14014 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
14015 enum pci_channel_state error)
14016 {
14017 struct i40e_pf *pf = pci_get_drvdata(pdev);
14018
14019 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
14020
14021 if (!pf) {
14022 dev_info(&pdev->dev,
14023 "Cannot recover - error happened during device probe\n");
14024 return PCI_ERS_RESULT_DISCONNECT;
14025 }
14026
14027 /* shutdown all operations */
14028 if (!test_bit(__I40E_SUSPENDED, pf->state))
14029 i40e_prep_for_reset(pf, false);
14030
14031 /* Request a slot reset */
14032 return PCI_ERS_RESULT_NEED_RESET;
14033 }
14034
14035 /**
14036 * i40e_pci_error_slot_reset - a PCI slot reset just happened
14037 * @pdev: PCI device information struct
14038 *
14039 * Called to find if the driver can work with the device now that
14040 * the pci slot has been reset. If a basic connection seems good
14041 * (registers are readable and have sane content) then return a
14042 * happy little PCI_ERS_RESULT_xxx.
14043 **/
14044 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
14045 {
14046 struct i40e_pf *pf = pci_get_drvdata(pdev);
14047 pci_ers_result_t result;
14048 int err;
14049 u32 reg;
14050
14051 dev_dbg(&pdev->dev, "%s\n", __func__);
14052 if (pci_enable_device_mem(pdev)) {
14053 dev_info(&pdev->dev,
14054 "Cannot re-enable PCI device after reset.\n");
14055 result = PCI_ERS_RESULT_DISCONNECT;
14056 } else {
14057 pci_set_master(pdev);
14058 pci_restore_state(pdev);
14059 pci_save_state(pdev);
14060 pci_wake_from_d3(pdev, false);
14061
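/* sanity-check that device registers are readable again; a non-zero
 * GLGEN_RTRIG readback suggests a reset is still outstanding, so the
 * slot is reported as not recovered
 */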
14062 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
14063 if (reg == 0)
14064 result = PCI_ERS_RESULT_RECOVERED;
14065 else
14066 result = PCI_ERS_RESULT_DISCONNECT;
14067 }
14068
14069 err = pci_cleanup_aer_uncorrect_error_status(pdev);
14070 if (err) {
14071 dev_info(&pdev->dev,
14072 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
14073 err);
14074 /* non-fatal, continue */
14075 }
14076
14077 return result;
14078 }
14079
14080 /**
14081 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
14082 * @pdev: PCI device information struct
14083 */
14084 static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
14085 {
14086 struct i40e_pf *pf = pci_get_drvdata(pdev);
14087
14088 i40e_prep_for_reset(pf, false);
14089 }
14090
14091 /**
14092 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
14093 * @pdev: PCI device information struct
14094 */
14095 static void i40e_pci_error_reset_done(struct pci_dev *pdev)
14096 {
14097 struct i40e_pf *pf = pci_get_drvdata(pdev);
14098
14099 i40e_reset_and_rebuild(pf, false, false);
14100 }
14101
14102 /**
14103 * i40e_pci_error_resume - restart operations after PCI error recovery
14104 * @pdev: PCI device information struct
14105 *
14106 * Called to allow the driver to bring things back up after PCI error
14107 * and/or reset recovery has finished.
14108 **/
14109 static void i40e_pci_error_resume(struct pci_dev *pdev)
14110 {
14111 struct i40e_pf *pf = pci_get_drvdata(pdev);
14112
14113 dev_dbg(&pdev->dev, "%s\n", __func__);
14114 if (test_bit(__I40E_SUSPENDED, pf->state))
14115 return;
14116
14117 i40e_handle_reset_warning(pf, false);
14118 }
14119
14120 /**
14121 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
14122 * using the mac_address_write admin q function
14123 * @pf: pointer to i40e_pf struct
14124 **/
14125 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
14126 {
14127 struct i40e_hw *hw = &pf->hw;
14128 i40e_status ret;
14129 u8 mac_addr[ETH_ALEN];
14130 u16 flags = 0;
14131
14132 /* Get current MAC address in case it's an LAA */
14133 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
14134 ether_addr_copy(mac_addr,
14135 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
14136 } else {
14137 dev_err(&pf->pdev->dev,
14138 "Failed to retrieve MAC address; using default\n");
14139 ether_addr_copy(mac_addr, hw->mac.addr);
14140 }
14141
14142 /* The FW expects the mac address write cmd to first be called with
14143 * one of these flags before calling it again with the multicast
14144 * enable flags.
14145 */
14146 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
14147
14148 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
14149 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
14150
14151 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
14152 if (ret) {
14153 dev_err(&pf->pdev->dev,
14154 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
14155 return;
14156 }
14157
14158 flags = I40E_AQC_MC_MAG_EN
14159 | I40E_AQC_WOL_PRESERVE_ON_PFR
14160 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
14161 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
14162 if (ret)
14163 dev_err(&pf->pdev->dev,
14164 "Failed to enable Multicast Magic Packet wake up\n");
14165 }
14166
14167 /**
14168 * i40e_shutdown - PCI callback for shutting down
14169 * @pdev: PCI device information struct
14170 **/
14171 static void i40e_shutdown(struct pci_dev *pdev)
14172 {
14173 struct i40e_pf *pf = pci_get_drvdata(pdev);
14174 struct i40e_hw *hw = &pf->hw;
14175
14176 set_bit(__I40E_SUSPENDED, pf->state);
14177 set_bit(__I40E_DOWN, pf->state);
14178 rtnl_lock();
14179 i40e_prep_for_reset(pf, true);
14180 rtnl_unlock();
14181
14182 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
14183 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
14184
14185 del_timer_sync(&pf->service_timer);
14186 cancel_work_sync(&pf->service_task);
14187 i40e_cloud_filter_exit(pf);
14188 i40e_fdir_teardown(pf);
14189
14190 /* Client close must be called explicitly here because the timer
14191 * has been stopped.
14192 */
14193 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
14194
14195 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
14196 i40e_enable_mc_magic_wake(pf);
14197
14198 i40e_prep_for_reset(pf, false);
14199
14200 wr32(hw, I40E_PFPM_APM,
14201 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
14202 wr32(hw, I40E_PFPM_WUFC,
14203 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
14204
14205 i40e_clear_interrupt_scheme(pf);
14206
14207 if (system_state == SYSTEM_POWER_OFF) {
14208 pci_wake_from_d3(pdev, pf->wol_en);
14209 pci_set_power_state(pdev, PCI_D3hot);
14210 }
14211 }
14212
14213 /**
14214 * i40e_suspend - PM callback for moving to D3
14215 * @dev: generic device information structure
14216 **/
14217 static int __maybe_unused i40e_suspend(struct device *dev)
14218 {
14219 struct pci_dev *pdev = to_pci_dev(dev);
14220 struct i40e_pf *pf = pci_get_drvdata(pdev);
14221 struct i40e_hw *hw = &pf->hw;
14222
14223 /* If we're already suspended, then there is nothing to do */
14224 if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
14225 return 0;
14226
14227 set_bit(__I40E_DOWN, pf->state);
14228
14229 /* Ensure service task will not be running */
14230 del_timer_sync(&pf->service_timer);
14231 cancel_work_sync(&pf->service_task);
14232
14233 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
14234 i40e_enable_mc_magic_wake(pf);
14235
14236 i40e_prep_for_reset(pf, false);
14237
14238 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
14239 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
14240
14241 /* Clear the interrupt scheme and release our IRQs so that the system
14242 * can safely hibernate even when there are a large number of CPUs.
14243 * Otherwise hibernation might fail when mapping all the vectors back
14244 * to CPU0.
14245 */
14246 i40e_clear_interrupt_scheme(pf);
14247
14248 return 0;
14249 }
14250
14251 /**
14252 * i40e_resume - PM callback for waking up from D3
14253 * @dev: generic device information structure
14254 **/
14255 static int __maybe_unused i40e_resume(struct device *dev)
14256 {
14257 struct pci_dev *pdev = to_pci_dev(dev);
14258 struct i40e_pf *pf = pci_get_drvdata(pdev);
14259 int err;
14260
14261 /* If we're not suspended, then there is nothing to do */
14262 if (!test_bit(__I40E_SUSPENDED, pf->state))
14263 return 0;
14264
14265 /* We cleared the interrupt scheme when we suspended, so we need to
14266 * restore it now to resume device functionality.
14267 */
14268 err = i40e_restore_interrupt_scheme(pf);
14269 if (err) {
14270 dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
14271 err);
14272 }
14273
14274 clear_bit(__I40E_DOWN, pf->state);
14275 i40e_reset_and_rebuild(pf, false, false);
14276
14277 /* Clear suspended state last after everything is recovered */
14278 clear_bit(__I40E_SUSPENDED, pf->state);
14279
14280 /* Restart the service task */
14281 mod_timer(&pf->service_timer,
14282 round_jiffies(jiffies + pf->service_timer_period));
14283
14284 return 0;
14285 }
14286
14287 static const struct pci_error_handlers i40e_err_handler = {
14288 .error_detected = i40e_pci_error_detected,
14289 .slot_reset = i40e_pci_error_slot_reset,
14290 .reset_prepare = i40e_pci_error_reset_prepare,
14291 .reset_done = i40e_pci_error_reset_done,
14292 .resume = i40e_pci_error_resume,
14293 };
14294
14295 static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
14296
14297 static struct pci_driver i40e_driver = {
14298 .name = i40e_driver_name,
14299 .id_table = i40e_pci_tbl,
14300 .probe = i40e_probe,
14301 .remove = i40e_remove,
14302 .driver = {
14303 .pm = &i40e_pm_ops,
14304 },
14305 .shutdown = i40e_shutdown,
14306 .err_handler = &i40e_err_handler,
14307 .sriov_configure = i40e_pci_sriov_configure,
14308 };
14309
14310 /**
14311 * i40e_init_module - Driver registration routine
14312 *
14313 * i40e_init_module is the first routine called when the driver is
14314 * loaded. All it does is register with the PCI subsystem.
14315 **/
14316 static int __init i40e_init_module(void)
14317 {
14318 pr_info("%s: %s - version %s\n", i40e_driver_name,
14319 i40e_driver_string, i40e_driver_version_str);
14320 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
14321
14322 /* There is no need to throttle the number of active tasks because
14323 * each device limits its own task using a state bit for scheduling
14324 * the service task, and the device tasks do not interfere with each
14325 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
14326 * since we need to be able to guarantee forward progress even under
14327 * memory pressure.
14328 */
14329 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
14330 if (!i40e_wq) {
14331 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
14332 return -ENOMEM;
14333 }
14334
14335 i40e_dbg_init();
14336 return pci_register_driver(&i40e_driver);
14337 }
14338 module_init(i40e_init_module);
14339
14340 /**
14341 * i40e_exit_module - Driver exit cleanup routine
14342 *
14343 * i40e_exit_module is called just before the driver is removed
14344 * from memory.
14345 **/
14346 static void __exit i40e_exit_module(void)
14347 {
14348 pci_unregister_driver(&i40e_driver);
14349 destroy_workqueue(i40e_wq);
14350 i40e_dbg_exit();
14351 }
14352 module_exit(i40e_exit_module);