/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#ifdef CONFIG_I40E_VXLAN
#include <net/vxlan.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
	"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	__stringify(DRV_VERSION_MINOR) "." \
	__stringify(DRV_VERSION_BUILD) DRV_KERN
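/* DRV_VERSION expands to "1.3.2-k" for this release */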
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

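	/* restart at the mildest recovery level if the last recovery
	 * attempt was more than 20 seconds ago
	 */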
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		set_bit(__I40E_DOWN_REQUESTED, &pf->state);
		set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
		break;
	}
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail value
 * @rx_ring: ring to bump
 * @val: new tail index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
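		/* the Rx ring sits immediately after its Tx partner
		 * in the same ring allocation
		 */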
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero. In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

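	/* the QEMU device model apparently cannot do 64-bit register
	 * reads, so compose the 48-bit counter from two 32-bit halves
	 */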
	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es; /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es; /* device's eth stats */
	int idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs; /* device's eth stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

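	/* this PF's FCoE stat bank is indexed by its SEID relative to
	 * the base PF SEID
	 */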
	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];
			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}

/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* See if DCB enabled with PFC TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
	    !(dcb_cfg->pfc.pfcenable)) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi || !vsi->tx_rings[0])
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs. This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications. We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns; /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es; /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);
	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure its a VF filter, else doesn't matter
 * @is_netdev: make sure its a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure its a VF filter, else doesn't matter
 * @is_netdev: make sure its a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only -1 for all the filters denotes not in vlan mode
	 * so we have to go through all the list in order to make sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0)
			return true;
	}

	return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Some older firmware configurations set up a default promiscuous VLAN
 * filter that needs to be removed.
 **/
static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;
	i40e_status aq_ret;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	if (aq_ret)
		return -ENOENT;

	return 0;
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure its a VF filter, else doesn't matter
 * @is_netdev: make sure its a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}

/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by VF or netdev */
		int min_f = 0;
		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;
		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}
	}

	if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
		struct i40e_aqc_remove_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, netdev->dev_addr);
		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				false, false);
	}

	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
		struct i40e_aqc_add_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, hw->mac.addr);
		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	} else {
		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
				    false, false);
		if (f)
			f->is_laa = true;
	}

	i40e_sync_vsi_filters(vsi);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	/* In MFP case we can have a much lower count of MSIx
	 * vectors available and so we need to lower the used
	 * q count.
	 */
	qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->rss_size, num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && ((1 << pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

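			/* qmap packs this TC's first queue index and the
			 * power-of-2 queue count into one 16-bit word
			 */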
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
		    cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(
			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	bool promisc_forced_on = false;
	bool add_happened = false;
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status aq_ret = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	u16 cmd_flags;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

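	/* __I40E_CONFIG_BUSY serializes filter syncs; poll until we own it */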
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		filter_list_len = pf->hw.aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kcalloc(filter_list_len,
			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
			    GFP_KERNEL);
		if (!del_list)
			return -ENOMEM;

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter != 0)
				continue;
			f->changed = false;
			cmd_flags = 0;

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			del_list[num_del].vlan_tag =
				cpu_to_le16((u16)(f->vlan ==
					    I40E_VLAN_ANY ? 0 : f->vlan));

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* unlink from filter list */
			list_del(&f->list);
			kfree(f);

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
					    vsi->seid, del_list, num_del,
					    NULL);
				num_del = 0;
				memset(del_list, 0, sizeof(*del_list));

				if (aq_ret &&
				    pf->hw.aq.asq_last_status !=
							    I40E_AQ_RC_ENOENT)
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
						 aq_ret,
						 pf->hw.aq.asq_last_status);
			}
		}
		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
							del_list, num_del,
							NULL);
			num_del = 0;

			if (aq_ret &&
			    pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %d, aq_err %d\n",
					 aq_ret, pf->hw.aq.asq_last_status);
		}

		kfree(del_list);
		del_list = NULL;

		/* do all the adds now */
		filter_list_len = pf->hw.aq.asq_buf_size /
			    sizeof(struct i40e_aqc_add_macvlan_element_data);
1802 add_list = kcalloc(filter_list_len,
1803 sizeof(struct i40e_aqc_add_macvlan_element_data),
1804 GFP_KERNEL);
1805 if (!add_list)
1806 return -ENOMEM;
1807
1808 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1809 if (!f->changed)
1810 continue;
1811
1812 if (f->counter == 0)
1813 continue;
1814 f->changed = false;
1815 add_happened = true;
1816 cmd_flags = 0;
1817
1818 /* add to add array */
1819 ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1820 add_list[num_add].vlan_tag =
1821 cpu_to_le16(
1822 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1823 add_list[num_add].queue_number = 0;
1824
1825 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1826 add_list[num_add].flags = cpu_to_le16(cmd_flags);
1827 num_add++;
1828
1829 /* flush a full buffer */
1830 if (num_add == filter_list_len) {
1831 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1832 add_list, num_add,
1833 NULL);
1834 num_add = 0;
1835
1836 if (aq_ret)
1837 break;
1838 memset(add_list, 0, filter_list_len * sizeof(*add_list));
1839 }
1840 }
1841 if (num_add) {
1842 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1843 add_list, num_add, NULL);
1844 num_add = 0;
1845 }
1846 kfree(add_list);
1847 add_list = NULL;
1848
1849 if (add_happened && aq_ret &&
1850 pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
1851 dev_info(&pf->pdev->dev,
1852 "add filter failed, err %d, aq_err %d\n",
1853 aq_ret, pf->hw.aq.asq_last_status);
1854 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1855 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1856 &vsi->state)) {
1857 promisc_forced_on = true;
1858 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1859 &vsi->state);
1860 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1861 }
1862 }
1863 }
1864
1865 /* check for changes in promiscuous modes */
1866 if (changed_flags & IFF_ALLMULTI) {
1867 bool cur_multipromisc;
1868 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1869 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1870 vsi->seid,
1871 cur_multipromisc,
1872 NULL);
1873 if (aq_ret)
1874 dev_info(&pf->pdev->dev,
1875 "set multi promisc failed, err %d, aq_err %d\n",
1876 aq_ret, pf->hw.aq.asq_last_status);
1877 }
1878 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1879 bool cur_promisc;
1880 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1881 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1882 &vsi->state));
1883 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1884 vsi->seid,
1885 cur_promisc, NULL);
1886 if (aq_ret)
1887 dev_info(&pf->pdev->dev,
1888 "set uni promisc failed, err %d, aq_err %d\n",
1889 aq_ret, pf->hw.aq.asq_last_status);
1890 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
1891 vsi->seid,
1892 cur_promisc, NULL);
1893 if (aq_ret)
1894 dev_info(&pf->pdev->dev,
1895 "set brdcast promisc failed, err %d, aq_err %d\n",
1896 aq_ret, pf->hw.aq.asq_last_status);
1897 }
1898
1899 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1900 return 0;
1901 }
1902
1903 /**
1904 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1905 * @pf: board private structure
1906 **/
1907 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1908 {
1909 int v;
1910
1911 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1912 return;
1913 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1914
1915 for (v = 0; v < pf->num_alloc_vsi; v++) {
1916 if (pf->vsi[v] &&
1917 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1918 i40e_sync_vsi_filters(pf->vsi[v]);
1919 }
1920 }
1921
1922 /**
1923 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
1924 * @netdev: network interface device structure
1925 * @new_mtu: new value for maximum frame size
1926 *
1927 * Returns 0 on success, negative on failure
1928 **/
1929 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1930 {
1931 struct i40e_netdev_priv *np = netdev_priv(netdev);
1932 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1933 struct i40e_vsi *vsi = np->vsi;
1934
1935 /* MTU < 68 is an error and causes problems on some kernels */
1936 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
1937 return -EINVAL;
1938
1939 netdev_info(netdev, "changing MTU from %d to %d\n",
1940 netdev->mtu, new_mtu);
1941 netdev->mtu = new_mtu;
1942 if (netif_running(netdev))
1943 i40e_vsi_reinit_locked(vsi);
1944
1945 return 0;
1946 }
1947
1948 /**
1949 * i40e_ioctl - Access the hwtstamp interface
1950 * @netdev: network interface device structure
1951 * @ifr: interface request data
1952 * @cmd: ioctl command
1953 **/
1954 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1955 {
1956 struct i40e_netdev_priv *np = netdev_priv(netdev);
1957 struct i40e_pf *pf = np->vsi->back;
1958
1959 switch (cmd) {
1960 case SIOCGHWTSTAMP:
1961 return i40e_ptp_get_ts_config(pf, ifr);
1962 case SIOCSHWTSTAMP:
1963 return i40e_ptp_set_ts_config(pf, ifr);
1964 default:
1965 return -EOPNOTSUPP;
1966 }
1967 }
1968
1969 /**
1970 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
1971 * @vsi: the vsi being adjusted
1972 **/
1973 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
1974 {
1975 struct i40e_vsi_context ctxt;
1976 i40e_status ret;
1977
1978 if ((vsi->info.valid_sections &
1979 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1980 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
1981 return; /* already enabled */
1982
1983 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1984 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1985 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1986
1987 ctxt.seid = vsi->seid;
1988 ctxt.info = vsi->info;
1989 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1990 if (ret) {
1991 dev_info(&vsi->back->pdev->dev,
1992 "%s: update vsi failed, aq_err=%d\n",
1993 __func__, vsi->back->hw.aq.asq_last_status);
1994 }
1995 }
1996
1997 /**
1998 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
1999 * @vsi: the vsi being adjusted
2000 **/
2001 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2002 {
2003 struct i40e_vsi_context ctxt;
2004 i40e_status ret;
2005
2006 if ((vsi->info.valid_sections &
2007 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2008 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2009 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2010 return; /* already disabled */
2011
2012 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2013 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2014 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2015
2016 ctxt.seid = vsi->seid;
2017 ctxt.info = vsi->info;
2018 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2019 if (ret) {
2020 dev_info(&vsi->back->pdev->dev,
2021 "%s: update vsi failed, aq_err=%d\n",
2022 __func__, vsi->back->hw.aq.asq_last_status);
2023 }
2024 }
2025
2026 /**
2027 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2028 * @netdev: network interface to be adjusted
2029 * @features: netdev features to test if VLAN offload is enabled or not
2030 **/
2031 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2032 {
2033 struct i40e_netdev_priv *np = netdev_priv(netdev);
2034 struct i40e_vsi *vsi = np->vsi;
2035
2036 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2037 i40e_vlan_stripping_enable(vsi);
2038 else
2039 i40e_vlan_stripping_disable(vsi);
2040 }
2041
2042 /**
2043 * i40e_vsi_add_vlan - Add vsi membership for given vlan
2044 * @vsi: the vsi being configured
2045 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2046 **/
2047 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2048 {
2049 struct i40e_mac_filter *f, *add_f;
2050 bool is_netdev, is_vf;
2051
2052 is_vf = (vsi->type == I40E_VSI_SRIOV);
2053 is_netdev = !!(vsi->netdev);
2054
2055 if (is_netdev) {
2056 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
2057 is_vf, is_netdev);
2058 if (!add_f) {
2059 dev_info(&vsi->back->pdev->dev,
2060 "Could not add vlan filter %d for %pM\n",
2061 vid, vsi->netdev->dev_addr);
2062 return -ENOMEM;
2063 }
2064 }
2065
2066 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2067 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2068 if (!add_f) {
2069 dev_info(&vsi->back->pdev->dev,
2070 "Could not add vlan filter %d for %pM\n",
2071 vid, f->macaddr);
2072 return -ENOMEM;
2073 }
2074 }
2075
2076 /* Now that we've added a vlan tag, check whether the catch-all
2077 * "tag" -1 filter exists and, if so, replace it with a 0 "tag" so
2078 * we accept untagged plus the specified tagged traffic
2079 * (and not any tagged and untagged)
2080 */
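/* e.g. a MAC that previously carried only the catch-all -1 filter
* ends up with a vid 0 filter plus the newly added vid, instead of
* the -1 filter matching everything
*/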
2081 if (vid > 0) {
2082 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
2083 I40E_VLAN_ANY,
2084 is_vf, is_netdev)) {
2085 i40e_del_filter(vsi, vsi->netdev->dev_addr,
2086 I40E_VLAN_ANY, is_vf, is_netdev);
2087 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
2088 is_vf, is_netdev);
2089 if (!add_f) {
2090 dev_info(&vsi->back->pdev->dev,
2091 "Could not add filter 0 for %pM\n",
2092 vsi->netdev->dev_addr);
2093 return -ENOMEM;
2094 }
2095 }
2096 }
2097
2098 /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2099 if (vid > 0 && !vsi->info.pvid) {
2100 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2101 if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2102 is_vf, is_netdev)) {
2103 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2104 is_vf, is_netdev);
2105 add_f = i40e_add_filter(vsi, f->macaddr,
2106 0, is_vf, is_netdev);
2107 if (!add_f) {
2108 dev_info(&vsi->back->pdev->dev,
2109 "Could not add filter 0 for %pM\n",
2110 f->macaddr);
2111 return -ENOMEM;
2112 }
2113 }
2114 }
2115 }
2116
2117 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2118 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2119 return 0;
2120
2121 return i40e_sync_vsi_filters(vsi);
2122 }
2123
2124 /**
2125 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2126 * @vsi: the vsi being configured
2127 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2128 *
2129 * Return: 0 on success or negative otherwise
2130 **/
2131 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2132 {
2133 struct net_device *netdev = vsi->netdev;
2134 struct i40e_mac_filter *f, *add_f;
2135 bool is_vf, is_netdev;
2136 int filter_count = 0;
2137
2138 is_vf = (vsi->type == I40E_VSI_SRIOV);
2139 is_netdev = !!(netdev);
2140
2141 if (is_netdev)
2142 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
2143
2144 list_for_each_entry(f, &vsi->mac_filter_list, list)
2145 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2146
2147 /* go through all the filters for this VSI and if only the
2148 * vid == 0 filters remain, there are no other vlan filters, so
2149 * vid 0 must be replaced with -1. This signifies that from now
2150 * on we accept any traffic (with any tag present, or untagged)
2151 */
2152 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2153 if (is_netdev) {
2154 if (f->vlan &&
2155 ether_addr_equal(netdev->dev_addr, f->macaddr))
2156 filter_count++;
2157 }
2158
2159 if (f->vlan)
2160 filter_count++;
2161 }
2162
2163 if (!filter_count && is_netdev) {
2164 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
2165 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
2166 is_vf, is_netdev);
2167 if (!f) {
2168 dev_info(&vsi->back->pdev->dev,
2169 "Could not add filter %d for %pM\n",
2170 I40E_VLAN_ANY, netdev->dev_addr);
2171 return -ENOMEM;
2172 }
2173 }
2174
2175 if (!filter_count) {
2176 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2177 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
2178 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2179 is_vf, is_netdev);
2180 if (!add_f) {
2181 dev_info(&vsi->back->pdev->dev,
2182 "Could not add filter %d for %pM\n",
2183 I40E_VLAN_ANY, f->macaddr);
2184 return -ENOMEM;
2185 }
2186 }
2187 }
2188
2189 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2190 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2191 return 0;
2192
2193 return i40e_sync_vsi_filters(vsi);
2194 }
2195
2196 /**
2197 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2198 * @netdev: network interface to be adjusted
2199 * @vid: vlan id to be added
2200 *
2201 * net_device_ops implementation for adding vlan ids
2202 **/
2203 #ifdef I40E_FCOE
2204 int i40e_vlan_rx_add_vid(struct net_device *netdev,
2205 __always_unused __be16 proto, u16 vid)
2206 #else
2207 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2208 __always_unused __be16 proto, u16 vid)
2209 #endif
2210 {
2211 struct i40e_netdev_priv *np = netdev_priv(netdev);
2212 struct i40e_vsi *vsi = np->vsi;
2213 int ret = 0;
2214
2215 if (vid > 4095)
2216 return -EINVAL;
2217
2218 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
2219
2220 /* If the network stack called us with vid = 0 then
2221 * it is asking to receive priority tagged packets with
2222 * vlan id 0. Our HW receives them by default when configured
2223 * to receive untagged packets so there is no need to add an
2224 * extra filter for vlan 0 tagged packets.
2225 */
2226 if (vid)
2227 ret = i40e_vsi_add_vlan(vsi, vid);
2228
2229 if (!ret && (vid < VLAN_N_VID))
2230 set_bit(vid, vsi->active_vlans);
2231
2232 return ret;
2233 }
2234
2235 /**
2236 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2237 * @netdev: network interface to be adjusted
2238 * @vid: vlan id to be removed
2239 *
2240 * net_device_ops implementation for removing vlan ids
2241 **/
2242 #ifdef I40E_FCOE
2243 int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2244 __always_unused __be16 proto, u16 vid)
2245 #else
2246 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2247 __always_unused __be16 proto, u16 vid)
2248 #endif
2249 {
2250 struct i40e_netdev_priv *np = netdev_priv(netdev);
2251 struct i40e_vsi *vsi = np->vsi;
2252
2253 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2254
2255 /* return code is ignored as there is nothing a user
2256 * can do about failure to remove and a log message was
2257 * already printed from the other function
2258 */
2259 i40e_vsi_kill_vlan(vsi, vid);
2260
2261 clear_bit(vid, vsi->active_vlans);
2262
2263 return 0;
2264 }
2265
2266 /**
2267 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2268 * @vsi: the vsi being brought back up
2269 **/
2270 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2271 {
2272 u16 vid;
2273
2274 if (!vsi->netdev)
2275 return;
2276
2277 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2278
2279 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2280 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2281 vid);
2282 }
2283
2284 /**
2285 * i40e_vsi_add_pvid - Add pvid for the VSI
2286 * @vsi: the vsi being adjusted
2287 * @vid: the vlan id to set as a PVID
2288 **/
2289 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2290 {
2291 struct i40e_vsi_context ctxt;
2292 i40e_status aq_ret;
2293
2294 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2295 vsi->info.pvid = cpu_to_le16(vid);
2296 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2297 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2298 I40E_AQ_VSI_PVLAN_EMOD_STR;
2299
2300 ctxt.seid = vsi->seid;
2301 ctxt.info = vsi->info;
2302 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2303 if (aq_ret) {
2304 dev_info(&vsi->back->pdev->dev,
2305 "%s: update vsi failed, aq_err=%d\n",
2306 __func__, vsi->back->hw.aq.asq_last_status);
2307 return -ENOENT;
2308 }
2309
2310 return 0;
2311 }
2312
2313 /**
2314 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2315 * @vsi: the vsi being adjusted
2316 *
2317 * Just disable vlan stripping and clear the stored pvid to put the VSI back to normal
2318 **/
2319 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2320 {
2321 i40e_vlan_stripping_disable(vsi);
2322
2323 vsi->info.pvid = 0;
2324 }
2325
2326 /**
2327 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2328 * @vsi: ptr to the VSI
2329 *
2330 * If this function returns with an error, then it's possible one or
2331 * more of the rings is populated (while the rest are not). It is the
2332 * caller's duty to clean those orphaned rings.
2333 *
2334 * Return 0 on success, negative on failure
2335 **/
2336 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2337 {
2338 int i, err = 0;
2339
2340 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2341 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2342
2343 return err;
2344 }
2345
2346 /**
2347 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2348 * @vsi: ptr to the VSI
2349 *
2350 * Free VSI's transmit software resources
2351 **/
2352 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2353 {
2354 int i;
2355
2356 if (!vsi->tx_rings)
2357 return;
2358
2359 for (i = 0; i < vsi->num_queue_pairs; i++)
2360 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2361 i40e_free_tx_resources(vsi->tx_rings[i]);
2362 }
2363
2364 /**
2365 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2366 * @vsi: ptr to the VSI
2367 *
2368 * If this function returns with an error, then it's possible one or
2369 * more of the rings is populated (while the rest are not). It is the
2370 * caller's duty to clean those orphaned rings.
2371 *
2372 * Return 0 on success, negative on failure
2373 **/
2374 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2375 {
2376 int i, err = 0;
2377
2378 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2379 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2380 #ifdef I40E_FCOE
2381 i40e_fcoe_setup_ddp_resources(vsi);
2382 #endif
2383 return err;
2384 }
2385
2386 /**
2387 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2388 * @vsi: ptr to the VSI
2389 *
2390 * Free all receive software resources
2391 **/
2392 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2393 {
2394 int i;
2395
2396 if (!vsi->rx_rings)
2397 return;
2398
2399 for (i = 0; i < vsi->num_queue_pairs; i++)
2400 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2401 i40e_free_rx_resources(vsi->rx_rings[i]);
2402 #ifdef I40E_FCOE
2403 i40e_fcoe_free_ddp_resources(vsi);
2404 #endif
2405 }
2406
2407 /**
2408 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2409 * @ring: The Tx ring to configure
2410 *
2411 * This enables/disables XPS for a given Tx descriptor ring
2412 * based on the TCs enabled for the VSI that ring belongs to.
2413 **/
2414 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2415 {
2416 struct i40e_vsi *vsi = ring->vsi;
2417 cpumask_var_t mask;
2418
2419 if (!ring->q_vector || !ring->netdev)
2420 return;
2421
2422 /* In single TC mode, enable XPS */
2423 if (vsi->tc_config.numtc <= 1) {
2424 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2425 netif_set_xps_queue(ring->netdev,
2426 &ring->q_vector->affinity_mask,
2427 ring->queue_index);
2428 } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2429 /* Disable XPS to allow selection based on TC */
2430 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2431 netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2432 free_cpumask_var(mask);
2433 }
2434 }
2435
2436 /**
2437 * i40e_configure_tx_ring - Configure a transmit ring context and related state
2438 * @ring: The Tx ring to configure
2439 *
2440 * Configure the Tx descriptor ring in the HMC context.
2441 **/
2442 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2443 {
2444 struct i40e_vsi *vsi = ring->vsi;
2445 u16 pf_q = vsi->base_queue + ring->queue_index;
2446 struct i40e_hw *hw = &vsi->back->hw;
2447 struct i40e_hmc_obj_txq tx_ctx;
2448 i40e_status err = 0;
2449 u32 qtx_ctl = 0;
2450
2451 /* some ATR related tx ring init */
2452 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2453 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2454 ring->atr_count = 0;
2455 } else {
2456 ring->atr_sample_rate = 0;
2457 }
2458
2459 /* configure XPS */
2460 i40e_config_xps_tx_ring(ring);
2461
2462 /* clear the context structure first */
2463 memset(&tx_ctx, 0, sizeof(tx_ctx));
2464
2465 tx_ctx.new_context = 1;
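/* the HMC context stores the ring base address in 128-byte units */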
2466 tx_ctx.base = (ring->dma / 128);
2467 tx_ctx.qlen = ring->count;
2468 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2469 I40E_FLAG_FD_ATR_ENABLED));
2470 #ifdef I40E_FCOE
2471 tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2472 #endif
2473 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2474 /* FDIR VSI tx ring can still use RS bit and writebacks */
2475 if (vsi->type != I40E_VSI_FDIR)
2476 tx_ctx.head_wb_ena = 1;
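/* the head writeback area sits immediately past the last
* descriptor; the ring allocation reserves room for it there
*/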
2477 tx_ctx.head_wb_addr = ring->dma +
2478 (ring->count * sizeof(struct i40e_tx_desc));
2479
2480 /* As part of VSI creation/update, FW allocates certain
2481 * Tx arbitration queue sets for each TC enabled for
2482 * the VSI. The FW returns the handles to these queue
2483 * sets as part of the response buffer to Add VSI,
2484 * Update VSI, etc. AQ commands. It is expected that
2485 * these queue set handles be associated with the Tx
2486 * queues by the driver as part of the TX queue context
2487 * initialization. This has to be done regardless of
2488 * DCB as by default everything is mapped to TC0.
2489 */
2490 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2491 tx_ctx.rdylist_act = 0;
2492
2493 /* clear the context in the HMC */
2494 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2495 if (err) {
2496 dev_info(&vsi->back->pdev->dev,
2497 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2498 ring->queue_index, pf_q, err);
2499 return -ENOMEM;
2500 }
2501
2502 /* set the context in the HMC */
2503 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2504 if (err) {
2505 dev_info(&vsi->back->pdev->dev,
2506 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2507 ring->queue_index, pf_q, err);
2508 return -ENOMEM;
2509 }
2510
2511 /* Now associate this queue with this PCI function */
2512 if (vsi->type == I40E_VSI_VMDQ2) {
2513 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2514 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2515 I40E_QTX_CTL_VFVM_INDX_MASK;
2516 } else {
2517 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2518 }
2519
2520 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2521 I40E_QTX_CTL_PF_INDX_MASK);
2522 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2523 i40e_flush(hw);
2524
2525 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2526
2527 /* cache the tail register address for easier writes later */
2528 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2529
2530 return 0;
2531 }
2532
2533 /**
2534 * i40e_configure_rx_ring - Configure a receive ring context
2535 * @ring: The Rx ring to configure
2536 *
2537 * Configure the Rx descriptor ring in the HMC context.
2538 **/
2539 static int i40e_configure_rx_ring(struct i40e_ring *ring)
2540 {
2541 struct i40e_vsi *vsi = ring->vsi;
2542 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2543 u16 pf_q = vsi->base_queue + ring->queue_index;
2544 struct i40e_hw *hw = &vsi->back->hw;
2545 struct i40e_hmc_obj_rxq rx_ctx;
2546 i40e_status err = 0;
2547
2548 ring->state = 0;
2549
2550 /* clear the context structure first */
2551 memset(&rx_ctx, 0, sizeof(rx_ctx));
2552
2553 ring->rx_buf_len = vsi->rx_buf_len;
2554 ring->rx_hdr_len = vsi->rx_hdr_len;
2555
2556 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2557 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2558
2559 rx_ctx.base = (ring->dma / 128);
2560 rx_ctx.qlen = ring->count;
2561
2562 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2563 set_ring_16byte_desc_enabled(ring);
2564 rx_ctx.dsize = 0;
2565 } else {
2566 rx_ctx.dsize = 1;
2567 }
2568
2569 rx_ctx.dtype = vsi->dtype;
2570 if (vsi->dtype) {
2571 set_ring_ps_enabled(ring);
2572 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
2573 I40E_RX_SPLIT_IP |
2574 I40E_RX_SPLIT_TCP_UDP |
2575 I40E_RX_SPLIT_SCTP;
2576 } else {
2577 rx_ctx.hsplit_0 = 0;
2578 }
2579
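/* cap the receive frame size at the smaller of the configured max
* frame and what a full chain of rx buffers can hold
*/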
2580 rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2581 (chain_len * ring->rx_buf_len));
2582 if (hw->revision_id == 0)
2583 rx_ctx.lrxqthresh = 0;
2584 else
2585 rx_ctx.lrxqthresh = 2;
2586 rx_ctx.crcstrip = 1;
2587 rx_ctx.l2tsel = 1;
2588 rx_ctx.showiv = 1;
2589 #ifdef I40E_FCOE
2590 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2591 #endif
2592 /* set the prefena field to 1 because the manual says to */
2593 rx_ctx.prefena = 1;
2594
2595 /* clear the context in the HMC */
2596 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2597 if (err) {
2598 dev_info(&vsi->back->pdev->dev,
2599 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2600 ring->queue_index, pf_q, err);
2601 return -ENOMEM;
2602 }
2603
2604 /* set the context in the HMC */
2605 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2606 if (err) {
2607 dev_info(&vsi->back->pdev->dev,
2608 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2609 ring->queue_index, pf_q, err);
2610 return -ENOMEM;
2611 }
2612
2613 /* cache tail for quicker writes, and clear the reg before use */
2614 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2615 writel(0, ring->tail);
2616
2617 if (ring_is_ps_enabled(ring)) {
2618 i40e_alloc_rx_headers(ring);
2619 i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
2620 } else {
2621 i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
2622 }
2623
2624 return 0;
2625 }
2626
2627 /**
2628 * i40e_vsi_configure_tx - Configure the VSI for Tx
2629 * @vsi: VSI structure describing this set of rings and resources
2630 *
2631 * Configure the Tx VSI for operation.
2632 **/
2633 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2634 {
2635 int err = 0;
2636 u16 i;
2637
2638 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2639 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2640
2641 return err;
2642 }
2643
2644 /**
2645 * i40e_vsi_configure_rx - Configure the VSI for Rx
2646 * @vsi: the VSI being configured
2647 *
2648 * Configure the Rx VSI for operation.
2649 **/
2650 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2651 {
2652 int err = 0;
2653 u16 i;
2654
2655 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2656 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2657 + ETH_FCS_LEN + VLAN_HLEN;
2658 else
2659 vsi->max_frame = I40E_RXBUFFER_2048;
2660
2661 /* figure out correct receive buffer length */
2662 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2663 I40E_FLAG_RX_PS_ENABLED)) {
2664 case I40E_FLAG_RX_1BUF_ENABLED:
2665 vsi->rx_hdr_len = 0;
2666 vsi->rx_buf_len = vsi->max_frame;
2667 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2668 break;
2669 case I40E_FLAG_RX_PS_ENABLED:
2670 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2671 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2672 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2673 break;
2674 default:
2675 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2676 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2677 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2678 break;
2679 }
2680
2681 #ifdef I40E_FCOE
2682 /* setup rx buffer for FCoE */
2683 if ((vsi->type == I40E_VSI_FCOE) &&
2684 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
2685 vsi->rx_hdr_len = 0;
2686 vsi->rx_buf_len = I40E_RXBUFFER_3072;
2687 vsi->max_frame = I40E_RXBUFFER_3072;
2688 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2689 }
2690
2691 #endif /* I40E_FCOE */
2692 /* round up for the chip's needs */
2693 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2694 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2695 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2696 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2697
2698 /* set up individual rings */
2699 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2700 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2701
2702 return err;
2703 }
2704
2705 /**
2706 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2707 * @vsi: ptr to the VSI
2708 **/
2709 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2710 {
2711 struct i40e_ring *tx_ring, *rx_ring;
2712 u16 qoffset, qcount;
2713 int i, n;
2714
2715 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2716 /* Reset the TC information */
2717 for (i = 0; i < vsi->num_queue_pairs; i++) {
2718 rx_ring = vsi->rx_rings[i];
2719 tx_ring = vsi->tx_rings[i];
2720 rx_ring->dcb_tc = 0;
2721 tx_ring->dcb_tc = 0;
2722 }
2723 }
2724
2725 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2726 if (!(vsi->tc_config.enabled_tc & (1 << n)))
2727 continue;
2728
2729 qoffset = vsi->tc_config.tc_info[n].qoffset;
2730 qcount = vsi->tc_config.tc_info[n].qcount;
2731 for (i = qoffset; i < (qoffset + qcount); i++) {
2732 rx_ring = vsi->rx_rings[i];
2733 tx_ring = vsi->tx_rings[i];
2734 rx_ring->dcb_tc = n;
2735 tx_ring->dcb_tc = n;
2736 }
2737 }
2738 }
2739
2740 /**
2741 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2742 * @vsi: ptr to the VSI
2743 **/
2744 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2745 {
2746 if (vsi->netdev)
2747 i40e_set_rx_mode(vsi->netdev);
2748 }
2749
2750 /**
2751 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2752 * @vsi: Pointer to the targeted VSI
2753 *
2754 * This function replays the hlist on the hw where all the SB Flow Director
2755 * filters were saved.
2756 **/
2757 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2758 {
2759 struct i40e_fdir_filter *filter;
2760 struct i40e_pf *pf = vsi->back;
2761 struct hlist_node *node;
2762
2763 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2764 return;
2765
2766 hlist_for_each_entry_safe(filter, node,
2767 &pf->fdir_filter_list, fdir_node) {
2768 i40e_add_del_fdir(vsi, filter, true);
2769 }
2770 }
2771
2772 /**
2773 * i40e_vsi_configure - Set up the VSI for action
2774 * @vsi: the VSI being configured
2775 **/
2776 static int i40e_vsi_configure(struct i40e_vsi *vsi)
2777 {
2778 int err;
2779
2780 i40e_set_vsi_rx_mode(vsi);
2781 i40e_restore_vlan(vsi);
2782 i40e_vsi_config_dcb_rings(vsi);
2783 err = i40e_vsi_configure_tx(vsi);
2784 if (!err)
2785 err = i40e_vsi_configure_rx(vsi);
2786
2787 return err;
2788 }
2789
2790 /**
2791 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2792 * @vsi: the VSI being configured
2793 **/
2794 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2795 {
2796 struct i40e_pf *pf = vsi->back;
2797 struct i40e_q_vector *q_vector;
2798 struct i40e_hw *hw = &pf->hw;
2799 u16 vector;
2800 int i, q;
2801 u32 val;
2802 u32 qp;
2803
2804 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2805 * and PFINT_LNKLSTn registers, e.g.:
2806 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
2807 */
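/* e.g. the q_vector on MSI-X entry 1 programs PFINT_ITRN(..., 0)
* and PFINT_LNKLSTN(0), hence the "vector - 1" register indexing
* used below
*/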
2808 qp = vsi->base_queue;
2809 vector = vsi->base_vector;
2810 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2811 q_vector = vsi->q_vectors[i];
2812 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2813 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2814 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2815 q_vector->rx.itr);
2816 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2817 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2818 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2819 q_vector->tx.itr);
2820
2821 /* Linked list for the queuepairs assigned to this vector */
2822 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2823 for (q = 0; q < q_vector->num_ringpairs; q++) {
2824 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2825 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2826 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2827 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2828 (I40E_QUEUE_TYPE_TX
2829 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2830
2831 wr32(hw, I40E_QINT_RQCTL(qp), val);
2832
2833 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2834 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2835 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2836 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2837 (I40E_QUEUE_TYPE_RX
2838 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2839
2840 /* Terminate the linked list */
2841 if (q == (q_vector->num_ringpairs - 1))
2842 val |= (I40E_QUEUE_END_OF_LIST
2843 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2844
2845 wr32(hw, I40E_QINT_TQCTL(qp), val);
2846 qp++;
2847 }
2848 }
2849
2850 i40e_flush(hw);
2851 }
2852
2853 /**
2854 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2855 * @pf: board private structure
2856 **/
2857 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
2858 {
2859 struct i40e_hw *hw = &pf->hw;
2860 u32 val;
2861
2862 /* clear things first */
2863 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2864 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2865
2866 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2867 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2868 I40E_PFINT_ICR0_ENA_GRST_MASK |
2869 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2870 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2871 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2872 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2873 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2874
2875 if (pf->flags & I40E_FLAG_PTP)
2876 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
2877
2878 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2879
2880 /* SW_ITR_IDX = 0, but don't change INTENA */
2881 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2882 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2883
2884 /* OTHER_ITR_IDX = 0 */
2885 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2886 }
2887
2888 /**
2889 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2890 * @vsi: the VSI being configured
2891 **/
2892 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2893 {
2894 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2895 struct i40e_pf *pf = vsi->back;
2896 struct i40e_hw *hw = &pf->hw;
2897 u32 val;
2898
2899 /* set the ITR configuration */
2900 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2901 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2902 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2903 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2904 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2905 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2906
2907 i40e_enable_misc_int_causes(pf);
2908
2909 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2910 wr32(hw, I40E_PFINT_LNKLST0, 0);
2911
2912 /* Associate the queue pair to the vector and enable the queue int */
2913 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2914 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2915 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2916
2917 wr32(hw, I40E_QINT_RQCTL(0), val);
2918
2919 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2920 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2921 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2922
2923 wr32(hw, I40E_QINT_TQCTL(0), val);
2924 i40e_flush(hw);
2925 }
2926
2927 /**
2928 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
2929 * @pf: board private structure
2930 **/
2931 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
2932 {
2933 struct i40e_hw *hw = &pf->hw;
2934
2935 wr32(hw, I40E_PFINT_DYN_CTL0,
2936 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2937 i40e_flush(hw);
2938 }
2939
2940 /**
2941 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2942 * @pf: board private structure
2943 **/
2944 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2945 {
2946 struct i40e_hw *hw = &pf->hw;
2947 u32 val;
2948
2949 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2950 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2951 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2952
2953 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2954 i40e_flush(hw);
2955 }
2956
2957 /**
2958 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2959 * @vsi: pointer to a vsi
2960 * @vector: the Hw Interrupt vector to enable
2961 **/
2962 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2963 {
2964 struct i40e_pf *pf = vsi->back;
2965 struct i40e_hw *hw = &pf->hw;
2966 u32 val;
2967
2968 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2969 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2970 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2971 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2972 /* skip the flush */
2973 }
2974
2975 /**
2976 * i40e_irq_dynamic_disable - Disable default interrupt generation settings
2977 * @vsi: pointer to a vsi
2978 * @vector: the Hw Interrupt vector to disable
2979 **/
2980 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
2981 {
2982 struct i40e_pf *pf = vsi->back;
2983 struct i40e_hw *hw = &pf->hw;
2984 u32 val;
2985
2986 val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2987 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2988 i40e_flush(hw);
2989 }
2990
2991 /**
2992 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2993 * @irq: interrupt number
2994 * @data: pointer to a q_vector
2995 **/
2996 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2997 {
2998 struct i40e_q_vector *q_vector = data;
2999
3000 if (!q_vector->tx.ring && !q_vector->rx.ring)
3001 return IRQ_HANDLED;
3002
3003 napi_schedule(&q_vector->napi);
3004
3005 return IRQ_HANDLED;
3006 }
3007
3008 /**
3009 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3010 * @vsi: the VSI being configured
3011 * @basename: name for the vector
3012 *
3013 * Allocates MSI-X vectors and requests interrupts from the kernel.
3014 **/
3015 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3016 {
3017 int q_vectors = vsi->num_q_vectors;
3018 struct i40e_pf *pf = vsi->back;
3019 int base = vsi->base_vector;
3020 int rx_int_idx = 0;
3021 int tx_int_idx = 0;
3022 int vector, err;
3023
3024 for (vector = 0; vector < q_vectors; vector++) {
3025 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3026
3027 if (q_vector->tx.ring && q_vector->rx.ring) {
3028 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3029 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3030 tx_int_idx++;
3031 } else if (q_vector->rx.ring) {
3032 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3033 "%s-%s-%d", basename, "rx", rx_int_idx++);
3034 } else if (q_vector->tx.ring) {
3035 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3036 "%s-%s-%d", basename, "tx", tx_int_idx++);
3037 } else {
3038 /* skip this unused q_vector */
3039 continue;
3040 }
3041 err = request_irq(pf->msix_entries[base + vector].vector,
3042 vsi->irq_handler,
3043 0,
3044 q_vector->name,
3045 q_vector);
3046 if (err) {
3047 dev_info(&pf->pdev->dev,
3048 "%s: request_irq failed, error: %d\n",
3049 __func__, err);
3050 goto free_queue_irqs;
3051 }
3052 /* assign the mask for this irq */
3053 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3054 &q_vector->affinity_mask);
3055 }
3056
3057 vsi->irqs_ready = true;
3058 return 0;
3059
3060 free_queue_irqs:
3061 while (vector) {
3062 vector--;
3063 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3064 NULL);
3065 free_irq(pf->msix_entries[base + vector].vector,
3066 &(vsi->q_vectors[vector]));
3067 }
3068 return err;
3069 }
3070
3071 /**
3072 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3073 * @vsi: the VSI being un-configured
3074 **/
3075 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3076 {
3077 struct i40e_pf *pf = vsi->back;
3078 struct i40e_hw *hw = &pf->hw;
3079 int base = vsi->base_vector;
3080 int i;
3081
3082 for (i = 0; i < vsi->num_queue_pairs; i++) {
3083 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3084 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
3085 }
3086
3087 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3088 for (i = vsi->base_vector;
3089 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3090 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3091
3092 i40e_flush(hw);
3093 for (i = 0; i < vsi->num_q_vectors; i++)
3094 synchronize_irq(pf->msix_entries[i + base].vector);
3095 } else {
3096 /* Legacy and MSI mode - this stops all interrupt handling */
3097 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3098 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3099 i40e_flush(hw);
3100 synchronize_irq(pf->pdev->irq);
3101 }
3102 }
3103
3104 /**
3105 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3106 * @vsi: the VSI being configured
3107 **/
3108 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3109 {
3110 struct i40e_pf *pf = vsi->back;
3111 int i;
3112
3113 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3114 for (i = vsi->base_vector;
3115 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3116 i40e_irq_dynamic_enable(vsi, i);
3117 } else {
3118 i40e_irq_dynamic_enable_icr0(pf);
3119 }
3120
3121 i40e_flush(&pf->hw);
3122 return 0;
3123 }
3124
3125 /**
3126 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3127 * @pf: board private structure
3128 **/
3129 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3130 {
3131 /* Disable ICR 0 */
3132 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3133 i40e_flush(&pf->hw);
3134 }
3135
3136 /**
3137 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3138 * @irq: interrupt number
3139 * @data: pointer to a q_vector
3140 *
3141 * This is the handler used for all MSI/Legacy interrupts, and deals
3142 * with both queue and non-queue interrupts. This is also used in
3143 * MSIX mode to handle the non-queue interrupts.
3144 **/
3145 static irqreturn_t i40e_intr(int irq, void *data)
3146 {
3147 struct i40e_pf *pf = (struct i40e_pf *)data;
3148 struct i40e_hw *hw = &pf->hw;
3149 irqreturn_t ret = IRQ_NONE;
3150 u32 icr0, icr0_remaining;
3151 u32 val, ena_mask;
3152
3153 icr0 = rd32(hw, I40E_PFINT_ICR0);
3154 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3155
3156 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3157 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3158 goto enable_intr;
3159
3160 /* if interrupt but no bits showing, must be SWINT */
3161 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3162 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3163 pf->sw_int_count++;
3164
3165 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3166 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3167
3168 /* temporarily disable queue cause for NAPI processing */
3169 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
3170 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3171 wr32(hw, I40E_QINT_RQCTL(0), qval);
3172
3173 qval = rd32(hw, I40E_QINT_TQCTL(0));
3174 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3175 wr32(hw, I40E_QINT_TQCTL(0), qval);
3176
3177 if (!test_bit(__I40E_DOWN, &pf->state))
3178 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
3179 }
3180
3181 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3182 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3183 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
3184 }
3185
3186 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3187 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3188 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3189 }
3190
3191 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3192 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3193 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3194 }
3195
3196 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3197 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3198 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3199 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3200 val = rd32(hw, I40E_GLGEN_RSTAT);
3201 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3202 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3203 if (val == I40E_RESET_CORER) {
3204 pf->corer_count++;
3205 } else if (val == I40E_RESET_GLOBR) {
3206 pf->globr_count++;
3207 } else if (val == I40E_RESET_EMPR) {
3208 pf->empr_count++;
3209 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
3210 }
3211 }
3212
3213 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3214 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3215 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3216 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3217 rd32(hw, I40E_PFHMC_ERRORINFO),
3218 rd32(hw, I40E_PFHMC_ERRORDATA));
3219 }
3220
3221 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3222 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3223
3224 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3225 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3226 i40e_ptp_tx_hwtstamp(pf);
3227 }
3228 }
3229
3230 /* If a critical error is pending we have no choice but to reset the
3231 * device.
3232 * Report and mask out any remaining unexpected interrupts.
3233 */
3234 icr0_remaining = icr0 & ena_mask;
3235 if (icr0_remaining) {
3236 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3237 icr0_remaining);
3238 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3239 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3240 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3241 dev_info(&pf->pdev->dev, "device will be reset\n");
3242 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3243 i40e_service_event_schedule(pf);
3244 }
3245 ena_mask &= ~icr0_remaining;
3246 }
3247 ret = IRQ_HANDLED;
3248
3249 enable_intr:
3250 /* re-enable interrupt causes */
3251 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3252 if (!test_bit(__I40E_DOWN, &pf->state)) {
3253 i40e_service_event_schedule(pf);
3254 i40e_irq_dynamic_enable_icr0(pf);
3255 }
3256
3257 return ret;
3258 }
3259
3260 /**
3261 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3262 * @tx_ring: tx ring to clean
3263 * @budget: how many cleans we're allowed
3264 *
3265 * Returns true if there's any budget left (i.e. the clean is finished)
3266 **/
3267 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3268 {
3269 struct i40e_vsi *vsi = tx_ring->vsi;
3270 u16 i = tx_ring->next_to_clean;
3271 struct i40e_tx_buffer *tx_buf;
3272 struct i40e_tx_desc *tx_desc;
3273
3274 tx_buf = &tx_ring->tx_bi[i];
3275 tx_desc = I40E_TX_DESC(tx_ring, i);
3276 i -= tx_ring->count;
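/* bias the index by -count so the ring-wrap check below is a
* simple !i test instead of a compare against tx_ring->count
*/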
3277
3278 do {
3279 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3280
3281 /* if next_to_watch is not set then there is no work pending */
3282 if (!eop_desc)
3283 break;
3284
3285 /* prevent any other reads prior to eop_desc */
3286 read_barrier_depends();
3287
3288 /* if the descriptor isn't done, no work yet to do */
3289 if (!(eop_desc->cmd_type_offset_bsz &
3290 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3291 break;
3292
3293 /* clear next_to_watch to prevent false hangs */
3294 tx_buf->next_to_watch = NULL;
3295
3296 tx_desc->buffer_addr = 0;
3297 tx_desc->cmd_type_offset_bsz = 0;
3298 /* move past filter desc */
3299 tx_buf++;
3300 tx_desc++;
3301 i++;
3302 if (unlikely(!i)) {
3303 i -= tx_ring->count;
3304 tx_buf = tx_ring->tx_bi;
3305 tx_desc = I40E_TX_DESC(tx_ring, 0);
3306 }
3307 /* unmap skb header data */
3308 dma_unmap_single(tx_ring->dev,
3309 dma_unmap_addr(tx_buf, dma),
3310 dma_unmap_len(tx_buf, len),
3311 DMA_TO_DEVICE);
3312 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3313 kfree(tx_buf->raw_buf);
3314
3315 tx_buf->raw_buf = NULL;
3316 tx_buf->tx_flags = 0;
3317 tx_buf->next_to_watch = NULL;
3318 dma_unmap_len_set(tx_buf, len, 0);
3319 tx_desc->buffer_addr = 0;
3320 tx_desc->cmd_type_offset_bsz = 0;
3321
3322 /* move us past the eop_desc for start of next FD desc */
3323 tx_buf++;
3324 tx_desc++;
3325 i++;
3326 if (unlikely(!i)) {
3327 i -= tx_ring->count;
3328 tx_buf = tx_ring->tx_bi;
3329 tx_desc = I40E_TX_DESC(tx_ring, 0);
3330 }
3331
3332 /* update budget accounting */
3333 budget--;
3334 } while (likely(budget));
3335
3336 i += tx_ring->count;
3337 tx_ring->next_to_clean = i;
3338
3339 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
3340 i40e_irq_dynamic_enable(vsi,
3341 tx_ring->q_vector->v_idx + vsi->base_vector);
3342 }
3343 return budget > 0;
3344 }
3345
3346 /**
3347 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3348 * @irq: interrupt number
3349 * @data: pointer to a q_vector
3350 **/
3351 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3352 {
3353 struct i40e_q_vector *q_vector = data;
3354 struct i40e_vsi *vsi;
3355
3356 if (!q_vector->tx.ring)
3357 return IRQ_HANDLED;
3358
3359 vsi = q_vector->tx.ring->vsi;
3360 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3361
3362 return IRQ_HANDLED;
3363 }
3364
3365 /**
3366 * map_vector_to_qp - Assigns the queue pair to the vector
3367 * @vsi: the VSI being configured
3368 * @v_idx: vector index
3369 * @qp_idx: queue pair index
3370 **/
3371 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3372 {
3373 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3374 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3375 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3376
3377 tx_ring->q_vector = q_vector;
3378 tx_ring->next = q_vector->tx.ring;
3379 q_vector->tx.ring = tx_ring;
3380 q_vector->tx.count++;
3381
3382 rx_ring->q_vector = q_vector;
3383 rx_ring->next = q_vector->rx.ring;
3384 q_vector->rx.ring = rx_ring;
3385 q_vector->rx.count++;
3386 }
3387
3388 /**
3389 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3390 * @vsi: the VSI being configured
3391 *
3392 * This function maps descriptor rings to the queue-specific vectors
3393 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3394 * one vector per queue pair, but on a constrained vector budget, we
3395 * group the queue pairs as "efficiently" as possible.
3396 **/
3397 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3398 {
3399 int qp_remaining = vsi->num_queue_pairs;
3400 int q_vectors = vsi->num_q_vectors;
3401 int num_ringpairs;
3402 int v_start = 0;
3403 int qp_idx = 0;
3404
3405 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3406 * group them so there are multiple queues per vector.
3407 * It is also important to go through all the vectors available to
3408 * be sure that, if we don't use them all, the remaining vectors are
3409 * cleared. This is especially important when decreasing the
3410 * number of queues in use.
3411 */
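/* e.g. 10 queue pairs across 4 vectors split 3/3/2/2:
* DIV_ROUND_UP(10, 4) = 3, DIV_ROUND_UP(7, 3) = 3,
* DIV_ROUND_UP(4, 2) = 2, DIV_ROUND_UP(2, 1) = 2
*/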
3412 for (; v_start < q_vectors; v_start++) {
3413 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3414
3415 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3416
3417 q_vector->num_ringpairs = num_ringpairs;
3418
3419 q_vector->rx.count = 0;
3420 q_vector->tx.count = 0;
3421 q_vector->rx.ring = NULL;
3422 q_vector->tx.ring = NULL;
3423
3424 while (num_ringpairs--) {
3425 map_vector_to_qp(vsi, v_start, qp_idx);
3426 qp_idx++;
3427 qp_remaining--;
3428 }
3429 }
3430 }
3431
3432 /**
3433 * i40e_vsi_request_irq - Request IRQ from the OS
3434 * @vsi: the VSI being configured
3435 * @basename: name for the vector
3436 **/
3437 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3438 {
3439 struct i40e_pf *pf = vsi->back;
3440 int err;
3441
3442 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3443 err = i40e_vsi_request_irq_msix(vsi, basename);
3444 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3445 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3446 pf->int_name, pf);
3447 else
3448 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3449 pf->int_name, pf);
3450
3451 if (err)
3452 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3453
3454 return err;
3455 }
3456
3457 #ifdef CONFIG_NET_POLL_CONTROLLER
3458 /**
3459 * i40e_netpoll - A Polling 'interrupt' handler
3460 * @netdev: network interface device structure
3461 *
3462 * This is used by netconsole to send skbs without having to re-enable
3463 * interrupts. It's not called while the normal interrupt routine is executing.
3464 **/
3465 #ifdef I40E_FCOE
3466 void i40e_netpoll(struct net_device *netdev)
3467 #else
3468 static void i40e_netpoll(struct net_device *netdev)
3469 #endif
3470 {
3471 struct i40e_netdev_priv *np = netdev_priv(netdev);
3472 struct i40e_vsi *vsi = np->vsi;
3473 struct i40e_pf *pf = vsi->back;
3474 int i;
3475
3476 /* if interface is down do nothing */
3477 if (test_bit(__I40E_DOWN, &vsi->state))
3478 return;
3479
3480 pf->flags |= I40E_FLAG_IN_NETPOLL;
3481 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3482 for (i = 0; i < vsi->num_q_vectors; i++)
3483 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3484 } else {
3485 i40e_intr(pf->pdev->irq, netdev);
3486 }
3487 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
3488 }
3489 #endif
3490
3491 /**
3492 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3493 * @pf: the PF being configured
3494 * @pf_q: the PF queue
3495 * @enable: enable or disable state of the queue
3496 *
3497 * This routine will wait for the given Tx queue of the PF to reach the
3498 * enabled or disabled state.
3499 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3500 * multiple retries; else will return 0 in case of success.
3501 **/
3502 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3503 {
3504 int i;
3505 u32 tx_reg;
3506
3507 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3508 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3509 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3510 break;
3511
3512 usleep_range(10, 20);
3513 }
3514 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3515 return -ETIMEDOUT;
3516
3517 return 0;
3518 }
3519
3520 /**
3521 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
3522 * @vsi: the VSI being configured
3523 * @enable: start or stop the rings
3524 **/
3525 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3526 {
3527 struct i40e_pf *pf = vsi->back;
3528 struct i40e_hw *hw = &pf->hw;
3529 int i, j, pf_q, ret = 0;
3530 u32 tx_reg;
3531
3532 pf_q = vsi->base_queue;
3533 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3534
3535 /* warn the TX unit of coming changes */
3536 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3537 if (!enable)
3538 usleep_range(10, 20);
3539
3540 for (j = 0; j < 50; j++) {
3541 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3542 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3543 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3544 break;
3545 usleep_range(1000, 2000);
3546 }
3547 /* Skip if the queue is already in the requested state */
3548 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3549 continue;
3550
3551 /* turn on/off the queue */
3552 if (enable) {
3553 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3554 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3555 } else {
3556 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3557 }
3558
3559 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3560 /* Don't wait for the Tx queue to disable when port Tx is suspended */
3561 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3562 continue;
3563
3564 /* wait for the change to finish */
3565 ret = i40e_pf_txq_wait(pf, pf_q, enable);
3566 if (ret) {
3567 dev_info(&pf->pdev->dev,
3568 "%s: VSI seid %d Tx ring %d %sable timeout\n",
3569 __func__, vsi->seid, pf_q,
3570 (enable ? "en" : "dis"));
3571 break;
3572 }
3573 }
3574
3575 if (hw->revision_id == 0)
3576 mdelay(50);
3577 return ret;
3578 }
3579
3580 /**
3581 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3582 * @pf: the PF being configured
3583 * @pf_q: the PF queue
3584 * @enable: enable or disable state of the queue
3585 *
3586 * This routine will wait for the given Rx queue of the PF to reach the
3587 * enabled or disabled state.
3588 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3589 * multiple retries; else will return 0 in case of success.
3590 **/
3591 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3592 {
3593 int i;
3594 u32 rx_reg;
3595
3596 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3597 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3598 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3599 break;
3600
3601 usleep_range(10, 20);
3602 }
3603 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3604 return -ETIMEDOUT;
3605
3606 return 0;
3607 }
3608
3609 /**
3610 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
3611 * @vsi: the VSI being configured
3612 * @enable: start or stop the rings
3613 **/
3614 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3615 {
3616 struct i40e_pf *pf = vsi->back;
3617 struct i40e_hw *hw = &pf->hw;
3618 int i, j, pf_q, ret = 0;
3619 u32 rx_reg;
3620
3621 pf_q = vsi->base_queue;
3622 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3623 for (j = 0; j < 50; j++) {
3624 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3625 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3626 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3627 break;
3628 usleep_range(1000, 2000);
3629 }
3630
3631 /* Skip if the queue is already in the requested state */
3632 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3633 continue;
3634
3635 /* turn on/off the queue */
3636 if (enable)
3637 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3638 else
3639 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3640 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3641
3642 /* wait for the change to finish */
3643 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3644 if (ret) {
3645 dev_info(&pf->pdev->dev,
3646 "%s: VSI seid %d Rx ring %d %sable timeout\n",
3647 __func__, vsi->seid, pf_q,
3648 (enable ? "en" : "dis"));
3649 break;
3650 }
3651 }
3652
3653 return ret;
3654 }
3655
3656 /**
3657 * i40e_vsi_control_rings - Start or stop a VSI's rings
3658 * @vsi: the VSI being configured
3659 * @request: start or stop the rings
3660 **/
3661 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3662 {
3663 int ret = 0;
3664
3665 /* do rx first for enable and last for disable */
3666 if (request) {
3667 ret = i40e_vsi_control_rx(vsi, request);
3668 if (ret)
3669 return ret;
3670 ret = i40e_vsi_control_tx(vsi, request);
3671 } else {
3672 /* Ignore return value, we need to shutdown whatever we can */
3673 i40e_vsi_control_tx(vsi, request);
3674 i40e_vsi_control_rx(vsi, request);
3675 }
3676
3677 return ret;
3678 }
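/* Usage sketch (illustrative; mirrors i40e_up_complete() and i40e_down()
 * later in this file). Enabling starts Rx before Tx, and disabling stops
 * Tx before Rx:
 *
 *	err = i40e_vsi_control_rings(vsi, true);   // start: Rx then Tx
 *	if (err)
 *		return err;
 *	...
 *	i40e_vsi_control_rings(vsi, false);        // stop: Tx then Rx
 */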
3679
3680 /**
3681 * i40e_vsi_free_irq - Free the irq association with the OS
3682 * @vsi: the VSI being configured
3683 **/
3684 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3685 {
3686 struct i40e_pf *pf = vsi->back;
3687 struct i40e_hw *hw = &pf->hw;
3688 int base = vsi->base_vector;
3689 u32 val, qp;
3690 int i;
3691
3692 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3693 if (!vsi->q_vectors)
3694 return;
3695
3696 if (!vsi->irqs_ready)
3697 return;
3698
3699 vsi->irqs_ready = false;
3700 for (i = 0; i < vsi->num_q_vectors; i++) {
3701 u16 vector = i + base;
3702
3703 /* free only the irqs that were actually requested */
3704 if (!vsi->q_vectors[i] ||
3705 !vsi->q_vectors[i]->num_ringpairs)
3706 continue;
3707
3708 /* clear the affinity_mask in the IRQ descriptor */
3709 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3710 NULL);
3711 free_irq(pf->msix_entries[vector].vector,
3712 vsi->q_vectors[i]);
3713
3714 /* Tear down the interrupt queue link list
3715 *
3716 * We know that they come in pairs and always
3717 * the Rx first, then the Tx. To clear the
3718 * link list, stick the EOL value into the
3719 * next_q field of the registers.
3720 */
3721 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3722 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3723 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3724 val |= I40E_QUEUE_END_OF_LIST
3725 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3726 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3727
3728 while (qp != I40E_QUEUE_END_OF_LIST) {
3729 u32 next;
3730
3731 val = rd32(hw, I40E_QINT_RQCTL(qp));
3732
3733 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3734 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3735 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3736 I40E_QINT_RQCTL_INTEVENT_MASK);
3737
3738 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3739 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3740
3741 wr32(hw, I40E_QINT_RQCTL(qp), val);
3742
3743 val = rd32(hw, I40E_QINT_TQCTL(qp));
3744
3745 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3746 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3747
3748 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3749 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3750 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3751 I40E_QINT_TQCTL_INTEVENT_MASK);
3752
3753 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3754 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3755
3756 wr32(hw, I40E_QINT_TQCTL(qp), val);
3757 qp = next;
3758 }
3759 }
3760 } else {
3761 free_irq(pf->pdev->irq, pf);
3762
3763 val = rd32(hw, I40E_PFINT_LNKLST0);
3764 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3765 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3766 val |= I40E_QUEUE_END_OF_LIST
3767 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3768 wr32(hw, I40E_PFINT_LNKLST0, val);
3769
3770 val = rd32(hw, I40E_QINT_RQCTL(qp));
3771 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3772 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3773 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3774 I40E_QINT_RQCTL_INTEVENT_MASK);
3775
3776 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3777 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3778
3779 wr32(hw, I40E_QINT_RQCTL(qp), val);
3780
3781 val = rd32(hw, I40E_QINT_TQCTL(qp));
3782
3783 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3784 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3785 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3786 I40E_QINT_TQCTL_INTEVENT_MASK);
3787
3788 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3789 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3790
3791 wr32(hw, I40E_QINT_TQCTL(qp), val);
3792 }
3793 }
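/* Worked example of the MSI-X teardown above (illustrative queue numbers):
 * suppose vector n's list holds queue pairs 3 and 4. Then
 * PFINT_LNKLSTN(n - 1) points at queue 3 (Rx first), QINT_RQCTL(3) chains
 * to queue 3's Tx, and QINT_TQCTL(3).NEXTQ_INDX points at queue 4;
 * QINT_TQCTL(4).NEXTQ_INDX holds I40E_QUEUE_END_OF_LIST. The while loop
 * therefore visits queues 3 and 4, clears both halves of each pair, and
 * stops at the EOL marker.
 */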
3794
3795 /**
3796 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3797 * @vsi: the VSI being configured
3798 * @v_idx: Index of vector to be freed
3799 *
3800 * This function frees the memory allocated to the q_vector. In addition if
3801 * NAPI is enabled it will delete any references to the NAPI struct prior
3802 * to freeing the q_vector.
3803 **/
3804 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3805 {
3806 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3807 struct i40e_ring *ring;
3808
3809 if (!q_vector)
3810 return;
3811
3812 /* disassociate q_vector from rings */
3813 i40e_for_each_ring(ring, q_vector->tx)
3814 ring->q_vector = NULL;
3815
3816 i40e_for_each_ring(ring, q_vector->rx)
3817 ring->q_vector = NULL;
3818
3819 /* only VSI w/ an associated netdev is set up w/ NAPI */
3820 if (vsi->netdev)
3821 netif_napi_del(&q_vector->napi);
3822
3823 vsi->q_vectors[v_idx] = NULL;
3824
3825 kfree_rcu(q_vector, rcu);
3826 }
3827
3828 /**
3829 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3830 * @vsi: the VSI being un-configured
3831 *
3832 * This frees the memory allocated to the q_vectors and
3833 * deletes references to the NAPI struct.
3834 **/
3835 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3836 {
3837 int v_idx;
3838
3839 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3840 i40e_free_q_vector(vsi, v_idx);
3841 }
3842
3843 /**
3844 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3845 * @pf: board private structure
3846 **/
3847 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3848 {
3849 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3850 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3851 pci_disable_msix(pf->pdev);
3852 kfree(pf->msix_entries);
3853 pf->msix_entries = NULL;
3854 kfree(pf->irq_pile);
3855 pf->irq_pile = NULL;
3856 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3857 pci_disable_msi(pf->pdev);
3858 }
3859 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3860 }
3861
3862 /**
3863 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3864 * @pf: board private structure
3865 *
3866 * We go through and clear interrupt specific resources and reset the structure
3867 * to pre-load conditions
3868 **/
3869 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3870 {
3871 int i;
3872
3873 i40e_stop_misc_vector(pf);
3874 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3875 synchronize_irq(pf->msix_entries[0].vector);
3876 free_irq(pf->msix_entries[0].vector, pf);
3877 }
3878
3879 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3880 for (i = 0; i < pf->num_alloc_vsi; i++)
3881 if (pf->vsi[i])
3882 i40e_vsi_free_q_vectors(pf->vsi[i]);
3883 i40e_reset_interrupt_capability(pf);
3884 }
3885
3886 /**
3887 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3888 * @vsi: the VSI being configured
3889 **/
3890 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3891 {
3892 int q_idx;
3893
3894 if (!vsi->netdev)
3895 return;
3896
3897 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3898 napi_enable(&vsi->q_vectors[q_idx]->napi);
3899 }
3900
3901 /**
3902 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3903 * @vsi: the VSI being configured
3904 **/
3905 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3906 {
3907 int q_idx;
3908
3909 if (!vsi->netdev)
3910 return;
3911
3912 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3913 napi_disable(&vsi->q_vectors[q_idx]->napi);
3914 }
3915
3916 /**
3917 * i40e_vsi_close - Shut down a VSI
3918 * @vsi: the vsi to be quelled
3919 **/
3920 static void i40e_vsi_close(struct i40e_vsi *vsi)
3921 {
3922 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
3923 i40e_down(vsi);
3924 i40e_vsi_free_irq(vsi);
3925 i40e_vsi_free_tx_resources(vsi);
3926 i40e_vsi_free_rx_resources(vsi);
3927 }
3928
3929 /**
3930 * i40e_quiesce_vsi - Pause a given VSI
3931 * @vsi: the VSI being paused
3932 **/
3933 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3934 {
3935 if (test_bit(__I40E_DOWN, &vsi->state))
3936 return;
3937
3938 /* No need to disable FCoE VSI when Tx suspended */
3939 if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
3940 vsi->type == I40E_VSI_FCOE) {
3941 dev_dbg(&vsi->back->pdev->dev,
3942 "%s: VSI seid %d skipping FCoE VSI disable\n",
3943 __func__, vsi->seid);
3944 return;
3945 }
3946
3947 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3948 if (vsi->netdev && netif_running(vsi->netdev)) {
3949 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3950 } else {
3951 i40e_vsi_close(vsi);
3952 }
3953 }
3954
3955 /**
3956 * i40e_unquiesce_vsi - Resume a given VSI
3957 * @vsi: the VSI being resumed
3958 **/
3959 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3960 {
3961 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3962 return;
3963
3964 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3965 if (vsi->netdev && netif_running(vsi->netdev))
3966 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3967 else
3968 i40e_vsi_open(vsi); /* this clears the DOWN bit */
3969 }
3970
3971 /**
3972 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3973 * @pf: the PF
3974 **/
3975 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3976 {
3977 int v;
3978
3979 for (v = 0; v < pf->num_alloc_vsi; v++) {
3980 if (pf->vsi[v])
3981 i40e_quiesce_vsi(pf->vsi[v]);
3982 }
3983 }
3984
3985 /**
3986 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3987 * @pf: the PF
3988 **/
3989 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3990 {
3991 int v;
3992
3993 for (v = 0; v < pf->num_alloc_vsi; v++) {
3994 if (pf->vsi[v])
3995 i40e_unquiesce_vsi(pf->vsi[v]);
3996 }
3997 }
3998
3999 #ifdef CONFIG_I40E_DCB
4000 /**
4001 * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
4002 * @vsi: the VSI being configured
4003 *
4004 * This function waits for the given VSI's Tx queues to be disabled.
4005 **/
4006 static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
4007 {
4008 struct i40e_pf *pf = vsi->back;
4009 int i, pf_q, ret;
4010
4011 pf_q = vsi->base_queue;
4012 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4013 /* Check and wait for the disable status of the queue */
4014 ret = i40e_pf_txq_wait(pf, pf_q, false);
4015 if (ret) {
4016 dev_info(&pf->pdev->dev,
4017 "%s: VSI seid %d Tx ring %d disable timeout\n",
4018 __func__, vsi->seid, pf_q);
4019 return ret;
4020 }
4021 }
4022
4023 return 0;
4024 }
4025
4026 /**
4027 * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
4028 * @pf: the PF
4029 *
4030 * This function waits for the Tx queues to be in disabled state for all the
4031 * VSIs that are managed by this PF.
4032 **/
4033 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
4034 {
4035 int v, ret = 0;
4036
4037 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4038 /* No need to wait for FCoE VSI queues */
4039 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4040 ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
4041 if (ret)
4042 break;
4043 }
4044 }
4045
4046 return ret;
4047 }
4048
4049 #endif
4050 /**
4051 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4052 * @pf: pointer to PF
4053 *
4054 * Get TC map for iSCSI PF type that will include iSCSI TC
4055 * and LAN TC.
4056 **/
4057 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4058 {
4059 struct i40e_dcb_app_priority_table app;
4060 struct i40e_hw *hw = &pf->hw;
4061 u8 enabled_tc = 1; /* TC0 is always enabled */
4062 u8 tc, i;
4063 /* Get the iSCSI APP TLV */
4064 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4065
4066 for (i = 0; i < dcbcfg->numapps; i++) {
4067 app = dcbcfg->app[i];
4068 if (app.selector == I40E_APP_SEL_TCPIP &&
4069 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4070 tc = dcbcfg->etscfg.prioritytable[app.priority];
4071 enabled_tc |= (1 << tc);
4072 break;
4073 }
4074 }
4075
4076 return enabled_tc;
4077 }
4078
4079 /**
4080 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4081 * @dcbcfg: the corresponding DCBx configuration structure
4082 *
4083 * Return the number of TCs from given DCBx configuration
4084 **/
4085 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4086 {
4087 u8 num_tc = 0;
4088 int i;
4089
4090 /* Scan the ETS Config Priority Table to find the
4091 * traffic class enabled for each priority, and use
4092 * the highest traffic class index found to derive
4093 * the number of traffic classes enabled
4094 */
4095 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4096 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4097 num_tc = dcbcfg->etscfg.prioritytable[i];
4098 }
4099
4100 /* Traffic class index starts from zero so
4101 * increment to return the actual count
4102 */
4103 return num_tc + 1;
4104 }
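/* Worked example (illustrative priority table, not a hardware default):
 * with etscfg.prioritytable = {0, 0, 1, 1, 2, 0, 0, 0}, the highest TC
 * index seen is 2, so this returns 2 + 1 = 3 traffic classes.
 */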
4105
4106 /**
4107 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4108 * @dcbcfg: the corresponding DCBx configuration structure
4109 *
4110 * Query the current DCB configuration and return a bitmap of the
4111 * traffic classes enabled from the given DCBX config
4112 **/
4113 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4114 {
4115 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4116 u8 enabled_tc = 1;
4117 u8 i;
4118
4119 for (i = 0; i < num_tc; i++)
4120 enabled_tc |= 1 << i;
4121
4122 return enabled_tc;
4123 }
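/* Worked example: if i40e_dcb_get_num_tc() reports 3 TCs, the loop ORs
 * in bits 0..2 and this returns 0x07 (TC0, TC1 and TC2 enabled).
 */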
4124
4125 /**
4126 * i40e_pf_get_num_tc - Get the number of traffic classes enabled for the PF
4127 * @pf: PF being queried
4128 *
4129 * Return number of traffic classes enabled for the given PF
4130 **/
4131 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4132 {
4133 struct i40e_hw *hw = &pf->hw;
4134 u8 i, enabled_tc;
4135 u8 num_tc = 0;
4136 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4137
4138 /* If DCB is not enabled, the PF is always in a single TC */
4139 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4140 return 1;
4141
4142 /* In SFP mode, all TCs on the port are enabled */
4143 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4144 return i40e_dcb_get_num_tc(dcbcfg);
4145
4146 /* In MFP mode, return the count of TCs enabled for this PF */
4147 if (pf->hw.func_caps.iscsi)
4148 enabled_tc = i40e_get_iscsi_tc_map(pf);
4149 else
4150 return 1; /* Only TC0 */
4151
4152 /* At least have TC0 */
4153 enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4154 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4155 if (enabled_tc & (1 << i))
4156 num_tc++;
4157 }
4158 return num_tc;
4159 }
4160
4161 /**
4162 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4163 * @pf: PF being queried
4164 *
4165 * Return a bitmap for first enabled traffic class for this PF.
4166 **/
4167 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4168 {
4169 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4170 u8 i = 0;
4171
4172 if (!enabled_tc)
4173 return 0x1; /* TC0 */
4174
4175 /* Find the first enabled TC */
4176 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4177 if (enabled_tc & (1 << i))
4178 break;
4179 }
4180
4181 return 1 << i;
4182 }
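/* Worked example (illustrative capability value): with
 * func_caps.enabled_tcmap = 0x0C (TC2 and TC3 enabled), the first set
 * bit is bit 2, so this returns 1 << 2 = 0x04.
 */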
4183
4184 /**
4185 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
4186 * @pf: PF being queried
4187 *
4188 * Return a bitmap for enabled traffic classes for this PF.
4189 **/
4190 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4191 {
4192 /* If DCB is not enabled for this PF then just return default TC */
4193 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4194 return i40e_pf_get_default_tc(pf);
4195
4196 /* In SFP mode we want the PF to be enabled for all TCs */
4197 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4198 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4199
4200 /* MFP enabled and iSCSI PF type */
4201 if (pf->hw.func_caps.iscsi)
4202 return i40e_get_iscsi_tc_map(pf);
4203 else
4204 return i40e_pf_get_default_tc(pf);
4205 }
4206
4207 /**
4208 * i40e_vsi_get_bw_info - Query VSI BW Information
4209 * @vsi: the VSI being queried
4210 *
4211 * Returns 0 on success, negative value on failure
4212 **/
4213 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4214 {
4215 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4216 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4217 struct i40e_pf *pf = vsi->back;
4218 struct i40e_hw *hw = &pf->hw;
4219 i40e_status aq_ret;
4220 u32 tc_bw_max;
4221 int i;
4222
4223 /* Get the VSI level BW configuration */
4224 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4225 if (aq_ret) {
4226 dev_info(&pf->pdev->dev,
4227 "couldn't get PF vsi bw config, err %d, aq_err %d\n",
4228 aq_ret, pf->hw.aq.asq_last_status);
4229 return -EINVAL;
4230 }
4231
4232 /* Get the VSI level BW configuration per TC */
4233 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4234 NULL);
4235 if (aq_ret) {
4236 dev_info(&pf->pdev->dev,
4237 "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
4238 aq_ret, pf->hw.aq.asq_last_status);
4239 return -EINVAL;
4240 }
4241
4242 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4243 dev_info(&pf->pdev->dev,
4244 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4245 bw_config.tc_valid_bits,
4246 bw_ets_config.tc_valid_bits);
4247 /* Still continuing */
4248 }
4249
4250 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4251 vsi->bw_max_quanta = bw_config.max_bw;
4252 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4253 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4254 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4255 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4256 vsi->bw_ets_limit_credits[i] =
4257 le16_to_cpu(bw_ets_config.credits[i]);
4258 /* 3 bits out of 4 for each TC */
4259 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4260 }
4261
4262 return 0;
4263 }
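/* Worked example of the tc_bw_max unpacking above (illustrative values):
 * if tc_bw_max[0] = 0x4321 and tc_bw_max[1] = 0x8765, the combined word
 * is 0x87654321 and each TC's max quanta is the low 3 bits of its
 * nibble: TC0 = (0x87654321 >> 0) & 0x7 = 1,
 * TC1 = (0x87654321 >> 4) & 0x7 = 2, and so on.
 */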
4264
4265 /**
4266 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4267 * @vsi: the VSI being configured
4268 * @enabled_tc: TC bitmap
4269 * @bw_credits: BW shared credits per TC
4270 *
4271 * Returns 0 on success, negative value on failure
4272 **/
4273 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4274 u8 *bw_share)
4275 {
4276 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4277 i40e_status aq_ret;
4278 int i;
4279
4280 bw_data.tc_valid_bits = enabled_tc;
4281 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4282 bw_data.tc_bw_credits[i] = bw_share[i];
4283
4284 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4285 NULL);
4286 if (aq_ret) {
4287 dev_info(&vsi->back->pdev->dev,
4288 "AQ command Config VSI BW allocation per TC failed = %d\n",
4289 vsi->back->hw.aq.asq_last_status);
4290 return -EINVAL;
4291 }
4292
4293 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4294 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4295
4296 return 0;
4297 }
4298
4299 /**
4300 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4301 * @vsi: the VSI being configured
4302 * @enabled_tc: TC map to be enabled
4303 *
4304 **/
4305 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4306 {
4307 struct net_device *netdev = vsi->netdev;
4308 struct i40e_pf *pf = vsi->back;
4309 struct i40e_hw *hw = &pf->hw;
4310 u8 netdev_tc = 0;
4311 int i;
4312 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4313
4314 if (!netdev)
4315 return;
4316
4317 if (!enabled_tc) {
4318 netdev_reset_tc(netdev);
4319 return;
4320 }
4321
4322 /* Set up actual enabled TCs on the VSI */
4323 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4324 return;
4325
4326 /* set per TC queues for the VSI */
4327 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4328 /* Only set TC queues for enabled tcs
4329 *
4330 * e.g. for a VSI that has TC0 and TC3 enabled, the
4331 * enabled_tc bitmap would be 0x09 (binary 1001); the driver
4332 * will set numtc for the netdev to 2, and the netdev
4333 * layer will reference them as TC 0 and 1.
4334 */
4335 if (vsi->tc_config.enabled_tc & (1 << i))
4336 netdev_set_tc_queue(netdev,
4337 vsi->tc_config.tc_info[i].netdev_tc,
4338 vsi->tc_config.tc_info[i].qcount,
4339 vsi->tc_config.tc_info[i].qoffset);
4340 }
4341
4342 /* Assign UP2TC map for the VSI */
4343 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4344 /* Get the actual TC# for the UP */
4345 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4346 /* Get the mapped netdev TC# for the UP */
4347 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
4348 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4349 }
4350 }
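/* Worked example of the UP2TC mapping above (illustrative table): with
 * prioritytable = {0, 0, 0, 1, 0, 0, 0, 0}, user priority 3 maps to
 * hardware TC 1; if tc_info[1].netdev_tc is 1, netdev_set_prio_tc_map()
 * then steers priority-3 traffic to netdev TC 1.
 */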
4351
4352 /**
4353 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
4354 * @vsi: the VSI being configured
4355 * @ctxt: the ctxt buffer returned from AQ VSI update param command
4356 **/
4357 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4358 struct i40e_vsi_context *ctxt)
4359 {
4360 /* copy just the sections touched, not the entire info,
4361 * since not all sections are valid as returned by
4362 * update vsi params
4363 */
4364 vsi->info.mapping_flags = ctxt->info.mapping_flags;
4365 memcpy(&vsi->info.queue_mapping,
4366 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4367 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4368 sizeof(vsi->info.tc_mapping));
4369 }
4370
4371 /**
4372 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4373 * @vsi: VSI to be configured
4374 * @enabled_tc: TC bitmap
4375 *
4376 * This configures a particular VSI for TCs that are mapped to the
4377 * given TC bitmap. It uses default bandwidth share for TCs across
4378 * VSIs to configure TC for a particular VSI.
4379 *
4380 * NOTE:
4381 * It is expected that the VSI queues have been quiesced before calling
4382 * this function.
4383 **/
4384 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4385 {
4386 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4387 struct i40e_vsi_context ctxt;
4388 int ret = 0;
4389 int i;
4390
4391 /* Check if enabled_tc is same as existing or new TCs */
4392 if (vsi->tc_config.enabled_tc == enabled_tc)
4393 return ret;
4394
4395 /* Enable ETS TCs with equal BW Share for now across all VSIs */
4396 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4397 if (enabled_tc & (1 << i))
4398 bw_share[i] = 1;
4399 }
4400
4401 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4402 if (ret) {
4403 dev_info(&vsi->back->pdev->dev,
4404 "Failed configuring TC map %d for VSI %d\n",
4405 enabled_tc, vsi->seid);
4406 goto out;
4407 }
4408
4409 /* Update Queue Pairs Mapping for currently enabled UPs */
4410 ctxt.seid = vsi->seid;
4411 ctxt.pf_num = vsi->back->hw.pf_id;
4412 ctxt.vf_num = 0;
4413 ctxt.uplink_seid = vsi->uplink_seid;
4414 ctxt.info = vsi->info;
4415 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4416
4417 /* Update the VSI after updating the VSI queue-mapping information */
4418 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4419 if (ret) {
4420 dev_info(&vsi->back->pdev->dev,
4421 "update vsi failed, aq_err=%d\n",
4422 vsi->back->hw.aq.asq_last_status);
4423 goto out;
4424 }
4425 /* update the local VSI info with updated queue map */
4426 i40e_vsi_update_queue_map(vsi, &ctxt);
4427 vsi->info.valid_sections = 0;
4428
4429 /* Update current VSI BW information */
4430 ret = i40e_vsi_get_bw_info(vsi);
4431 if (ret) {
4432 dev_info(&vsi->back->pdev->dev,
4433 "Failed updating vsi bw info, aq_err=%d\n",
4434 vsi->back->hw.aq.asq_last_status);
4435 goto out;
4436 }
4437
4438 /* Update the netdev TC setup */
4439 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4440 out:
4441 return ret;
4442 }
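/* Worked example: a request for enabled_tc = 0x03 (TC0 and TC1) sets
 * bw_share = {1, 1, 0, ...}, i.e. equal relative ETS credits for the
 * two enabled TCs, then rewrites the queue map and refreshes the cached
 * BW info.
 */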
4443
4444 /**
4445 * i40e_veb_config_tc - Configure TCs for given VEB
4446 * @veb: given VEB
4447 * @enabled_tc: TC bitmap
4448 *
4449 * Configures given TC bitmap for VEB (switching) element
4450 **/
4451 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4452 {
4453 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4454 struct i40e_pf *pf = veb->pf;
4455 int ret = 0;
4456 int i;
4457
4458 /* If no TCs are requested, or they are already enabled, just return */
4459 if (!enabled_tc || veb->enabled_tc == enabled_tc)
4460 return ret;
4461
4462 bw_data.tc_valid_bits = enabled_tc;
4463 /* bw_data.absolute_credits is not set (relative) */
4464
4465 /* Enable ETS TCs with equal BW Share for now */
4466 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4467 if (enabled_tc & (1 << i))
4468 bw_data.tc_bw_share_credits[i] = 1;
4469 }
4470
4471 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4472 &bw_data, NULL);
4473 if (ret) {
4474 dev_info(&pf->pdev->dev,
4475 "veb bw config failed, aq_err=%d\n",
4476 pf->hw.aq.asq_last_status);
4477 goto out;
4478 }
4479
4480 /* Update the BW information */
4481 ret = i40e_veb_get_bw_info(veb);
4482 if (ret) {
4483 dev_info(&pf->pdev->dev,
4484 "Failed getting veb bw config, aq_err=%d\n",
4485 pf->hw.aq.asq_last_status);
4486 }
4487
4488 out:
4489 return ret;
4490 }
4491
4492 #ifdef CONFIG_I40E_DCB
4493 /**
4494 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4495 * @pf: PF struct
4496 *
4497 * Reconfigure VEB/VSIs on a given PF; it is assumed that
4498 * the caller will have quiesced all the VSIs before calling
4499 * this function
4500 **/
4501 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4502 {
4503 u8 tc_map = 0;
4504 int ret;
4505 u8 v;
4506
4507 /* Enable the TCs available on PF to all VEBs */
4508 tc_map = i40e_pf_get_tc_map(pf);
4509 for (v = 0; v < I40E_MAX_VEB; v++) {
4510 if (!pf->veb[v])
4511 continue;
4512 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4513 if (ret) {
4514 dev_info(&pf->pdev->dev,
4515 "Failed configuring TC for VEB seid=%d\n",
4516 pf->veb[v]->seid);
4517 /* Will try to configure as many components as possible */
4518 }
4519 }
4520
4521 /* Update each VSI */
4522 for (v = 0; v < pf->num_alloc_vsi; v++) {
4523 if (!pf->vsi[v])
4524 continue;
4525
4526 /* - Enable all TCs for the LAN VSI
4527 #ifdef I40E_FCOE
4528 * - For FCoE VSI only enable the TC configured
4529 * as per the APP TLV
4530 #endif
4531 * - For all others keep them at TC0 for now
4532 */
4533 if (v == pf->lan_vsi)
4534 tc_map = i40e_pf_get_tc_map(pf);
4535 else
4536 tc_map = i40e_pf_get_default_tc(pf);
4537 #ifdef I40E_FCOE
4538 if (pf->vsi[v]->type == I40E_VSI_FCOE)
4539 tc_map = i40e_get_fcoe_tc_map(pf);
4540 #endif /* #ifdef I40E_FCOE */
4541
4542 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4543 if (ret) {
4544 dev_info(&pf->pdev->dev,
4545 "Failed configuring TC for VSI seid=%d\n",
4546 pf->vsi[v]->seid);
4547 /* Will try to configure as many components as possible */
4548 } else {
4549 /* Re-configure VSI vectors based on updated TC map */
4550 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
4551 if (pf->vsi[v]->netdev)
4552 i40e_dcbnl_set_all(pf->vsi[v]);
4553 }
4554 }
4555 }
4556
4557 /**
4558 * i40e_resume_port_tx - Resume port Tx
4559 * @pf: PF struct
4560 *
4561 * Resume a port's Tx and issue a PF reset in case of failure to
4562 * resume.
4563 **/
4564 static int i40e_resume_port_tx(struct i40e_pf *pf)
4565 {
4566 struct i40e_hw *hw = &pf->hw;
4567 int ret;
4568
4569 ret = i40e_aq_resume_port_tx(hw, NULL);
4570 if (ret) {
4571 dev_info(&pf->pdev->dev,
4572 "AQ command Resume Port Tx failed = %d\n",
4573 pf->hw.aq.asq_last_status);
4574 /* Schedule PF reset to recover */
4575 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4576 i40e_service_event_schedule(pf);
4577 }
4578
4579 return ret;
4580 }
4581
4582 /**
4583 * i40e_init_pf_dcb - Initialize DCB configuration
4584 * @pf: PF being configured
4585 *
4586 * Query the current DCB configuration and cache it
4587 * in the hardware structure
4588 **/
4589 static int i40e_init_pf_dcb(struct i40e_pf *pf)
4590 {
4591 struct i40e_hw *hw = &pf->hw;
4592 int err = 0;
4593
4594 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
4595 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
4596 (pf->hw.aq.fw_maj_ver < 4))
4597 goto out;
4598
4599 /* Get the initial DCB configuration */
4600 err = i40e_init_dcb(hw);
4601 if (!err) {
4602 /* Device/Function is not DCBX capable */
4603 if ((!hw->func_caps.dcb) ||
4604 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
4605 dev_info(&pf->pdev->dev,
4606 "DCBX offload is not supported or is disabled for this PF.\n");
4607
4608 if (pf->flags & I40E_FLAG_MFP_ENABLED)
4609 goto out;
4610
4611 } else {
4612 /* When status is not DISABLED, DCBX is managed by the FW */
4613 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
4614 DCB_CAP_DCBX_VER_IEEE;
4615
4616 pf->flags |= I40E_FLAG_DCB_CAPABLE;
4617 /* Enable DCB tagging only when more than one TC */
4618 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
4619 pf->flags |= I40E_FLAG_DCB_ENABLED;
4620 dev_dbg(&pf->pdev->dev,
4621 "DCBX offload is supported for this PF.\n");
4622 }
4623 } else {
4624 dev_info(&pf->pdev->dev,
4625 "AQ Querying DCB configuration failed: aq_err %d\n",
4626 pf->hw.aq.asq_last_status);
4627 }
4628
4629 out:
4630 return err;
4631 }
4632 #endif /* CONFIG_I40E_DCB */
4633 #define SPEED_SIZE 14
4634 #define FC_SIZE 8
4635 /**
4636 * i40e_print_link_message - print link up or down
4637 * @vsi: the VSI for which link needs a message
 * @isup: true if the link is up, false otherwise
4638 **/
4639 static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
4640 {
4641 char speed[SPEED_SIZE] = "Unknown";
4642 char fc[FC_SIZE] = "RX/TX";
4643
4644 if (!isup) {
4645 netdev_info(vsi->netdev, "NIC Link is Down\n");
4646 return;
4647 }
4648
4649 /* Warn the user if the link speed on an NPAR-enabled partition is
4650 * less than 10 Gbps
4651 */
4652 if (vsi->back->hw.func_caps.npar_enable &&
4653 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
4654 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
4655 netdev_warn(vsi->netdev,
4656 "The partition detected link speed that is less than 10Gbps\n");
4657
4658 switch (vsi->back->hw.phy.link_info.link_speed) {
4659 case I40E_LINK_SPEED_40GB:
4660 strlcpy(speed, "40 Gbps", SPEED_SIZE);
4661 break;
4662 case I40E_LINK_SPEED_20GB:
4663 strlcpy(speed, "20 Gbps", SPEED_SIZE);
4664 break;
4665 case I40E_LINK_SPEED_10GB:
4666 strlcpy(speed, "10 Gbps", SPEED_SIZE);
4667 break;
4668 case I40E_LINK_SPEED_1GB:
4669 strlcpy(speed, "1000 Mbps", SPEED_SIZE);
4670 break;
4671 case I40E_LINK_SPEED_100MB:
4672 strlcpy(speed, "100 Mbps", SPEED_SIZE);
4673 break;
4674 default:
4675 break;
4676 }
4677
4678 switch (vsi->back->hw.fc.current_mode) {
4679 case I40E_FC_FULL:
4680 strlcpy(fc, "RX/TX", FC_SIZE);
4681 break;
4682 case I40E_FC_TX_PAUSE:
4683 strlcpy(fc, "TX", FC_SIZE);
4684 break;
4685 case I40E_FC_RX_PAUSE:
4686 strlcpy(fc, "RX", FC_SIZE);
4687 break;
4688 default:
4689 strlcpy(fc, "None", FC_SIZE);
4690 break;
4691 }
4692
4693 netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
4694 speed, fc);
4695 }
4696
4697 /**
4698 * i40e_up_complete - Finish the last steps of bringing up a connection
4699 * @vsi: the VSI being configured
4700 **/
4701 static int i40e_up_complete(struct i40e_vsi *vsi)
4702 {
4703 struct i40e_pf *pf = vsi->back;
4704 int err;
4705
4706 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4707 i40e_vsi_configure_msix(vsi);
4708 else
4709 i40e_configure_msi_and_legacy(vsi);
4710
4711 /* start rings */
4712 err = i40e_vsi_control_rings(vsi, true);
4713 if (err)
4714 return err;
4715
4716 clear_bit(__I40E_DOWN, &vsi->state);
4717 i40e_napi_enable_all(vsi);
4718 i40e_vsi_enable_irq(vsi);
4719
4720 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
4721 (vsi->netdev)) {
4722 i40e_print_link_message(vsi, true);
4723 netif_tx_start_all_queues(vsi->netdev);
4724 netif_carrier_on(vsi->netdev);
4725 } else if (vsi->netdev) {
4726 i40e_print_link_message(vsi, false);
4727 /* need to check for a qualified module here */
4728 if ((pf->hw.phy.link_info.link_info &
4729 I40E_AQ_MEDIA_AVAILABLE) &&
4730 (!(pf->hw.phy.link_info.an_info &
4731 I40E_AQ_QUALIFIED_MODULE)))
4732 netdev_err(vsi->netdev,
4733 "the driver failed to link because an unqualified module was detected.");
4734 }
4735
4736 /* replay FDIR SB filters */
4737 if (vsi->type == I40E_VSI_FDIR) {
4738 /* reset fd counters */
4739 pf->fd_add_err = pf->fd_atr_cnt = 0;
4740 if (pf->fd_tcp_rule > 0) {
4741 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
4742 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
4743 pf->fd_tcp_rule = 0;
4744 }
4745 i40e_fdir_filter_restore(vsi);
4746 }
4747 i40e_service_event_schedule(pf);
4748
4749 return 0;
4750 }
4751
4752 /**
4753 * i40e_vsi_reinit_locked - Reset the VSI
4754 * @vsi: the VSI being configured
4755 *
4756 * Rebuild the ring structs after some configuration
4757 * has changed, e.g. MTU size.
4758 **/
4759 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
4760 {
4761 struct i40e_pf *pf = vsi->back;
4762
4763 WARN_ON(in_interrupt());
4764 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
4765 usleep_range(1000, 2000);
4766 i40e_down(vsi);
4767
4768 /* Give a VF some time to respond to the reset. The
4769 * two second wait is based upon the watchdog cycle in
4770 * the VF driver.
4771 */
4772 if (vsi->type == I40E_VSI_SRIOV)
4773 msleep(2000);
4774 i40e_up(vsi);
4775 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
4776 }
4777
4778 /**
4779 * i40e_up - Bring the connection back up after being down
4780 * @vsi: the VSI being configured
4781 **/
4782 int i40e_up(struct i40e_vsi *vsi)
4783 {
4784 int err;
4785
4786 err = i40e_vsi_configure(vsi);
4787 if (!err)
4788 err = i40e_up_complete(vsi);
4789
4790 return err;
4791 }
4792
4793 /**
4794 * i40e_down - Shutdown the connection processing
4795 * @vsi: the VSI being stopped
4796 **/
4797 void i40e_down(struct i40e_vsi *vsi)
4798 {
4799 int i;
4800
4801 /* It is assumed that the caller of this function
4802 * sets the vsi->state __I40E_DOWN bit.
4803 */
4804 if (vsi->netdev) {
4805 netif_carrier_off(vsi->netdev);
4806 netif_tx_disable(vsi->netdev);
4807 }
4808 i40e_vsi_disable_irq(vsi);
4809 i40e_vsi_control_rings(vsi, false);
4810 i40e_napi_disable_all(vsi);
4811
4812 for (i = 0; i < vsi->num_queue_pairs; i++) {
4813 i40e_clean_tx_ring(vsi->tx_rings[i]);
4814 i40e_clean_rx_ring(vsi->rx_rings[i]);
4815 }
4816 }
4817
4818 /**
4819 * i40e_setup_tc - configure multiple traffic classes
4820 * @netdev: net device to configure
4821 * @tc: number of traffic classes to enable
4822 **/
4823 #ifdef I40E_FCOE
4824 int i40e_setup_tc(struct net_device *netdev, u8 tc)
4825 #else
4826 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
4827 #endif
4828 {
4829 struct i40e_netdev_priv *np = netdev_priv(netdev);
4830 struct i40e_vsi *vsi = np->vsi;
4831 struct i40e_pf *pf = vsi->back;
4832 u8 enabled_tc = 0;
4833 int ret = -EINVAL;
4834 int i;
4835
4836 /* Check that DCB is enabled before continuing */
4837 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
4838 netdev_info(netdev, "DCB is not enabled for adapter\n");
4839 goto exit;
4840 }
4841
4842 /* Check if MFP enabled */
4843 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4844 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
4845 goto exit;
4846 }
4847
4848 /* Check whether tc count is within enabled limit */
4849 if (tc > i40e_pf_get_num_tc(pf)) {
4850 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
4851 goto exit;
4852 }
4853
4854 /* Generate TC map for number of tc requested */
4855 for (i = 0; i < tc; i++)
4856 enabled_tc |= (1 << i);
4857
4858 /* Requesting same TC configuration as already enabled */
4859 if (enabled_tc == vsi->tc_config.enabled_tc)
4860 return 0;
4861
4862 /* Quiesce VSI queues */
4863 i40e_quiesce_vsi(vsi);
4864
4865 /* Configure VSI for enabled TCs */
4866 ret = i40e_vsi_config_tc(vsi, enabled_tc);
4867 if (ret) {
4868 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
4869 vsi->seid);
4870 goto exit;
4871 }
4872
4873 /* Unquiesce VSI */
4874 i40e_unquiesce_vsi(vsi);
4875
4876 exit:
4877 return ret;
4878 }
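/* Worked example: a request for tc = 2 (e.g. from an mqprio qdisc)
 * builds enabled_tc = 0x03, quiesces the VSI, reprograms it for TC0 and
 * TC1, and then resumes it.
 */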
4879
4880 /**
4881 * i40e_open - Called when a network interface is made active
4882 * @netdev: network interface device structure
4883 *
4884 * The open entry point is called when a network interface is made
4885 * active by the system (IFF_UP). At this point all resources needed
4886 * for transmit and receive operations are allocated, the interrupt
4887 * handler is registered with the OS, the netdev watchdog subtask is
4888 * enabled, and the stack is notified that the interface is ready.
4889 *
4890 * Returns 0 on success, negative value on failure
4891 **/
4892 int i40e_open(struct net_device *netdev)
4893 {
4894 struct i40e_netdev_priv *np = netdev_priv(netdev);
4895 struct i40e_vsi *vsi = np->vsi;
4896 struct i40e_pf *pf = vsi->back;
4897 int err;
4898
4899 /* disallow open during test or if eeprom is broken */
4900 if (test_bit(__I40E_TESTING, &pf->state) ||
4901 test_bit(__I40E_BAD_EEPROM, &pf->state))
4902 return -EBUSY;
4903
4904 netif_carrier_off(netdev);
4905
4906 err = i40e_vsi_open(vsi);
4907 if (err)
4908 return err;
4909
4910 /* configure global TSO hardware offload settings */
4911 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
4912 TCP_FLAG_FIN) >> 16);
4913 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
4914 TCP_FLAG_FIN |
4915 TCP_FLAG_CWR) >> 16);
4916 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
4917
4918 #ifdef CONFIG_I40E_VXLAN
4919 vxlan_get_rx_port(netdev);
4920 #endif
4921
4922 return 0;
4923 }
4924
4925 /**
4926 * i40e_vsi_open - Bring up a VSI
4927 * @vsi: the VSI to open
4928 *
4929 * Finish initialization of the VSI.
4930 *
4931 * Returns 0 on success, negative value on failure
4932 **/
4933 int i40e_vsi_open(struct i40e_vsi *vsi)
4934 {
4935 struct i40e_pf *pf = vsi->back;
4936 char int_name[I40E_INT_NAME_STR_LEN];
4937 int err;
4938
4939 /* allocate descriptors */
4940 err = i40e_vsi_setup_tx_resources(vsi);
4941 if (err)
4942 goto err_setup_tx;
4943 err = i40e_vsi_setup_rx_resources(vsi);
4944 if (err)
4945 goto err_setup_rx;
4946
4947 err = i40e_vsi_configure(vsi);
4948 if (err)
4949 goto err_setup_rx;
4950
4951 if (vsi->netdev) {
4952 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
4953 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
4954 err = i40e_vsi_request_irq(vsi, int_name);
4955 if (err)
4956 goto err_setup_rx;
4957
4958 /* Notify the stack of the actual queue counts. */
4959 err = netif_set_real_num_tx_queues(vsi->netdev,
4960 vsi->num_queue_pairs);
4961 if (err)
4962 goto err_set_queues;
4963
4964 err = netif_set_real_num_rx_queues(vsi->netdev,
4965 vsi->num_queue_pairs);
4966 if (err)
4967 goto err_set_queues;
4968
4969 } else if (vsi->type == I40E_VSI_FDIR) {
4970 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
4971 dev_driver_string(&pf->pdev->dev),
4972 dev_name(&pf->pdev->dev));
4973 err = i40e_vsi_request_irq(vsi, int_name);
4974
4975 } else {
4976 err = -EINVAL;
4977 goto err_setup_rx;
4978 }
4979
4980 err = i40e_up_complete(vsi);
4981 if (err)
4982 goto err_up_complete;
4983
4984 return 0;
4985
4986 err_up_complete:
4987 i40e_down(vsi);
4988 err_set_queues:
4989 i40e_vsi_free_irq(vsi);
4990 err_setup_rx:
4991 i40e_vsi_free_rx_resources(vsi);
4992 err_setup_tx:
4993 i40e_vsi_free_tx_resources(vsi);
4994 if (vsi == pf->vsi[pf->lan_vsi])
4995 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
4996
4997 return err;
4998 }
4999
5000 /**
5001 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5002 * @pf: Pointer to PF
5003 *
5004 * This function destroys the hlist where all the Flow Director
5005 * filters were saved.
5006 **/
5007 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5008 {
5009 struct i40e_fdir_filter *filter;
5010 struct hlist_node *node2;
5011
5012 hlist_for_each_entry_safe(filter, node2,
5013 &pf->fdir_filter_list, fdir_node) {
5014 hlist_del(&filter->fdir_node);
5015 kfree(filter);
5016 }
5017 pf->fdir_pf_active_filters = 0;
5018 }
5019
5020 /**
5021 * i40e_close - Disables a network interface
5022 * @netdev: network interface device structure
5023 *
5024 * The close entry point is called when an interface is de-activated
5025 * by the OS. The hardware is still under the driver's control, but
5026 * this netdev interface is disabled.
5027 *
5028 * Returns 0, this is not allowed to fail
5029 **/
5030 #ifdef I40E_FCOE
5031 int i40e_close(struct net_device *netdev)
5032 #else
5033 static int i40e_close(struct net_device *netdev)
5034 #endif
5035 {
5036 struct i40e_netdev_priv *np = netdev_priv(netdev);
5037 struct i40e_vsi *vsi = np->vsi;
5038
5039 i40e_vsi_close(vsi);
5040
5041 return 0;
5042 }
5043
5044 /**
5045 * i40e_do_reset - Start a PF or Core Reset sequence
5046 * @pf: board private structure
5047 * @reset_flags: which reset is requested
5048 *
5049 * The essential difference in resets is that the PF Reset
5050 * doesn't clear the packet buffers, doesn't reset the PE
5051 * firmware, and doesn't bother the other PFs on the chip.
5052 **/
5053 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
5054 {
5055 u32 val;
5056
5057 WARN_ON(in_interrupt());
5058
5059 if (i40e_check_asq_alive(&pf->hw))
5060 i40e_vc_notify_reset(pf);
5061
5062 /* do the biggest reset indicated */
5063 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
5064
5065 /* Request a Global Reset
5066 *
5067 * This will start the chip's countdown to the actual full
5068 * chip reset event, and a warning interrupt to be sent
5069 * to all PFs, including the requestor. Our handler
5070 * for the warning interrupt will deal with the shutdown
5071 * and recovery of the switch setup.
5072 */
5073 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
5074 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5075 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5076 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5077
5078 } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
5079
5080 /* Request a Core Reset
5081 *
5082 * Same as Global Reset, except does *not* include the MAC/PHY
5083 */
5084 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
5085 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5086 val |= I40E_GLGEN_RTRIG_CORER_MASK;
5087 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5088 i40e_flush(&pf->hw);
5089
5090 } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
5091
5092 /* Request a PF Reset
5093 *
5094 * Resets only the PF-specific registers
5095 *
5096 * This goes directly to the tear-down and rebuild of
5097 * the switch, since we need to do all the recovery as
5098 * for the Core Reset.
5099 */
5100 dev_dbg(&pf->pdev->dev, "PFR requested\n");
5101 i40e_handle_reset_warning(pf);
5102
5103 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
5104 int v;
5105
5106 /* Find the VSI(s) that requested a re-init */
5107 dev_info(&pf->pdev->dev,
5108 "VSI reinit requested\n");
5109 for (v = 0; v < pf->num_alloc_vsi; v++) {
5110 struct i40e_vsi *vsi = pf->vsi[v];
5111 if (vsi != NULL &&
5112 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
5113 i40e_vsi_reinit_locked(pf->vsi[v]);
5114 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
5115 }
5116 }
5117
5118 /* no further action needed, so return now */
5119 return;
5120 } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
5121 int v;
5122
5123 /* Find the VSI(s) that need to be brought down */
5124 dev_info(&pf->pdev->dev, "VSI down requested\n");
5125 for (v = 0; v < pf->num_alloc_vsi; v++) {
5126 struct i40e_vsi *vsi = pf->vsi[v];
5127 if (vsi != NULL &&
5128 test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
5129 set_bit(__I40E_DOWN, &vsi->state);
5130 i40e_down(vsi);
5131 clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
5132 }
5133 }
5134
5135 /* no further action needed, so return now */
5136 return;
5137 } else {
5138 dev_info(&pf->pdev->dev,
5139 "bad reset request 0x%08x\n", reset_flags);
5140 return;
5141 }
5142 }
5143
5144 #ifdef CONFIG_I40E_DCB
5145 /**
5146 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5147 * @pf: board private structure
5148 * @old_cfg: current DCB config
5149 * @new_cfg: new DCB config
5150 **/
5151 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5152 struct i40e_dcbx_config *old_cfg,
5153 struct i40e_dcbx_config *new_cfg)
5154 {
5155 bool need_reconfig = false;
5156
5157 /* Check if ETS configuration has changed */
5158 if (memcmp(&new_cfg->etscfg,
5159 &old_cfg->etscfg,
5160 sizeof(new_cfg->etscfg))) {
5161 /* If Priority Table has changed reconfig is needed */
5162 if (memcmp(&new_cfg->etscfg.prioritytable,
5163 &old_cfg->etscfg.prioritytable,
5164 sizeof(new_cfg->etscfg.prioritytable))) {
5165 need_reconfig = true;
5166 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5167 }
5168
5169 if (memcmp(&new_cfg->etscfg.tcbwtable,
5170 &old_cfg->etscfg.tcbwtable,
5171 sizeof(new_cfg->etscfg.tcbwtable)))
5172 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5173
5174 if (memcmp(&new_cfg->etscfg.tsatable,
5175 &old_cfg->etscfg.tsatable,
5176 sizeof(new_cfg->etscfg.tsatable)))
5177 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5178 }
5179
5180 /* Check if PFC configuration has changed */
5181 if (memcmp(&new_cfg->pfc,
5182 &old_cfg->pfc,
5183 sizeof(new_cfg->pfc))) {
5184 need_reconfig = true;
5185 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5186 }
5187
5188 /* Check if APP Table has changed */
5189 if (memcmp(&new_cfg->app,
5190 &old_cfg->app,
5191 sizeof(new_cfg->app))) {
5192 need_reconfig = true;
5193 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5194 }
5195
5196 dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
5197 need_reconfig);
5198 return need_reconfig;
5199 }
5200
5201 /**
5202 * i40e_handle_lldp_event - Handle LLDP Change MIB event
5203 * @pf: board private structure
5204 * @e: event info posted on ARQ
5205 **/
5206 static int i40e_handle_lldp_event(struct i40e_pf *pf,
5207 struct i40e_arq_event_info *e)
5208 {
5209 struct i40e_aqc_lldp_get_mib *mib =
5210 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5211 struct i40e_hw *hw = &pf->hw;
5212 struct i40e_dcbx_config tmp_dcbx_cfg;
5213 bool need_reconfig = false;
5214 int ret = 0;
5215 u8 type;
5216
5217 /* Not DCB capable or capability disabled */
5218 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
5219 return ret;
5220
5221 /* Ignore if event is not for Nearest Bridge */
5222 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5223 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
5224 dev_dbg(&pf->pdev->dev,
5225 "%s: LLDP event mib bridge type 0x%x\n", __func__, type);
5226 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5227 return ret;
5228
5229 /* Check MIB Type and return if event for Remote MIB update */
5230 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
5231 dev_dbg(&pf->pdev->dev,
5232 "%s: LLDP event mib type %s\n", __func__,
5233 type ? "remote" : "local");
5234 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5235 /* Update the remote cached instance and return */
5236 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5237 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5238 &hw->remote_dcbx_config);
5239 goto exit;
5240 }
5241
5242 /* Store the old configuration */
5243 tmp_dcbx_cfg = hw->local_dcbx_config;
5244
5245 /* Reset the old DCBx configuration data */
5246 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
5247 /* Get updated DCBX data from firmware */
5248 ret = i40e_get_dcb_config(&pf->hw);
5249 if (ret) {
5250 dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
5251 goto exit;
5252 }
5253
5254 /* No change detected in DCBX configs */
5255 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
5256 sizeof(tmp_dcbx_cfg))) {
5257 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
5258 goto exit;
5259 }
5260
5261 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
5262 &hw->local_dcbx_config);
5263
5264 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
5265
5266 if (!need_reconfig)
5267 goto exit;
5268
5269 /* Enable DCB tagging only when more than one TC */
5270 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
5271 pf->flags |= I40E_FLAG_DCB_ENABLED;
5272 else
5273 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5274
5275 set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5276 /* Reconfiguration needed; quiesce all VSIs */
5277 i40e_pf_quiesce_all_vsi(pf);
5278
5279 /* Apply the configuration changes to the VEBs/VSIs */
5280 i40e_dcb_reconfigure(pf);
5281
5282 ret = i40e_resume_port_tx(pf);
5283
5284 clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
5285 /* In case of error there is no point in resuming the VSIs */
5286 if (ret)
5287 goto exit;
5288
5289 /* Wait for the PF's Tx queues to be disabled */
5290 ret = i40e_pf_wait_txq_disabled(pf);
5291 if (ret) {
5292 /* Schedule PF reset to recover */
5293 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5294 i40e_service_event_schedule(pf);
5295 } else {
5296 i40e_pf_unquiesce_all_vsi(pf);
5297 }
5298
5299 exit:
5300 return ret;
5301 }
5302 #endif /* CONFIG_I40E_DCB */
5303
5304 /**
5305 * i40e_do_reset_safe - Protected reset path for userland calls.
5306 * @pf: board private structure
5307 * @reset_flags: which reset is requested
5308 *
5309 **/
5310 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5311 {
5312 rtnl_lock();
5313 i40e_do_reset(pf, reset_flags);
5314 rtnl_unlock();
5315 }
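/* Usage sketch: userland-triggered paths (e.g. ethtool handlers
 * elsewhere in the driver, an assumption here) use the safe wrapper so
 * the reset runs under the RTNL lock:
 *
 *	i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
 */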
5316
5317 /**
5318 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5319 * @pf: board private structure
5320 * @e: event info posted on ARQ
5321 *
5322 * Handler for LAN Queue Overflow Event generated by the firmware for PF
5323 * and VF queues
5324 **/
5325 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5326 struct i40e_arq_event_info *e)
5327 {
5328 struct i40e_aqc_lan_overflow *data =
5329 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5330 u32 queue = le32_to_cpu(data->prtdcb_rupto);
5331 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5332 struct i40e_hw *hw = &pf->hw;
5333 struct i40e_vf *vf;
5334 u16 vf_id;
5335
5336 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5337 queue, qtx_ctl);
5338
5339 /* Queue belongs to VF, find the VF and issue VF reset */
5340 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5341 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5342 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5343 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5344 vf_id -= hw->func_caps.vf_base_id;
5345 vf = &pf->vf[vf_id];
5346 i40e_vc_notify_vf_reset(vf);
5347 /* Allow VF to process pending reset notification */
5348 msleep(20);
5349 i40e_reset_vf(vf, false);
5350 }
5351 }
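/* Worked example of the VF lookup above (illustrative values): if
 * qtx_ctl marks the queue as a VF queue with VFVM_INDX = 66 and
 * func_caps.vf_base_id = 64, then vf_id = 66 - 64 = 2 and pf->vf[2]
 * is notified and reset.
 */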
5352
5353 /**
5354 * i40e_service_event_complete - Finish up the service event
5355 * @pf: board private structure
5356 **/
5357 static void i40e_service_event_complete(struct i40e_pf *pf)
5358 {
5359 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
5360
5361 /* flush memory to make sure state is correct before next watchdog */
5362 smp_mb__before_atomic();
5363 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
5364 }
5365
5366 /**
5367 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5368 * @pf: board private structure
5369 **/
5370 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5371 {
5372 u32 val, fcnt_prog;
5373
5374 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5375 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5376 return fcnt_prog;
5377 }
5378
5379 /**
5380 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
5381 * @pf: board private structure
5382 **/
5383 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5384 {
5385 u32 val, fcnt_prog;
5386
5387 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5388 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5389 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5390 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5391 return fcnt_prog;
5392 }
5393
5394 /**
5395 * i40e_get_global_fd_count - Get total FD filters programmed on device
5396 * @pf: board private structure
5397 **/
5398 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5399 {
5400 u32 val, fcnt_prog;
5401
5402 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5403 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5404 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5405 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5406 return fcnt_prog;
5407 }
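/* Worked example of the register decode above (illustrative value): if
 * GLQF_FDCNT_0 reports a guaranteed count of 5 and a best-effort count
 * of 2 in their respective fields, this returns 5 + 2 = 7 filters
 * programmed across the device.
 */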
5408
5409 /**
5410 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
5411 * @pf: board private structure
5412 **/
5413 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5414 {
5415 u32 fcnt_prog, fcnt_avail;
5416
5417 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5418 return;
5419
5420 /* Check if FD SB or ATR was auto-disabled and if there is enough room
5421 * to re-enable
5422 */
5423 fcnt_prog = i40e_get_global_fd_count(pf);
5424 fcnt_avail = pf->fdir_pf_filter_count;
5425 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5426 (pf->fd_add_err == 0) ||
5427 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
5428 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5429 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5430 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5431 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
5432 }
5433 }
5434 /* Wait for some more space to be available to turn on ATR */
5435 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
5436 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5437 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
5438 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5439 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
5440 }
5441 }
5442 }
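/* Worked numbers (assuming a headroom constant of 32, which is defined
 * outside this file): with fdir_pf_filter_count = 8192, sideband
 * re-enable needs fewer than 8192 - 32 = 8160 programmed filters, and
 * ATR re-enable needs fewer than 8192 - 64 = 8128.
 */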
5443
5444 #define I40E_MIN_FD_FLUSH_INTERVAL 10
5445 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
5446 /**
5447 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
5448 * @pf: board private structure
5449 **/
5450 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5451 {
5452 unsigned long min_flush_time;
5453 int flush_wait_retry = 50;
5454 bool disable_atr = false;
5455 int fd_room;
5456 int reg;
5457
5458 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5459 return;
5460
5461 if (time_after(jiffies, pf->fd_flush_timestamp +
5462 (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
5463 /* If the flush is happening too quickly and we have mostly
5464 * SB rules, we should not re-enable ATR for some time.
5465 */
5466 min_flush_time = pf->fd_flush_timestamp
5467 + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
5468 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
5469
5470 if (!(time_after(jiffies, min_flush_time)) &&
5471 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
5472 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
5473 disable_atr = true;
5474 }
5475
5476 pf->fd_flush_timestamp = jiffies;
5477 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5478 /* flush all filters */
5479 wr32(&pf->hw, I40E_PFQF_CTL_1,
5480 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
5481 i40e_flush(&pf->hw);
5482 pf->fd_flush_cnt++;
5483 pf->fd_add_err = 0;
5484 do {
5485 /* Check FD flush status every 5-6 msec */
5486 usleep_range(5000, 6000);
5487 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
5488 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
5489 break;
5490 } while (flush_wait_retry--);
5491 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
5492 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
5493 } else {
5494 /* replay sideband filters */
5495 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
5496 if (!disable_atr)
5497 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
5498 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
5499 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
5500 }
5501 }
5502 }
5503
5504 /**
5505 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
5506 * @pf: board private structure
5507 **/
5508 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
5509 {
5510 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
5511 }
5512
5513 /* We can see up to 256 filter programming descriptors in transit if
5514 * the filters are being applied really fast, before we see the first
5515 * filter miss error on Rx queue 0. Accumulating enough error messages
5516 * before reacting will make sure we don't cause a flush too often.
5517 */
5518 #define I40E_MAX_FD_PROGRAM_ERROR 256
5519
5520 /**
5521 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5522 * @pf: board private structure
5523 **/
5524 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5525 {
5526
5527 /* if interface is down do nothing */
5528 if (test_bit(__I40E_DOWN, &pf->state))
5529 return;
5530
5531 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5532 return;
5533
5534 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5535 i40e_fdir_flush_and_replay(pf);
5536
5537 i40e_fdir_check_and_reenable(pf);
5538
5539 }
5540
5541 /**
5542 * i40e_vsi_link_event - notify VSI of a link event
5543 * @vsi: vsi to be notified
5544 * @link_up: link up or down
5545 **/
5546 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
5547 {
5548 if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
5549 return;
5550
5551 switch (vsi->type) {
5552 case I40E_VSI_MAIN:
5553 #ifdef I40E_FCOE
5554 case I40E_VSI_FCOE:
5555 #endif
5556 if (!vsi->netdev || !vsi->netdev_registered)
5557 break;
5558
5559 if (link_up) {
5560 netif_carrier_on(vsi->netdev);
5561 netif_tx_wake_all_queues(vsi->netdev);
5562 } else {
5563 netif_carrier_off(vsi->netdev);
5564 netif_tx_stop_all_queues(vsi->netdev);
5565 }
5566 break;
5567
5568 case I40E_VSI_SRIOV:
5569 case I40E_VSI_VMDQ2:
5570 case I40E_VSI_CTRL:
5571 case I40E_VSI_MIRROR:
5572 default:
5573 /* there is no notification for other VSIs */
5574 break;
5575 }
5576 }
5577
5578 /**
5579 * i40e_veb_link_event - notify elements on the veb of a link event
5580 * @veb: veb to be notified
5581 * @link_up: link up or down
5582 **/
5583 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
5584 {
5585 struct i40e_pf *pf;
5586 int i;
5587
5588 if (!veb || !veb->pf)
5589 return;
5590 pf = veb->pf;
5591
5592 /* depth first... */
5593 for (i = 0; i < I40E_MAX_VEB; i++)
5594 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
5595 i40e_veb_link_event(pf->veb[i], link_up);
5596
5597 /* ... now the local VSIs */
5598 for (i = 0; i < pf->num_alloc_vsi; i++)
5599 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
5600 i40e_vsi_link_event(pf->vsi[i], link_up);
5601 }
5602
5603 /**
5604 * i40e_link_event - Update netif_carrier status
5605 * @pf: board private structure
5606 **/
5607 static void i40e_link_event(struct i40e_pf *pf)
5608 {
5609 bool new_link, old_link;
5610 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5611 u8 new_link_speed, old_link_speed;
5612
5613 /* set this to force the get_link_status call to refresh state */
5614 pf->hw.phy.get_link_info = true;
5615
5616 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
5617 new_link = i40e_get_link_status(&pf->hw);
5618 old_link_speed = pf->hw.phy.link_info_old.link_speed;
5619 new_link_speed = pf->hw.phy.link_info.link_speed;
5620
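	/* bail only if nothing changed and the netdev carrier state
	 * already agrees (or the VSI is down); otherwise fall through
	 * and propagate the new state
	 */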
5621 if (new_link == old_link &&
5622 new_link_speed == old_link_speed &&
5623 (test_bit(__I40E_DOWN, &vsi->state) ||
5624 new_link == netif_carrier_ok(vsi->netdev)))
5625 return;
5626
5627 if (!test_bit(__I40E_DOWN, &vsi->state))
5628 i40e_print_link_message(vsi, new_link);
5629
5630 /* Notify the base of the switch tree connected to
5631 * the link. Floating VEBs are not notified.
5632 */
5633 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
5634 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
5635 else
5636 i40e_vsi_link_event(vsi, new_link);
5637
5638 if (pf->vf)
5639 i40e_vc_notify_link_state(pf);
5640
5641 if (pf->flags & I40E_FLAG_PTP)
5642 i40e_ptp_set_increment(pf);
5643 }
5644
5645 /**
5646 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
5647 * @pf: board private structure
5648 *
5649 * Set the per-queue flags to request a check for stuck queues in the irq
5650 * clean functions, then force interrupts to be sure the irq clean is called.
5651 **/
5652 static void i40e_check_hang_subtask(struct i40e_pf *pf)
5653 {
5654 int i, v;
5655
5656 /* If we're down or resetting, just bail */
5657 if (test_bit(__I40E_DOWN, &pf->state) ||
5658 test_bit(__I40E_CONFIG_BUSY, &pf->state))
5659 return;
5660
5661 /* for each VSI/netdev
5662 * for each Tx queue
5663 * set the check flag
5664 * for each q_vector
5665 * force an interrupt
5666 */
5667 for (v = 0; v < pf->num_alloc_vsi; v++) {
5668 struct i40e_vsi *vsi = pf->vsi[v];
5669 int armed = 0;
5670
5671 if (!pf->vsi[v] ||
5672 test_bit(__I40E_DOWN, &vsi->state) ||
5673 (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
5674 continue;
5675
5676 for (i = 0; i < vsi->num_queue_pairs; i++) {
5677 set_check_for_tx_hang(vsi->tx_rings[i]);
5678 if (test_bit(__I40E_HANG_CHECK_ARMED,
5679 &vsi->tx_rings[i]->state))
5680 armed++;
5681 }
5682
5683 if (armed) {
5684 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
5685 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
5686 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
5687 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
5688 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
5689 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
5690 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
5691 } else {
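				/* queue vectors start at MSI-X vector 1
				 * (vector 0 is the misc/"other" vector), and
				 * the DYN_CTLN registers are indexed from
				 * that first queue vector, hence the - 1
				 */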
5692 u16 vec = vsi->base_vector - 1;
5693 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
5694 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
5695 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
5696 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
5697 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
5698 for (i = 0; i < vsi->num_q_vectors; i++, vec++)
5699 wr32(&vsi->back->hw,
5700 I40E_PFINT_DYN_CTLN(vec), val);
5701 }
5702 i40e_flush(&vsi->back->hw);
5703 }
5704 }
5705 }
5706
5707 /**
5708 * i40e_watchdog_subtask - periodic checks not using event driven response
5709 * @pf: board private structure
5710 **/
5711 static void i40e_watchdog_subtask(struct i40e_pf *pf)
5712 {
5713 int i;
5714
5715 /* if interface is down do nothing */
5716 if (test_bit(__I40E_DOWN, &pf->state) ||
5717 test_bit(__I40E_CONFIG_BUSY, &pf->state))
5718 return;
5719
5720 /* make sure we don't do these things too often */
5721 if (time_before(jiffies, (pf->service_timer_previous +
5722 pf->service_timer_period)))
5723 return;
5724 pf->service_timer_previous = jiffies;
5725
5726 i40e_check_hang_subtask(pf);
5727 i40e_link_event(pf);
5728
5729 /* Update the stats for active netdevs so the network stack
5730 * can look at updated numbers whenever it cares to
5731 */
5732 for (i = 0; i < pf->num_alloc_vsi; i++)
5733 if (pf->vsi[i] && pf->vsi[i]->netdev)
5734 i40e_update_stats(pf->vsi[i]);
5735
5736 /* Update the stats for the active switching components */
5737 for (i = 0; i < I40E_MAX_VEB; i++)
5738 if (pf->veb[i])
5739 i40e_update_veb_stats(pf->veb[i]);
5740
5741 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
5742 }
5743
5744 /**
5745 * i40e_reset_subtask - Set up for resetting the device and driver
5746 * @pf: board private structure
5747 **/
5748 static void i40e_reset_subtask(struct i40e_pf *pf)
5749 {
5750 u32 reset_flags = 0;
5751
5752 rtnl_lock();
5753 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
5754 reset_flags |= (1 << __I40E_REINIT_REQUESTED);
5755 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
5756 }
5757 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
5758 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
5759 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5760 }
5761 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
5762 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
5763 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
5764 }
5765 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
5766 reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
5767 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
5768 }
5769 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
5770 reset_flags |= (1 << __I40E_DOWN_REQUESTED);
5771 clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
5772 }
5773
5774 /* If there's a recovery already waiting, it takes
5775 * precedence over starting a new reset sequence.
5776 */
5777 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
5778 i40e_handle_reset_warning(pf);
5779 goto unlock;
5780 }
5781
5782 /* If a reset was requested, do it now unless we're already down or reconfiguring */
5783 if (reset_flags &&
5784 !test_bit(__I40E_DOWN, &pf->state) &&
5785 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
5786 i40e_do_reset(pf, reset_flags);
5787
5788 unlock:
5789 rtnl_unlock();
5790 }
5791
5792 /**
5793 * i40e_handle_link_event - Handle link event
5794 * @pf: board private structure
5795 * @e: event info posted on ARQ
5796 **/
5797 static void i40e_handle_link_event(struct i40e_pf *pf,
5798 struct i40e_arq_event_info *e)
5799 {
5800 struct i40e_hw *hw = &pf->hw;
5801 struct i40e_aqc_get_link_status *status =
5802 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
5803
5804 /* save off old link status information */
5805 hw->phy.link_info_old = hw->phy.link_info;
5806
5807 /* Do a new status request to re-enable LSE reporting
5808 * and load new status information into the hw struct
5809 * This completely ignores any state information
5810 * in the ARQ event info, instead choosing to always
5811 * issue the AQ update link status command.
5812 */
5813 i40e_link_event(pf);
5814
5815 /* check for unqualified module, if link is down */
5816 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
5817 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
5818 (!(status->link_info & I40E_AQ_LINK_UP)))
5819 dev_err(&pf->pdev->dev,
5820 "The driver failed to link because an unqualified module was detected.\n");
5821 }
5822
5823 /**
5824 * i40e_clean_adminq_subtask - Clean the AdminQ rings
5825 * @pf: board private structure
5826 **/
5827 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
5828 {
5829 struct i40e_arq_event_info event;
5830 struct i40e_hw *hw = &pf->hw;
5831 u16 pending, i = 0;
5832 i40e_status ret;
5833 u16 opcode;
5834 u32 oldval;
5835 u32 val;
5836
5837 /* Do not run clean AQ when PF reset fails */
5838 if (test_bit(__I40E_RESET_FAILED, &pf->state))
5839 return;
5840
5841 /* check for error indications */
5842 val = rd32(&pf->hw, pf->hw.aq.arq.len);
5843 oldval = val;
5844 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
5845 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
5846 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
5847 }
5848 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
5849 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
5850 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
5851 }
5852 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
5853 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
5854 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
5855 }
5856 if (oldval != val)
5857 wr32(&pf->hw, pf->hw.aq.arq.len, val);
5858
5859 val = rd32(&pf->hw, pf->hw.aq.asq.len);
5860 oldval = val;
5861 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
5862 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
5863 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
5864 }
5865 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
5866 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
5867 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
5868 }
5869 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
5870 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
5871 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
5872 }
5873 if (oldval != val)
5874 wr32(&pf->hw, pf->hw.aq.asq.len, val);
5875
5876 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
5877 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
5878 if (!event.msg_buf)
5879 return;
5880
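	/* drain the ARQ, bounded by adminq_work_limit so that one pass
	 * cannot monopolize the service task
	 */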
5881 do {
5882 ret = i40e_clean_arq_element(hw, &event, &pending);
5883 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
5884 break;
5885 else if (ret) {
5886 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
5887 break;
5888 }
5889
5890 opcode = le16_to_cpu(event.desc.opcode);
5891 switch (opcode) {
5892
5893 case i40e_aqc_opc_get_link_status:
5894 i40e_handle_link_event(pf, &event);
5895 break;
5896 case i40e_aqc_opc_send_msg_to_pf:
5897 ret = i40e_vc_process_vf_msg(pf,
5898 le16_to_cpu(event.desc.retval),
5899 le32_to_cpu(event.desc.cookie_high),
5900 le32_to_cpu(event.desc.cookie_low),
5901 event.msg_buf,
5902 event.msg_len);
5903 break;
5904 case i40e_aqc_opc_lldp_update_mib:
5905 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
5906 #ifdef CONFIG_I40E_DCB
5907 rtnl_lock();
5908 ret = i40e_handle_lldp_event(pf, &event);
5909 rtnl_unlock();
5910 #endif /* CONFIG_I40E_DCB */
5911 break;
5912 case i40e_aqc_opc_event_lan_overflow:
5913 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
5914 i40e_handle_lan_overflow_event(pf, &event);
5915 break;
5916 case i40e_aqc_opc_send_msg_to_peer:
5917 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
5918 break;
5919 case i40e_aqc_opc_nvm_erase:
5920 case i40e_aqc_opc_nvm_update:
5921 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
5922 break;
5923 default:
5924 dev_info(&pf->pdev->dev,
5925 "ARQ Error: Unknown event 0x%04x received\n",
5926 opcode);
5927 break;
5928 }
5929 } while (pending && (i++ < pf->adminq_work_limit));
5930
5931 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
5932 /* re-enable Admin queue interrupt cause */
5933 val = rd32(hw, I40E_PFINT_ICR0_ENA);
5934 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
5935 wr32(hw, I40E_PFINT_ICR0_ENA, val);
5936 i40e_flush(hw);
5937
5938 kfree(event.msg_buf);
5939 }
5940
5941 /**
5942 * i40e_verify_eeprom - make sure eeprom is good to use
5943 * @pf: board private structure
5944 **/
5945 static void i40e_verify_eeprom(struct i40e_pf *pf)
5946 {
5947 int err;
5948
5949 err = i40e_diag_eeprom_test(&pf->hw);
5950 if (err) {
5951 /* retry in case of garbage read */
5952 err = i40e_diag_eeprom_test(&pf->hw);
5953 if (err) {
5954 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
5955 err);
5956 set_bit(__I40E_BAD_EEPROM, &pf->state);
5957 }
5958 }
5959
5960 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
5961 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
5962 clear_bit(__I40E_BAD_EEPROM, &pf->state);
5963 }
5964 }
5965
5966 /**
5967 * i40e_enable_pf_switch_lb
5968 * @pf: pointer to the PF structure
5969 *
5970 * enable switch loop back or die - no point in a return value
5971 **/
5972 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
5973 {
5974 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5975 struct i40e_vsi_context ctxt;
5976 int aq_ret;
5977
5978 ctxt.seid = pf->main_vsi_seid;
5979 ctxt.pf_num = pf->hw.pf_id;
5980 ctxt.vf_num = 0;
5981 aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
5982 if (aq_ret) {
5983 dev_info(&pf->pdev->dev,
5984 "%s couldn't get PF vsi config, err %d, aq_err %d\n",
5985 __func__, aq_ret, pf->hw.aq.asq_last_status);
5986 return;
5987 }
5988 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5989 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5990 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5991
5992 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
5993 if (aq_ret) {
5994 dev_info(&pf->pdev->dev,
5995 "%s: update vsi switch failed, aq_err=%d\n",
5996 __func__, vsi->back->hw.aq.asq_last_status);
5997 }
5998 }
5999
6000 /**
6001 * i40e_disable_pf_switch_lb
6002 * @pf: pointer to the PF structure
6003 *
6004 * disable switch loop back or die - no point in a return value
6005 **/
6006 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6007 {
6008 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6009 struct i40e_vsi_context ctxt;
6010 int aq_ret;
6011
6012 ctxt.seid = pf->main_vsi_seid;
6013 ctxt.pf_num = pf->hw.pf_id;
6014 ctxt.vf_num = 0;
6015 aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6016 if (aq_ret) {
6017 dev_info(&pf->pdev->dev,
6018 "%s couldn't get PF vsi config, err %d, aq_err %d\n",
6019 __func__, aq_ret, pf->hw.aq.asq_last_status);
6020 return;
6021 }
6022 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6023 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6024 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6025
6026 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6027 if (aq_ret) {
6028 dev_info(&pf->pdev->dev,
6029 "%s: update vsi switch failed, aq_err=%d\n",
6030 __func__, vsi->back->hw.aq.asq_last_status);
6031 }
6032 }
6033
6034 /**
6035 * i40e_config_bridge_mode - Configure the HW bridge mode
6036 * @veb: pointer to the bridge instance
6037 *
6038 * Configure the loopback mode for the LAN VSI that is the downlink to the
6039 * specified HW bridge instance. It is expected that this function is called
6040 * when a new HW bridge is instantiated.
6041 **/
6042 static void i40e_config_bridge_mode(struct i40e_veb *veb)
6043 {
6044 struct i40e_pf *pf = veb->pf;
6045
6046 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6047 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6048 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6049 i40e_disable_pf_switch_lb(pf);
6050 else
6051 i40e_enable_pf_switch_lb(pf);
6052 }
6053
6054 /**
6055 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
6056 * @veb: pointer to the VEB instance
6057 *
6058 * This is a recursive function that first builds the attached VSIs, then
6059 * recurses into building the next layer of VEBs. We track the connections
6060 * through our own index numbers because the SEIDs from the HW could
6061 * change across the reset.
6062 **/
6063 static int i40e_reconstitute_veb(struct i40e_veb *veb)
6064 {
6065 struct i40e_vsi *ctl_vsi = NULL;
6066 struct i40e_pf *pf = veb->pf;
6067 int v, veb_idx;
6068 int ret;
6069
6070 /* build VSI that owns this VEB, temporarily attached to base VEB */
6071 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
6072 if (pf->vsi[v] &&
6073 pf->vsi[v]->veb_idx == veb->idx &&
6074 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6075 ctl_vsi = pf->vsi[v];
6076 break;
6077 }
6078 }
6079 if (!ctl_vsi) {
6080 dev_info(&pf->pdev->dev,
6081 "missing owner VSI for veb_idx %d\n", veb->idx);
6082 ret = -ENOENT;
6083 goto end_reconstitute;
6084 }
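	/* hang the owner VSI off the Main VSI's uplink for now; it moves
	 * onto its own VEB once that VEB is recreated below
	 */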
6085 if (ctl_vsi != pf->vsi[pf->lan_vsi])
6086 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6087 ret = i40e_add_vsi(ctl_vsi);
6088 if (ret) {
6089 dev_info(&pf->pdev->dev,
6090 "rebuild of owner VSI failed: %d\n", ret);
6091 goto end_reconstitute;
6092 }
6093 i40e_vsi_reset_stats(ctl_vsi);
6094
6095 /* create the VEB in the switch and move the VSI onto the VEB */
6096 ret = i40e_add_veb(veb, ctl_vsi);
6097 if (ret)
6098 goto end_reconstitute;
6099
6100 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6101 veb->bridge_mode = BRIDGE_MODE_VEB;
6102 else
6103 veb->bridge_mode = BRIDGE_MODE_VEPA;
6104 i40e_config_bridge_mode(veb);
6105
6106 /* create the remaining VSIs attached to this VEB */
6107 for (v = 0; v < pf->num_alloc_vsi; v++) {
6108 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6109 continue;
6110
6111 if (pf->vsi[v]->veb_idx == veb->idx) {
6112 struct i40e_vsi *vsi = pf->vsi[v];
6113 vsi->uplink_seid = veb->seid;
6114 ret = i40e_add_vsi(vsi);
6115 if (ret) {
6116 dev_info(&pf->pdev->dev,
6117 "rebuild of vsi_idx %d failed: %d\n",
6118 v, ret);
6119 goto end_reconstitute;
6120 }
6121 i40e_vsi_reset_stats(vsi);
6122 }
6123 }
6124
6125 /* create any VEBs attached to this VEB - RECURSION */
6126 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6127 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6128 pf->veb[veb_idx]->uplink_seid = veb->seid;
6129 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6130 if (ret)
6131 break;
6132 }
6133 }
6134
6135 end_reconstitute:
6136 return ret;
6137 }
6138
6139 /**
6140 * i40e_get_capabilities - get info about the HW
6141 * @pf: the PF struct
6142 **/
6143 static int i40e_get_capabilities(struct i40e_pf *pf)
6144 {
6145 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6146 u16 data_size;
6147 int buf_len;
6148 int err;
6149
6150 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
6151 do {
6152 cap_buf = kzalloc(buf_len, GFP_KERNEL);
6153 if (!cap_buf)
6154 return -ENOMEM;
6155
6156 /* this loads the data into the hw struct for us */
6157 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6158 &data_size,
6159 i40e_aqc_opc_list_func_capabilities,
6160 NULL);
6161 /* data loaded, buffer no longer needed */
6162 kfree(cap_buf);
6163
6164 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6165 /* retry with a larger buffer */
6166 buf_len = data_size;
6167 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6168 dev_info(&pf->pdev->dev,
6169 "capability discovery failed: aq=%d\n",
6170 pf->hw.aq.asq_last_status);
6171 return -ENODEV;
6172 }
6173 } while (err);
6174
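	/* Note: this looks like a workaround for older firmware (pre 2.22)
	 * reporting one fewer MSI-X vector than is actually usable for the
	 * PF and for each VF.
	 */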
6175 if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
6176 (pf->hw.aq.fw_maj_ver < 2)) {
6177 pf->hw.func_caps.num_msix_vectors++;
6178 pf->hw.func_caps.num_msix_vectors_vf++;
6179 }
6180
6181 if (pf->hw.debug_mask & I40E_DEBUG_USER)
6182 dev_info(&pf->pdev->dev,
6183 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6184 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6185 pf->hw.func_caps.num_msix_vectors,
6186 pf->hw.func_caps.num_msix_vectors_vf,
6187 pf->hw.func_caps.fd_filters_guaranteed,
6188 pf->hw.func_caps.fd_filters_best_effort,
6189 pf->hw.func_caps.num_tx_qp,
6190 pf->hw.func_caps.num_vsis);
6191
6192 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6193 + pf->hw.func_caps.num_vfs)
6194 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6195 dev_info(&pf->pdev->dev,
6196 "got num_vsis %d, setting num_vsis to %d\n",
6197 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6198 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6199 }
6200
6201 return 0;
6202 }
6203
6204 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6205
6206 /**
6207 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6208 * @pf: board private structure
6209 **/
6210 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6211 {
6212 struct i40e_vsi *vsi;
6213 int i;
6214
6215 /* quick workaround for an NVM issue that leaves a critical register
6216 * uninitialized
6217 */
6218 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6219 static const u32 hkey[] = {
6220 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6221 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6222 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6223 0x95b3a76d};
6224
6225 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6226 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6227 }
6228
6229 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6230 return;
6231
6232 /* find existing VSI and see if it needs configuring */
6233 vsi = NULL;
6234 for (i = 0; i < pf->num_alloc_vsi; i++) {
6235 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6236 vsi = pf->vsi[i];
6237 break;
6238 }
6239 }
6240
6241 /* create a new VSI if none exists */
6242 if (!vsi) {
6243 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6244 pf->vsi[pf->lan_vsi]->seid, 0);
6245 if (!vsi) {
6246 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6247 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6248 return;
6249 }
6250 }
6251
6252 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6253 }
6254
6255 /**
6256 * i40e_fdir_teardown - release the Flow Director resources
6257 * @pf: board private structure
6258 **/
6259 static void i40e_fdir_teardown(struct i40e_pf *pf)
6260 {
6261 int i;
6262
6263 i40e_fdir_filter_exit(pf);
6264 for (i = 0; i < pf->num_alloc_vsi; i++) {
6265 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6266 i40e_vsi_release(pf->vsi[i]);
6267 break;
6268 }
6269 }
6270 }
6271
6272 /**
6273 * i40e_prep_for_reset - prep for the core to reset
6274 * @pf: board private structure
6275 *
6276 * Close up the VFs and other things in prep for PF Reset.
6277 **/
6278 static void i40e_prep_for_reset(struct i40e_pf *pf)
6279 {
6280 struct i40e_hw *hw = &pf->hw;
6281 i40e_status ret = 0;
6282 u32 v;
6283
6284 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6285 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
6286 return;
6287
6288 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
6289
6290 /* quiesce the VSIs and their queues that are not already DOWN */
6291 i40e_pf_quiesce_all_vsi(pf);
6292
6293 for (v = 0; v < pf->num_alloc_vsi; v++) {
6294 if (pf->vsi[v])
6295 pf->vsi[v]->seid = 0;
6296 }
6297
6298 i40e_shutdown_adminq(&pf->hw);
6299
6300 /* call shutdown HMC */
6301 if (hw->hmc.hmc_obj) {
6302 ret = i40e_shutdown_lan_hmc(hw);
6303 if (ret)
6304 dev_warn(&pf->pdev->dev,
6305 "shutdown_lan_hmc failed: %d\n", ret);
6306 }
6307 }
6308
6309 /**
6310 * i40e_send_version - update firmware with driver version
6311 * @pf: PF struct
6312 */
6313 static void i40e_send_version(struct i40e_pf *pf)
6314 {
6315 struct i40e_driver_version dv;
6316
6317 dv.major_version = DRV_VERSION_MAJOR;
6318 dv.minor_version = DRV_VERSION_MINOR;
6319 dv.build_version = DRV_VERSION_BUILD;
6320 dv.subbuild_version = 0;
6321 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6322 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6323 }
6324
6325 /**
6326 * i40e_reset_and_rebuild - reset and rebuild using a saved config
6327 * @pf: board private structure
6328 * @reinit: if the Main VSI needs to be re-initialized.
6329 **/
6330 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6331 {
6332 struct i40e_hw *hw = &pf->hw;
6333 u8 set_fc_aq_fail = 0;
6334 i40e_status ret;
6335 u32 v;
6336
6337 /* Now we wait for GRST to settle out.
6338 * We don't have to delete the VEBs or VSIs from the hw switch
6339 * because the reset will make them disappear.
6340 */
6341 ret = i40e_pf_reset(hw);
6342 if (ret) {
6343 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
6344 set_bit(__I40E_RESET_FAILED, &pf->state);
6345 goto clear_recovery;
6346 }
6347 pf->pfr_count++;
6348
6349 if (test_bit(__I40E_DOWN, &pf->state))
6350 goto clear_recovery;
6351 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
6352
6353 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6354 ret = i40e_init_adminq(&pf->hw);
6355 if (ret) {
6356 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
6357 goto clear_recovery;
6358 }
6359
6360 /* re-verify the eeprom if we just had an EMP reset */
6361 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
6362 i40e_verify_eeprom(pf);
6363
6364 i40e_clear_pxe_mode(hw);
6365 ret = i40e_get_capabilities(pf);
6366 if (ret) {
6367 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
6368 ret);
6369 goto end_core_reset;
6370 }
6371
6372 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6373 hw->func_caps.num_rx_qp,
6374 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6375 if (ret) {
6376 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6377 goto end_core_reset;
6378 }
6379 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6380 if (ret) {
6381 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6382 goto end_core_reset;
6383 }
6384
6385 #ifdef CONFIG_I40E_DCB
6386 ret = i40e_init_pf_dcb(pf);
6387 if (ret) {
6388 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6389 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6390 /* Continue without DCB enabled */
6391 }
6392 #endif /* CONFIG_I40E_DCB */
6393 #ifdef I40E_FCOE
6394 ret = i40e_init_pf_fcoe(pf);
6395 if (ret)
6396 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
6397
6398 #endif
6399 /* do basic switch setup */
6400 ret = i40e_setup_pf_switch(pf, reinit);
6401 if (ret)
6402 goto end_core_reset;
6403
6404 /* driver is only interested in link up/down and module qualification
6405 * reports from firmware
6406 */
6407 ret = i40e_aq_set_phy_int_mask(&pf->hw,
6408 I40E_AQ_EVENT_LINK_UPDOWN |
6409 I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
6410 if (ret)
6411 dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
6412
6413 /* make sure our flow control settings are restored */
6414 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6415 if (ret)
6416 dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
6417
6418 /* Rebuild the VSIs and VEBs that existed before reset.
6419 * They are still in our local switch element arrays, so only
6420 * need to rebuild the switch model in the HW.
6421 *
6422 * If there were VEBs but the reconstitution failed, we'll try
6423 * to recover minimal use by getting the basic PF VSI working.
6424 */
6425 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
6426 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
6427 /* find the one VEB connected to the MAC, and find orphans */
6428 for (v = 0; v < I40E_MAX_VEB; v++) {
6429 if (!pf->veb[v])
6430 continue;
6431
6432 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6433 pf->veb[v]->uplink_seid == 0) {
6434 ret = i40e_reconstitute_veb(pf->veb[v]);
6435
6436 if (!ret)
6437 continue;
6438
6439 /* If Main VEB failed, we're in deep doodoo,
6440 * so give up rebuilding the switch and set up
6441 * for minimal rebuild of PF VSI.
6442 * If orphan failed, we'll report the error
6443 * but try to keep going.
6444 */
6445 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
6446 dev_info(&pf->pdev->dev,
6447 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
6448 ret);
6449 pf->vsi[pf->lan_vsi]->uplink_seid
6450 = pf->mac_seid;
6451 break;
6452 } else if (pf->veb[v]->uplink_seid == 0) {
6453 dev_info(&pf->pdev->dev,
6454 "rebuild of orphan VEB failed: %d\n",
6455 ret);
6456 }
6457 }
6458 }
6459 }
6460
6461 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
6462 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
6463 /* no VEB, so rebuild only the Main VSI */
6464 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
6465 if (ret) {
6466 dev_info(&pf->pdev->dev,
6467 "rebuild of Main VSI failed: %d\n", ret);
6468 goto end_core_reset;
6469 }
6470 }
6471
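	/* Note: this is presumably a workaround for older firmware (pre
	 * 4.33) that can leave the link down after a PF reset; restart
	 * auto-negotiation after giving the firmware a moment to settle.
	 */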
6472 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
6473 (pf->hw.aq.fw_maj_ver < 4)) {
6474 msleep(75);
6475 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
6476 if (ret)
6477 dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
6478 pf->hw.aq.asq_last_status);
6479 }
6480 /* reinit the misc interrupt */
6481 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6482 ret = i40e_setup_misc_vector(pf);
6483
6484 /* restart the VSIs that were rebuilt and running before the reset */
6485 i40e_pf_unquiesce_all_vsi(pf);
6486
6487 if (pf->num_alloc_vfs) {
6488 for (v = 0; v < pf->num_alloc_vfs; v++)
6489 i40e_reset_vf(&pf->vf[v], true);
6490 }
6491
6492 /* tell the firmware that we're starting */
6493 i40e_send_version(pf);
6494
6495 end_core_reset:
6496 clear_bit(__I40E_RESET_FAILED, &pf->state);
6497 clear_recovery:
6498 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
6499 }
6500
6501 /**
6502 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
6503 * @pf: board private structure
6504 *
6505 * Close up the VFs and other things in prep for a Core Reset,
6506 * then get ready to rebuild the world.
6507 **/
6508 static void i40e_handle_reset_warning(struct i40e_pf *pf)
6509 {
6510 i40e_prep_for_reset(pf);
6511 i40e_reset_and_rebuild(pf, false);
6512 }
6513
6514 /**
6515 * i40e_handle_mdd_event
6516 * @pf: pointer to the PF structure
6517 *
6518 * Called from the MDD irq handler to identify possibly malicious VFs
6519 **/
6520 static void i40e_handle_mdd_event(struct i40e_pf *pf)
6521 {
6522 struct i40e_hw *hw = &pf->hw;
6523 bool mdd_detected = false;
6524 bool pf_mdd_detected = false;
6525 struct i40e_vf *vf;
6526 u32 reg;
6527 int i;
6528
6529 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
6530 return;
6531
6532 /* find what triggered the MDD event */
6533 reg = rd32(hw, I40E_GL_MDET_TX);
6534 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6535 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6536 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6537 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6538 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6539 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6540 I40E_GL_MDET_TX_EVENT_SHIFT;
6541 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6542 I40E_GL_MDET_TX_QUEUE_SHIFT) -
6543 pf->hw.func_caps.base_queue;
6544 if (netif_msg_tx_err(pf))
6545 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
6546 event, queue, pf_num, vf_num);
6547 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
6548 mdd_detected = true;
6549 }
6550 reg = rd32(hw, I40E_GL_MDET_RX);
6551 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6552 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6553 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6554 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6555 I40E_GL_MDET_RX_EVENT_SHIFT;
6556 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6557 I40E_GL_MDET_RX_QUEUE_SHIFT) -
6558 pf->hw.func_caps.base_queue;
6559 if (netif_msg_rx_err(pf))
6560 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
6561 event, queue, func);
6562 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
6563 mdd_detected = true;
6564 }
6565
6566 if (mdd_detected) {
6567 reg = rd32(hw, I40E_PF_MDET_TX);
6568 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6569 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
6570 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
6571 pf_mdd_detected = true;
6572 }
6573 reg = rd32(hw, I40E_PF_MDET_RX);
6574 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6575 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
6576 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
6577 pf_mdd_detected = true;
6578 }
6579 /* Queue belongs to the PF, initiate a reset */
6580 if (pf_mdd_detected) {
6581 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6582 i40e_service_event_schedule(pf);
6583 }
6584 }
6585
6586 /* see if one of the VFs needs its hand slapped */
6587 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
6588 vf = &(pf->vf[i]);
6589 reg = rd32(hw, I40E_VP_MDET_TX(i));
6590 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6591 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
6592 vf->num_mdd_events++;
6593 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
6594 i);
6595 }
6596
6597 reg = rd32(hw, I40E_VP_MDET_RX(i));
6598 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6599 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
6600 vf->num_mdd_events++;
6601 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
6602 i);
6603 }
6604
6605 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
6606 dev_info(&pf->pdev->dev,
6607 "Too many MDD events on VF %d, disabled\n", i);
6608 dev_info(&pf->pdev->dev,
6609 "Use PF Control I/F to re-enable the VF\n");
6610 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
6611 }
6612 }
6613
6614 /* re-enable mdd interrupt cause */
6615 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
6616 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
6617 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
6618 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
6619 i40e_flush(hw);
6620 }
6621
6622 #ifdef CONFIG_I40E_VXLAN
6623 /**
6624 * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
6625 * @pf: board private structure
6626 **/
6627 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
6628 {
6629 struct i40e_hw *hw = &pf->hw;
6630 i40e_status ret;
6631 __be16 port;
6632 int i;
6633
6634 if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
6635 return;
6636
6637 pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
6638
6639 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6640 if (pf->pending_vxlan_bitmap & (1 << i)) {
6641 pf->pending_vxlan_bitmap &= ~(1 << i);
6642 port = pf->vxlan_ports[i];
6643 if (port)
6644 ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
6645 I40E_AQC_TUNNEL_TYPE_VXLAN,
6646 NULL, NULL);
6647 else
6648 ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
6649
6650 if (ret) {
6651 dev_info(&pf->pdev->dev,
6652 "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
6653 port ? "add" : "delete",
6654 ntohs(port), i, ret,
6655 pf->hw.aq.asq_last_status);
6656 pf->vxlan_ports[i] = 0;
6657 }
6658 }
6659 }
6660 }
6661
6662 #endif
6663 /**
6664 * i40e_service_task - Run the driver's async subtasks
6665 * @work: pointer to work_struct containing our data
6666 **/
6667 static void i40e_service_task(struct work_struct *work)
6668 {
6669 struct i40e_pf *pf = container_of(work,
6670 struct i40e_pf,
6671 service_task);
6672 unsigned long start_time = jiffies;
6673
6674 /* don't bother with service tasks if a reset is in progress */
6675 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
6676 i40e_service_event_complete(pf);
6677 return;
6678 }
6679
6680 i40e_reset_subtask(pf);
6681 i40e_handle_mdd_event(pf);
6682 i40e_vc_process_vflr_event(pf);
6683 i40e_watchdog_subtask(pf);
6684 i40e_fdir_reinit_subtask(pf);
6685 i40e_sync_filters_subtask(pf);
6686 #ifdef CONFIG_I40E_VXLAN
6687 i40e_sync_vxlan_filters_subtask(pf);
6688 #endif
6689 i40e_clean_adminq_subtask(pf);
6690
6691 i40e_service_event_complete(pf);
6692
6693 /* If the tasks have taken longer than one timer cycle or there
6694 * is more work to be done, reschedule the service task now
6695 * rather than wait for the timer to tick again.
6696 */
6697 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
6698 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
6699 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
6700 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
6701 i40e_service_event_schedule(pf);
6702 }
6703
6704 /**
6705 * i40e_service_timer - timer callback
6706 * @data: pointer to PF struct
6707 **/
6708 static void i40e_service_timer(unsigned long data)
6709 {
6710 struct i40e_pf *pf = (struct i40e_pf *)data;
6711
6712 mod_timer(&pf->service_timer,
6713 round_jiffies(jiffies + pf->service_timer_period));
6714 i40e_service_event_schedule(pf);
6715 }
6716
6717 /**
6718 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
6719 * @vsi: the VSI being configured
6720 **/
6721 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
6722 {
6723 struct i40e_pf *pf = vsi->back;
6724
6725 switch (vsi->type) {
6726 case I40E_VSI_MAIN:
6727 vsi->alloc_queue_pairs = pf->num_lan_qps;
6728 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6729 I40E_REQ_DESCRIPTOR_MULTIPLE);
6730 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6731 vsi->num_q_vectors = pf->num_lan_msix;
6732 else
6733 vsi->num_q_vectors = 1;
6734
6735 break;
6736
6737 case I40E_VSI_FDIR:
6738 vsi->alloc_queue_pairs = 1;
6739 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
6740 I40E_REQ_DESCRIPTOR_MULTIPLE);
6741 vsi->num_q_vectors = 1;
6742 break;
6743
6744 case I40E_VSI_VMDQ2:
6745 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
6746 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6747 I40E_REQ_DESCRIPTOR_MULTIPLE);
6748 vsi->num_q_vectors = pf->num_vmdq_msix;
6749 break;
6750
6751 case I40E_VSI_SRIOV:
6752 vsi->alloc_queue_pairs = pf->num_vf_qps;
6753 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6754 I40E_REQ_DESCRIPTOR_MULTIPLE);
6755 break;
6756
6757 #ifdef I40E_FCOE
6758 case I40E_VSI_FCOE:
6759 vsi->alloc_queue_pairs = pf->num_fcoe_qps;
6760 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6761 I40E_REQ_DESCRIPTOR_MULTIPLE);
6762 vsi->num_q_vectors = pf->num_fcoe_msix;
6763 break;
6764
6765 #endif /* I40E_FCOE */
6766 default:
6767 WARN_ON(1);
6768 return -ENODATA;
6769 }
6770
6771 return 0;
6772 }
6773
6774 /**
6775 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
6776 * @vsi: pointer to the VSI
6777 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
6778 *
6779 * On error: returns error code (negative)
6780 * On success: returns 0
6781 **/
6782 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
6783 {
6784 int size;
6785 int ret = 0;
6786
6787 /* allocate memory for both Tx and Rx ring pointers */
6788 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
6789 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
6790 if (!vsi->tx_rings)
6791 return -ENOMEM;
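	/* rx_rings is not a separate allocation; it aliases the second
	 * half of the tx_rings array allocated just above
	 */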
6792 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
6793
6794 if (alloc_qvectors) {
6795 /* allocate memory for q_vector pointers */
6796 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
6797 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
6798 if (!vsi->q_vectors) {
6799 ret = -ENOMEM;
6800 goto err_vectors;
6801 }
6802 }
6803 return ret;
6804
6805 err_vectors:
6806 kfree(vsi->tx_rings);
6807 return ret;
6808 }
6809
6810 /**
6811 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
6812 * @pf: board private structure
6813 * @type: type of VSI
6814 *
6815 * On error: returns error code (negative)
6816 * On success: returns vsi index in PF (positive)
6817 **/
6818 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
6819 {
6820 int ret = -ENODEV;
6821 struct i40e_vsi *vsi;
6822 int vsi_idx;
6823 int i;
6824
6825 /* Need to protect the allocation of the VSIs at the PF level */
6826 mutex_lock(&pf->switch_mutex);
6827
6828 /* VSI list may be fragmented if VSI creation/destruction has
6829 * been happening. We can afford to do a quick scan to look
6830 * for any free VSIs in the list.
6831 *
6832 * find next empty vsi slot, looping back around if necessary
6833 */
6834 i = pf->next_vsi;
6835 while (i < pf->num_alloc_vsi && pf->vsi[i])
6836 i++;
6837 if (i >= pf->num_alloc_vsi) {
6838 i = 0;
6839 while (i < pf->next_vsi && pf->vsi[i])
6840 i++;
6841 }
6842
6843 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
6844 vsi_idx = i; /* Found one! */
6845 } else {
6846 ret = -ENODEV;
6847 goto unlock_pf; /* out of VSI slots! */
6848 }
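	/* remember where to resume the search for the next free slot */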
6849 pf->next_vsi = ++i;
6850
6851 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
6852 if (!vsi) {
6853 ret = -ENOMEM;
6854 goto unlock_pf;
6855 }
6856 vsi->type = type;
6857 vsi->back = pf;
6858 set_bit(__I40E_DOWN, &vsi->state);
6859 vsi->flags = 0;
6860 vsi->idx = vsi_idx;
6861 vsi->rx_itr_setting = pf->rx_itr_default;
6862 vsi->tx_itr_setting = pf->tx_itr_default;
6863 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
6864 pf->rss_table_size : 64;
6865 vsi->netdev_registered = false;
6866 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
6867 INIT_LIST_HEAD(&vsi->mac_filter_list);
6868 vsi->irqs_ready = false;
6869
6870 ret = i40e_set_num_rings_in_vsi(vsi);
6871 if (ret)
6872 goto err_rings;
6873
6874 ret = i40e_vsi_alloc_arrays(vsi, true);
6875 if (ret)
6876 goto err_rings;
6877
6878 /* Setup default MSIX irq handler for VSI */
6879 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
6880
6881 pf->vsi[vsi_idx] = vsi;
6882 ret = vsi_idx;
6883 goto unlock_pf;
6884
6885 err_rings:
6886 pf->next_vsi = i - 1;
6887 kfree(vsi);
6888 unlock_pf:
6889 mutex_unlock(&pf->switch_mutex);
6890 return ret;
6891 }
6892
6893 /**
6894 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
6895 * @vsi: pointer to the VSI
6896 * @free_qvectors: a bool to specify if q_vectors need to be freed.
6897 *
6898 * Frees the ring pointer array and, if requested, the q_vector pointer
6899 * array. Unlike the alloc counterpart, there is no return value.
6900 **/
6901 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
6902 {
6903 /* free the ring and vector containers */
6904 if (free_qvectors) {
6905 kfree(vsi->q_vectors);
6906 vsi->q_vectors = NULL;
6907 }
6908 kfree(vsi->tx_rings);
6909 vsi->tx_rings = NULL;
6910 vsi->rx_rings = NULL;
6911 }
6912
6913 /**
6914 * i40e_vsi_clear - Deallocate the VSI provided
6915 * @vsi: the VSI being un-configured
6916 **/
6917 static int i40e_vsi_clear(struct i40e_vsi *vsi)
6918 {
6919 struct i40e_pf *pf;
6920
6921 if (!vsi)
6922 return 0;
6923
6924 if (!vsi->back)
6925 goto free_vsi;
6926 pf = vsi->back;
6927
6928 mutex_lock(&pf->switch_mutex);
6929 if (!pf->vsi[vsi->idx]) {
6930 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
6931 vsi->idx, vsi->idx, vsi, vsi->type);
6932 goto unlock_vsi;
6933 }
6934
6935 if (pf->vsi[vsi->idx] != vsi) {
6936 dev_err(&pf->pdev->dev,
6937 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
6938 pf->vsi[vsi->idx]->idx,
6939 pf->vsi[vsi->idx],
6940 pf->vsi[vsi->idx]->type,
6941 vsi->idx, vsi, vsi->type);
6942 goto unlock_vsi;
6943 }
6944
6945 /* updates the PF for this cleared vsi */
6946 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
6947 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
6948
6949 i40e_vsi_free_arrays(vsi, true);
6950
6951 pf->vsi[vsi->idx] = NULL;
6952 if (vsi->idx < pf->next_vsi)
6953 pf->next_vsi = vsi->idx;
6954
6955 unlock_vsi:
6956 mutex_unlock(&pf->switch_mutex);
6957 free_vsi:
6958 kfree(vsi);
6959
6960 return 0;
6961 }
6962
6963 /**
6964 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
6965 * @vsi: the VSI being cleaned
6966 **/
6967 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
6968 {
6969 int i;
6970
6971 if (vsi->tx_rings && vsi->tx_rings[0]) {
6972 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
6973 kfree_rcu(vsi->tx_rings[i], rcu);
6974 vsi->tx_rings[i] = NULL;
6975 vsi->rx_rings[i] = NULL;
6976 }
6977 }
6978 }
6979
6980 /**
6981 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
6982 * @vsi: the VSI being configured
6983 **/
6984 static int i40e_alloc_rings(struct i40e_vsi *vsi)
6985 {
6986 struct i40e_ring *tx_ring, *rx_ring;
6987 struct i40e_pf *pf = vsi->back;
6988 int i;
6989
6990 /* Set basic values in the rings to be used later during open() */
6991 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
6992 /* allocate space for both Tx and Rx in one shot */
6993 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
6994 if (!tx_ring)
6995 goto err_out;
6996
6997 tx_ring->queue_index = i;
6998 tx_ring->reg_idx = vsi->base_queue + i;
6999 tx_ring->ring_active = false;
7000 tx_ring->vsi = vsi;
7001 tx_ring->netdev = vsi->netdev;
7002 tx_ring->dev = &pf->pdev->dev;
7003 tx_ring->count = vsi->num_desc;
7004 tx_ring->size = 0;
7005 tx_ring->dcb_tc = 0;
7006 vsi->tx_rings[i] = tx_ring;
7007
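		/* the Rx ring is the second element of the two-ring
		 * allocation made above
		 */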
7008 rx_ring = &tx_ring[1];
7009 rx_ring->queue_index = i;
7010 rx_ring->reg_idx = vsi->base_queue + i;
7011 rx_ring->ring_active = false;
7012 rx_ring->vsi = vsi;
7013 rx_ring->netdev = vsi->netdev;
7014 rx_ring->dev = &pf->pdev->dev;
7015 rx_ring->count = vsi->num_desc;
7016 rx_ring->size = 0;
7017 rx_ring->dcb_tc = 0;
7018 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
7019 set_ring_16byte_desc_enabled(rx_ring);
7020 else
7021 clear_ring_16byte_desc_enabled(rx_ring);
7022 vsi->rx_rings[i] = rx_ring;
7023 }
7024
7025 return 0;
7026
7027 err_out:
7028 i40e_vsi_clear_rings(vsi);
7029 return -ENOMEM;
7030 }
7031
7032 /**
7033 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7034 * @pf: board private structure
7035 * @vectors: the number of MSI-X vectors to request
7036 *
7037 * Returns the number of vectors reserved, or error
7038 **/
7039 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7040 {
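	/* pci_enable_msix_range() grants anywhere from I40E_MIN_MSIX up to
	 * the requested count, or returns a negative errno if even the
	 * minimum cannot be met
	 */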
7041 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7042 I40E_MIN_MSIX, vectors);
7043 if (vectors < 0) {
7044 dev_info(&pf->pdev->dev,
7045 "MSI-X vector reservation failed: %d\n", vectors);
7046 vectors = 0;
7047 }
7048
7049 return vectors;
7050 }
7051
7052 /**
7053 * i40e_init_msix - Setup the MSIX capability
7054 * @pf: board private structure
7055 *
7056 * Work with the OS to set up the MSIX vectors needed.
7057 *
7058 * Returns the number of vectors reserved or negative on failure
7059 **/
7060 static int i40e_init_msix(struct i40e_pf *pf)
7061 {
7062 struct i40e_hw *hw = &pf->hw;
7063 int vectors_left;
7064 int v_budget, i;
7065 int v_actual;
7066
7067 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
7068 return -ENODEV;
7069
7070 /* The number of vectors we'll request will consist of:
7071 * - 1 for the "other" cause: Admin Queue events, etc.
7072 * - The number of LAN queue pairs
7073 * - Queues being used for RSS.
7074 * We don't need as many as max_rss_size vectors;
7075 * use rss_size instead in the calculation since that
7076 * is governed by the number of CPUs in the system
7077 * and assumes symmetric Tx/Rx pairing.
7078 * - The number of VMDq pairs
7079 #ifdef I40E_FCOE
7080 * - The number of FCOE qps.
7081 #endif
7082 * Once we count this up, try the request.
7083 *
7084 * If we can't get what we want, we'll simplify to nearly nothing
7085 * and try again. If that still fails, we punt.
7086 */
7087 vectors_left = hw->func_caps.num_msix_vectors;
7088 v_budget = 0;
7089
7090 /* reserve one vector for miscellaneous handler */
7091 if (vectors_left) {
7092 v_budget++;
7093 vectors_left--;
7094 }
7095
7096 /* reserve vectors for the main PF traffic queues */
7097 pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
7098 vectors_left -= pf->num_lan_msix;
7099 v_budget += pf->num_lan_msix;
7100
7101 /* reserve one vector for sideband flow director */
7102 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7103 if (vectors_left) {
7104 v_budget++;
7105 vectors_left--;
7106 } else {
7107 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7108 }
7109 }
7110
7111 #ifdef I40E_FCOE
7112 /* can we reserve enough for FCoE? */
7113 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7114 if (!vectors_left)
7115 pf->num_fcoe_msix = 0;
7116 else if (vectors_left >= pf->num_fcoe_qps)
7117 pf->num_fcoe_msix = pf->num_fcoe_qps;
7118 else
7119 pf->num_fcoe_msix = 1;
7120 v_budget += pf->num_fcoe_msix;
7121 vectors_left -= pf->num_fcoe_msix;
7122 }
7123
7124 #endif
7125 /* any vectors left over go for VMDq support */
7126 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
7127 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
7128 int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
7129
7130 /* if we're short on vectors for what's desired, we limit
7131 * the queues per vmdq. If this is still more than are
7132 * available, the user will need to change the number of
7133 * queues/vectors used by the PF later with the ethtool
7134 * channels command
7135 */
7136 if (vmdq_vecs < vmdq_vecs_wanted)
7137 pf->num_vmdq_qps = 1;
7138 pf->num_vmdq_msix = pf->num_vmdq_qps;
7139
7140 v_budget += vmdq_vecs;
7141 vectors_left -= vmdq_vecs;
7142 }
7143
7144 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
7145 GFP_KERNEL);
7146 if (!pf->msix_entries)
7147 return -ENOMEM;
7148
7149 for (i = 0; i < v_budget; i++)
7150 pf->msix_entries[i].entry = i;
7151 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
7152
7153 if (v_actual != v_budget) {
7154 /* If we have limited resources, we will start with no vectors
7155 * for the special features and then allocate vectors to some
7156 * of these features based on the policy and at the end disable
7157 * the features that did not get any vectors.
7158 */
7159 #ifdef I40E_FCOE
7160 pf->num_fcoe_qps = 0;
7161 pf->num_fcoe_msix = 0;
7162 #endif
7163 pf->num_vmdq_msix = 0;
7164 }
7165
7166 if (v_actual < I40E_MIN_MSIX) {
7167 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
7168 kfree(pf->msix_entries);
7169 pf->msix_entries = NULL;
7170 return -ENODEV;
7171
7172 } else if (v_actual == I40E_MIN_MSIX) {
7173 /* Adjust for minimal MSIX use */
7174 pf->num_vmdq_vsis = 0;
7175 pf->num_vmdq_qps = 0;
7176 pf->num_lan_qps = 1;
7177 pf->num_lan_msix = 1;
7178
7179 } else if (v_actual != v_budget) {
7180 int vec;
7181
7182 /* reserve the misc vector */
7183 vec = v_actual - 1;
7184
7185 /* Scale vector usage down */
7186 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
7187 pf->num_vmdq_vsis = 1;
7188 pf->num_vmdq_qps = 1;
7189 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7190
7191 /* partition out the remaining vectors */
7192 switch (vec) {
7193 case 2:
7194 pf->num_lan_msix = 1;
7195 break;
7196 case 3:
7197 #ifdef I40E_FCOE
7198 /* give one vector to FCoE */
7199 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7200 pf->num_lan_msix = 1;
7201 pf->num_fcoe_msix = 1;
7202 }
7203 #else
7204 pf->num_lan_msix = 2;
7205 #endif
7206 break;
7207 default:
7208 #ifdef I40E_FCOE
7209 /* give one vector to FCoE */
7210 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7211 pf->num_fcoe_msix = 1;
7212 vec--;
7213 }
7214 #endif
7215 /* give the rest to the PF */
7216 pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
7217 break;
7218 }
7219 }
7220
7221 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7222 (pf->num_vmdq_msix == 0)) {
7223 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
7224 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
7225 }
7226 #ifdef I40E_FCOE
7227
7228 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
7229 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
7230 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
7231 }
7232 #endif
7233 return v_actual;
7234 }
7235
7236 /**
7237 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
7238 * @vsi: the VSI being configured
7239 * @v_idx: index of the vector in the vsi struct
7240 *
7241 * We allocate one q_vector. If allocation fails we return -ENOMEM.
7242 **/
7243 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
7244 {
7245 struct i40e_q_vector *q_vector;
7246
7247 /* allocate q_vector */
7248 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7249 if (!q_vector)
7250 return -ENOMEM;
7251
7252 q_vector->vsi = vsi;
7253 q_vector->v_idx = v_idx;
7254 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
7255 if (vsi->netdev)
7256 netif_napi_add(vsi->netdev, &q_vector->napi,
7257 i40e_napi_poll, NAPI_POLL_WEIGHT);
7258
7259 q_vector->rx.latency_range = I40E_LOW_LATENCY;
7260 q_vector->tx.latency_range = I40E_LOW_LATENCY;
7261
7262 /* tie q_vector and vsi together */
7263 vsi->q_vectors[v_idx] = q_vector;
7264
7265 return 0;
7266 }
7267
7268 /**
7269 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
7270 * @vsi: the VSI being configured
7271 *
7272 * We allocate one q_vector per queue interrupt. If allocation fails we
7273 * return -ENOMEM.
7274 **/
7275 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7276 {
7277 struct i40e_pf *pf = vsi->back;
7278 int v_idx, num_q_vectors;
7279 int err;
7280
7281 /* if not MSIX, give the one vector only to the LAN VSI */
7282 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7283 num_q_vectors = vsi->num_q_vectors;
7284 else if (vsi == pf->vsi[pf->lan_vsi])
7285 num_q_vectors = 1;
7286 else
7287 return -EINVAL;
7288
7289 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7290 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
7291 if (err)
7292 goto err_out;
7293 }
7294
7295 return 0;
7296
7297 err_out:
7298 while (v_idx--)
7299 i40e_free_q_vector(vsi, v_idx);
7300
7301 return err;
7302 }
7303
7304 /**
7305 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
7306 * @pf: board private structure to initialize
7307 **/
7308 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
7309 {
7310 int vectors = 0;
7311 ssize_t size;
7312
7313 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7314 vectors = i40e_init_msix(pf);
7315 if (vectors < 0) {
7316 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
7317 #ifdef I40E_FCOE
7318 I40E_FLAG_FCOE_ENABLED |
7319 #endif
7320 I40E_FLAG_RSS_ENABLED |
7321 I40E_FLAG_DCB_CAPABLE |
7322 I40E_FLAG_SRIOV_ENABLED |
7323 I40E_FLAG_FD_SB_ENABLED |
7324 I40E_FLAG_FD_ATR_ENABLED |
7325 I40E_FLAG_VMDQ_ENABLED);
7326
7327 /* rework the queue expectations without MSIX */
7328 i40e_determine_queue_usage(pf);
7329 }
7330 }
7331
7332 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
7333 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
7334 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
7335 vectors = pci_enable_msi(pf->pdev);
7336 if (vectors < 0) {
7337 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
7338 vectors);
7339 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
7340 }
7341 vectors = 1; /* one MSI or Legacy vector */
7342 }
7343
7344 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
7345 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
7346
7347 /* set up vector assignment tracking */
7348 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
7349 pf->irq_pile = kzalloc(size, GFP_KERNEL);
7350 if (!pf->irq_pile) {
7351 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
7352 return -ENOMEM;
7353 }
7354 pf->irq_pile->num_entries = vectors;
7355 pf->irq_pile->search_hint = 0;
7356
7357 /* track first vector for misc interrupts, ignore return */
7358 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
7359
7360 return 0;
7361 }
7362
7363 /**
7364 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
7365 * @pf: board private structure
7366 *
7367 * This sets up the handler for MSIX 0, which is used to manage the
7368 * non-queue interrupts, e.g. AdminQ and errors. This is not used
7369 * when in MSI or Legacy interrupt mode.
7370 **/
7371 static int i40e_setup_misc_vector(struct i40e_pf *pf)
7372 {
7373 struct i40e_hw *hw = &pf->hw;
7374 int err = 0;
7375
7376 /* Only request the irq if this is the first time through, and
7377 * not when we're rebuilding after a Reset
7378 */
7379 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7380 err = request_irq(pf->msix_entries[0].vector,
7381 i40e_intr, 0, pf->int_name, pf);
7382 if (err) {
7383 dev_info(&pf->pdev->dev,
7384 "request_irq for %s failed: %d\n",
7385 pf->int_name, err);
7386 return -EFAULT;
7387 }
7388 }
7389
7390 i40e_enable_misc_int_causes(pf);
7391
7392 /* associate no queues to the misc vector */
7393 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
7394 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
7395
7396 i40e_flush(hw);
7397
7398 i40e_irq_dynamic_enable_icr0(pf);
7399
7400 return err;
7401 }
7402
7403 /**
7404 * i40e_config_rss - Prepare for RSS if used
7405 * @pf: board private structure
7406 **/
7407 static int i40e_config_rss(struct i40e_pf *pf)
7408 {
7409 u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
7410 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7411 struct i40e_hw *hw = &pf->hw;
7412 u32 lut = 0;
7413 int i, j;
7414 u64 hena;
7415 u32 reg_val;
7416
7417 netdev_rss_key_fill(rss_key, sizeof(rss_key));
7418 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7419 wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);
7420
7421 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
7422 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
7423 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
7424 hena |= I40E_DEFAULT_RSS_HENA;
7425 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
7426 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
7427
7428 vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
7429
7430 /* Check capability and set the table size and register per HW expectation */
7431 reg_val = rd32(hw, I40E_PFQF_CTL_0);
7432 if (pf->rss_table_size == 512)
7433 reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
7434 else
7435 reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
7436 wr32(hw, I40E_PFQF_CTL_0, reg_val);
7437
7438 /* Populate the LUT with max no. of queues in round robin fashion */
7439 for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
7440
7441 /* The assumption is that lan qp count will be the highest
7442 * qp count for any PF VSI that needs RSS.
7443 * If multiple VSIs need RSS support, all the qp counts
7444 * for those VSIs should be a power of 2 for RSS to work.
7445 * If LAN VSI is the only consumer for RSS then this requirement
7446 * is not necessary.
7447 */
7448 if (j == vsi->rss_size)
7449 j = 0;
7450 /* lut = 4-byte sliding window of 4 lut entries */
7451 lut = (lut << 8) | (j &
7452 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
7453 /* On i = 3, we have 4 entries in lut; write to the register */
7454 if ((i & 3) == 3)
7455 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
7456 }
7457 i40e_flush(hw);
7458
7459 return 0;
7460 }
7461
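/* Worked example for the HLUT fill above (illustrative numbers, assuming
 * vsi->rss_size == 4 and a rss_table_entry_width wide enough that the
 * mask leaves j intact): j cycles 0,1,2,3,0,1,2,3,... and the byte-wide
 * sliding window accumulates
 *
 *	i = 0: lut = 0x00000000		i = 2: lut = 0x00000102
 *	i = 1: lut = 0x00000001		i = 3: lut = 0x00010203
 *
 * so each (i & 3) == 3 step writes four packed entries, with the
 * earliest queue index in the most significant byte of the register.
 */
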
7462 /**
7463 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
7464 * @pf: board private structure
7465 * @queue_count: the requested queue count for rss.
7466 *
7467 * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
7468 * count, which may differ from the requested queue count.
7469 **/
7470 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
7471 {
7472 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7473 int new_rss_size;
7474
7475 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
7476 return 0;
7477
7478 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
7479
7480 if (queue_count != vsi->num_queue_pairs) {
7481 vsi->req_queue_pairs = queue_count;
7482 i40e_prep_for_reset(pf);
7483
7484 pf->rss_size = new_rss_size;
7485
7486 i40e_reset_and_rebuild(pf, true);
7487 i40e_config_rss(pf);
7488 }
7489 dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
7490 return pf->rss_size;
7491 }
7492
7493 /**
7494 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
7495 * @pf: board private structure
7496 **/
7497 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
7498 {
7499 i40e_status status;
7500 bool min_valid, max_valid;
7501 u32 max_bw, min_bw;
7502
7503 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
7504 &min_valid, &max_valid);
7505
7506 if (!status) {
7507 if (min_valid)
7508 pf->npar_min_bw = min_bw;
7509 if (max_valid)
7510 pf->npar_max_bw = max_bw;
7511 }
7512
7513 return status;
7514 }
7515
7516 /**
7517 * i40e_set_npar_bw_setting - Set BW settings for this PF partition
7518 * @pf: board private structure
7519 **/
7520 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
7521 {
7522 struct i40e_aqc_configure_partition_bw_data bw_data;
7523 i40e_status status;
7524
7525 /* Set the valid bit for this PF */
7526 bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
7527 bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
7528 bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
7529
7530 /* Set the new bandwidths */
7531 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
7532
7533 return status;
7534 }
7535
7536 /**
7537 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
7538 * @pf: board private structure
7539 **/
7540 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
7541 {
7542 /* Commit temporary BW setting to permanent NVM image */
7543 enum i40e_admin_queue_err last_aq_status;
7544 i40e_status ret;
7545 u16 nvm_word;
7546
7547 if (pf->hw.partition_id != 1) {
7548 dev_info(&pf->pdev->dev,
7549 "Commit BW only works on partition 1! This is partition %d",
7550 pf->hw.partition_id);
7551 ret = I40E_NOT_SUPPORTED;
7552 goto bw_commit_out;
7553 }
7554
7555 /* Acquire NVM for read access */
7556 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
7557 last_aq_status = pf->hw.aq.asq_last_status;
7558 if (ret) {
7559 dev_info(&pf->pdev->dev,
7560 "Cannot acquire NVM for read access, err %d: aq_err %d\n",
7561 ret, last_aq_status);
7562 goto bw_commit_out;
7563 }
7564
7565 /* Read word 0x10 of NVM - SW compatibility word 1 */
7566 ret = i40e_aq_read_nvm(&pf->hw,
7567 I40E_SR_NVM_CONTROL_WORD,
7568 0x10, sizeof(nvm_word), &nvm_word,
7569 false, NULL);
7570 /* Save off last admin queue command status before releasing
7571 * the NVM
7572 */
7573 last_aq_status = pf->hw.aq.asq_last_status;
7574 i40e_release_nvm(&pf->hw);
7575 if (ret) {
7576 dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n",
7577 ret, last_aq_status);
7578 goto bw_commit_out;
7579 }
7580
7581 /* Wait a bit for NVM release to complete */
7582 msleep(50);
7583
7584 /* Acquire NVM for write access */
7585 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
7586 last_aq_status = pf->hw.aq.asq_last_status;
7587 if (ret) {
7588 dev_info(&pf->pdev->dev,
7589 "Cannot acquire NVM for write access, err %d: aq_err %d\n",
7590 ret, last_aq_status);
7591 goto bw_commit_out;
7592 }
7593 /* Write it back out unchanged to initiate update NVM,
7594 * which will force a write of the shadow (alt) RAM to
7595 * the NVM - thus storing the bandwidth values permanently.
7596 */
7597 ret = i40e_aq_update_nvm(&pf->hw,
7598 I40E_SR_NVM_CONTROL_WORD,
7599 0x10, sizeof(nvm_word),
7600 &nvm_word, true, NULL);
7601 /* Save off last admin queue command status before releasing
7602 * the NVM
7603 */
7604 last_aq_status = pf->hw.aq.asq_last_status;
7605 i40e_release_nvm(&pf->hw);
7606 if (ret)
7607 dev_info(&pf->pdev->dev,
7608 "BW settings NOT SAVED, err %d aq_err %d\n",
7609 ret, last_aq_status);
7610 bw_commit_out:
7611
7612 return ret;
7613 }
7614
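/* Recap of the commit sequence above (informational): acquire the NVM
 * for READ, read SR word 0x10, release; then acquire for WRITE and
 * write the very same word back with the last_command flag set. The
 * content is unchanged on purpose - the update request is only the
 * trigger that makes firmware flush the shadow (alt) RAM, where the
 * partition BW values live, out to the NVM.
 */
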
7615 /**
7616 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
7617 * @pf: board private structure to initialize
7618 *
7619 * i40e_sw_init initializes the Adapter private data structure.
7620 * Fields are initialized based on PCI device information and
7621 * OS network device settings (MTU size).
7622 **/
7623 static int i40e_sw_init(struct i40e_pf *pf)
7624 {
7625 int err = 0;
7626 int size;
7627
7628 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
7629 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
7630 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
7631 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
7632 if (I40E_DEBUG_USER & debug)
7633 pf->hw.debug_mask = debug;
7634 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
7635 I40E_DEFAULT_MSG_ENABLE);
7636 }
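/* In other words, the module "debug" parameter is split in two: the
 * bits below I40E_DEBUG_USER feed netif_msg_init() and control netdev
 * message levels, while setting the I40E_DEBUG_USER bit routes the
 * full value into hw.debug_mask for HW/AQ tracing as well.
 */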
7637
7638 /* Set default capability flags */
7639 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
7640 I40E_FLAG_MSI_ENABLED |
7641 I40E_FLAG_MSIX_ENABLED;
7642
7643 if (iommu_present(&pci_bus_type))
7644 pf->flags |= I40E_FLAG_RX_PS_ENABLED;
7645 else
7646 pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
7647
7648 /* Set default ITR */
7649 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
7650 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
7651
7652 /* Depending on PF configurations, it is possible that the RSS
7653 * maximum might end up larger than the available queues
7654 */
7655 pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
7656 pf->rss_size = 1;
7657 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
7658 pf->rss_size_max = min_t(int, pf->rss_size_max,
7659 pf->hw.func_caps.num_tx_qp);
7660 if (pf->hw.func_caps.rss) {
7661 pf->flags |= I40E_FLAG_RSS_ENABLED;
7662 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
7663 }
7664
7665 /* MFP mode enabled */
7666 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
7667 pf->flags |= I40E_FLAG_MFP_ENABLED;
7668 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
7669 if (i40e_get_npar_bw_setting(pf))
7670 dev_warn(&pf->pdev->dev,
7671 "Could not get NPAR bw settings\n");
7672 else
7673 dev_info(&pf->pdev->dev,
7674 "Min BW = %8.8x, Max BW = %8.8x\n",
7675 pf->npar_min_bw, pf->npar_max_bw);
7676 }
7677
7678 /* FW/NVM is not yet fixed in this regard */
7679 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
7680 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
7681 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
7682 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
7683 /* Setup a counter for fd_atr per PF */
7684 pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
7685 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
7686 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7687 /* Setup a counter for fd_sb per PF */
7688 pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
7689 } else {
7690 dev_info(&pf->pdev->dev,
7691 "Flow Director Sideband mode Disabled in MFP mode\n");
7692 }
7693 pf->fdir_pf_filter_count =
7694 pf->hw.func_caps.fd_filters_guaranteed;
7695 pf->hw.fdir_shared_filter_count =
7696 pf->hw.func_caps.fd_filters_best_effort;
7697 }
7698
7699 if (pf->hw.func_caps.vmdq) {
7700 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
7701 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
7702 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
7703 }
7704
7705 #ifdef I40E_FCOE
7706 err = i40e_init_pf_fcoe(pf);
7707 if (err)
7708 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
7709
7710 #endif /* I40E_FCOE */
7711 #ifdef CONFIG_PCI_IOV
7712 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
7713 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
7714 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
7715 pf->num_req_vfs = min_t(int,
7716 pf->hw.func_caps.num_vfs,
7717 I40E_MAX_VF_COUNT);
7718 }
7719 #endif /* CONFIG_PCI_IOV */
7720 pf->eeprom_version = 0xDEAD;
7721 pf->lan_veb = I40E_NO_VEB;
7722 pf->lan_vsi = I40E_NO_VSI;
7723
7724 /* set up queue assignment tracking */
7725 size = sizeof(struct i40e_lump_tracking)
7726 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
7727 pf->qp_pile = kzalloc(size, GFP_KERNEL);
7728 if (!pf->qp_pile) {
7729 err = -ENOMEM;
7730 goto sw_init_done;
7731 }
7732 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
7733 pf->qp_pile->search_hint = 0;
7734
7735 pf->tx_timeout_recovery_level = 1;
7736
7737 mutex_init(&pf->switch_mutex);
7738
7739 /* If NPAR is enabled nudge the Tx scheduler */
7740 if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
7741 i40e_set_npar_bw_setting(pf);
7742
7743 sw_init_done:
7744 return err;
7745 }
7746
7747 /**
7748 * i40e_set_ntuple - set the ntuple feature flag and take action
7749 * @pf: board private structure to initialize
7750 * @features: the feature set that the stack is suggesting
7751 *
7752 * returns a bool to indicate if reset needs to happen
7753 **/
7754 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
7755 {
7756 bool need_reset = false;
7757
7758 /* Check if Flow Director n-tuple support was enabled or disabled. If
7759 * the state changed, we need to reset.
7760 */
7761 if (features & NETIF_F_NTUPLE) {
7762 /* Enable filters and mark for reset */
7763 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
7764 need_reset = true;
7765 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7766 } else {
7767 /* turn off filters, mark for reset and clear SW filter list */
7768 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7769 need_reset = true;
7770 i40e_fdir_filter_exit(pf);
7771 }
7772 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7773 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
7774 /* reset fd counters */
7775 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
7776 pf->fdir_pf_active_filters = 0;
7777 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
7778 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
7779 /* if ATR was auto disabled it can be re-enabled. */
7780 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
7781 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
7782 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
7783 }
7784 return need_reset;
7785 }
7786
7787 /**
7788 * i40e_set_features - set the netdev feature flags
7789 * @netdev: ptr to the netdev being adjusted
7790 * @features: the feature set that the stack is suggesting
7791 **/
7792 static int i40e_set_features(struct net_device *netdev,
7793 netdev_features_t features)
7794 {
7795 struct i40e_netdev_priv *np = netdev_priv(netdev);
7796 struct i40e_vsi *vsi = np->vsi;
7797 struct i40e_pf *pf = vsi->back;
7798 bool need_reset;
7799
7800 if (features & NETIF_F_HW_VLAN_CTAG_RX)
7801 i40e_vlan_stripping_enable(vsi);
7802 else
7803 i40e_vlan_stripping_disable(vsi);
7804
7805 need_reset = i40e_set_ntuple(pf, features);
7806
7807 if (need_reset)
7808 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
7809
7810 return 0;
7811 }
7812
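/* Userspace view of the two functions above (informational): the
 * ntuple path is typically exercised with ethtool, e.g.
 *
 *	ethtool -K <ifname> ntuple on	# sets NETIF_F_NTUPLE
 *	ethtool -K <ifname> ntuple off	# clears it and flushes the
 *					# sideband filter list
 *
 * so sideband rules have to be re-added after the feature is turned
 * back on.
 */
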
7813 #ifdef CONFIG_I40E_VXLAN
7814 /**
7815 * i40e_get_vxlan_port_idx - Look up a possibly offloaded UDP port for Rx
7816 * @pf: board private structure
7817 * @port: The UDP port to look up
7818 *
7819 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
7820 **/
7821 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
7822 {
7823 u8 i;
7824
7825 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7826 if (pf->vxlan_ports[i] == port)
7827 return i;
7828 }
7829
7830 return i;
7831 }
7832
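/* Note: callers also use this lookup with port == 0 to find the first
 * free slot, since a zeroed vxlan_ports[] entry means "unused" - see
 * i40e_add_vxlan_port() and i40e_del_vxlan_port() below.
 */
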
7833 /**
7834 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
7835 * @netdev: This physical port's netdev
7836 * @sa_family: Socket Family that VXLAN is notifying us about
7837 * @port: New UDP port number that VXLAN started listening to
7838 **/
7839 static void i40e_add_vxlan_port(struct net_device *netdev,
7840 sa_family_t sa_family, __be16 port)
7841 {
7842 struct i40e_netdev_priv *np = netdev_priv(netdev);
7843 struct i40e_vsi *vsi = np->vsi;
7844 struct i40e_pf *pf = vsi->back;
7845 u8 next_idx;
7846 u8 idx;
7847
7848 if (sa_family == AF_INET6)
7849 return;
7850
7851 idx = i40e_get_vxlan_port_idx(pf, port);
7852
7853 /* Check if port already exists */
7854 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7855 netdev_info(netdev, "vxlan port %d already offloaded\n",
7856 ntohs(port));
7857 return;
7858 }
7859
7860 /* Now check if there is space to add the new port */
7861 next_idx = i40e_get_vxlan_port_idx(pf, 0);
7862
7863 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7864 netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
7865 ntohs(port));
7866 return;
7867 }
7868
7869 /* New port: add it and mark its index in the bitmap */
7870 pf->vxlan_ports[next_idx] = port;
7871 pf->pending_vxlan_bitmap |= (1 << next_idx);
7872 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
7873
7874 dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
7875 }
7876
7877 /**
7878 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
7879 * @netdev: This physical port's netdev
7880 * @sa_family: Socket Family that VXLAN is notifying us about
7881 * @port: UDP port number that VXLAN stopped listening to
7882 **/
7883 static void i40e_del_vxlan_port(struct net_device *netdev,
7884 sa_family_t sa_family, __be16 port)
7885 {
7886 struct i40e_netdev_priv *np = netdev_priv(netdev);
7887 struct i40e_vsi *vsi = np->vsi;
7888 struct i40e_pf *pf = vsi->back;
7889 u8 idx;
7890
7891 if (sa_family == AF_INET6)
7892 return;
7893
7894 idx = i40e_get_vxlan_port_idx(pf, port);
7895
7896 /* Check if port already exists */
7897 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7898 /* if port exists, set it to 0 (mark for deletion)
7899 * and make it pending
7900 */
7901 pf->vxlan_ports[idx] = 0;
7902 pf->pending_vxlan_bitmap |= (1 << idx);
7903 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
7904
7905 dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
7906 ntohs(port));
7907 } else {
7908 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
7909 ntohs(port));
7910 }
7911 }
7912
7913 #endif
7914 static int i40e_get_phys_port_id(struct net_device *netdev,
7915 struct netdev_phys_item_id *ppid)
7916 {
7917 struct i40e_netdev_priv *np = netdev_priv(netdev);
7918 struct i40e_pf *pf = np->vsi->back;
7919 struct i40e_hw *hw = &pf->hw;
7920
7921 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
7922 return -EOPNOTSUPP;
7923
7924 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
7925 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
7926
7927 return 0;
7928 }
7929
7930 /**
7931 * i40e_ndo_fdb_add - add an entry to the hardware database
7932 * @ndm: the input from the stack
7933 * @tb: pointer to array of nladdr (unused)
7934 * @dev: the net device pointer
7935 * @addr: the MAC address entry being added
7936 * @flags: instructions from stack about fdb operation
7937 */
7938 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7939 struct net_device *dev,
7940 const unsigned char *addr, u16 vid,
7941 u16 flags)
7942 {
7943 struct i40e_netdev_priv *np = netdev_priv(dev);
7944 struct i40e_pf *pf = np->vsi->back;
7945 int err = 0;
7946
7947 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
7948 return -EOPNOTSUPP;
7949
7950 if (vid) {
7951 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
7952 return -EINVAL;
7953 }
7954
7955 /* Hardware does not support aging addresses, so if an
7956 * ndm_state is given, only allow permanent addresses
7957 */
7958 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
7959 netdev_info(dev, "FDB only supports static addresses\n");
7960 return -EINVAL;
7961 }
7962
7963 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
7964 err = dev_uc_add_excl(dev, addr);
7965 else if (is_multicast_ether_addr(addr))
7966 err = dev_mc_add_excl(dev, addr);
7967 else
7968 err = -EINVAL;
7969
7970 /* Only return duplicate errors if NLM_F_EXCL is set */
7971 if (err == -EEXIST && !(flags & NLM_F_EXCL))
7972 err = 0;
7973
7974 return err;
7975 }
7976
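/* Userspace view (informational): this ndo services rtnetlink fdb
 * requests such as (MAC address purely illustrative)
 *
 *	bridge fdb add 00:52:44:11:22:33 dev <ifname> self
 *
 * and, per the checks above, only static/permanent entries are
 * accepted because the hardware cannot age addresses out.
 */
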
7977 #ifdef HAVE_BRIDGE_ATTRIBS
7978 /**
7979 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
7980 * @dev: the netdev being configured
7981 * @nlh: RTNL message
7982 *
7983 * Inserts a new hardware bridge if not already created and
7984 * enables the bridging mode requested (VEB or VEPA). If the
7985 * hardware bridge has already been inserted and the request
7986 * is to change the mode then that requires a PF reset to
7987 * allow rebuild of the components with required hardware
7988 * bridge mode enabled.
7989 **/
7990 static int i40e_ndo_bridge_setlink(struct net_device *dev,
7991 struct nlmsghdr *nlh)
7992 {
7993 struct i40e_netdev_priv *np = netdev_priv(dev);
7994 struct i40e_vsi *vsi = np->vsi;
7995 struct i40e_pf *pf = vsi->back;
7996 struct i40e_veb *veb = NULL;
7997 struct nlattr *attr, *br_spec;
7998 int i, rem;
7999
8000 /* Only for PF VSI for now */
8001 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8002 return -EOPNOTSUPP;
8003
8004 /* Find the HW bridge for PF VSI */
8005 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8006 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8007 veb = pf->veb[i];
8008 }
8009
8010 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
/* a setlink request need not carry IFLA_AF_SPEC; guard the walk below */
if (!br_spec)
return -EINVAL;

8012 nla_for_each_nested(attr, br_spec, rem) {
8013 __u16 mode;
8014
8015 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8016 continue;
8017
8018 mode = nla_get_u16(attr);
8019 if ((mode != BRIDGE_MODE_VEPA) &&
8020 (mode != BRIDGE_MODE_VEB))
8021 return -EINVAL;
8022
8023 /* Insert a new HW bridge */
8024 if (!veb) {
8025 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8026 vsi->tc_config.enabled_tc);
8027 if (veb) {
8028 veb->bridge_mode = mode;
8029 i40e_config_bridge_mode(veb);
8030 } else {
8031 /* No Bridge HW offload available */
8032 return -ENOENT;
8033 }
8034 break;
8035 } else if (mode != veb->bridge_mode) {
8036 /* Existing HW bridge but different mode needs reset */
8037 veb->bridge_mode = mode;
8038 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
8039 if (mode == BRIDGE_MODE_VEB)
8040 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8041 else
8042 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8043 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8044 break;
8045 }
8046 }
8047
8048 return 0;
8049 }
8050
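/* Userspace view (informational): the embedded bridge mode is switched
 * with iproute2, e.g.
 *
 *	bridge link set dev <ifname> hwmode vepa
 *	bridge link set dev <ifname> hwmode veb
 *
 * and changing the mode of an existing VEB triggers the PF reset above
 * so the switch components get rebuilt in the requested mode.
 */
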
8051 /**
8052 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
8053 * @skb: skb buff
8054 * @pid: process id
8055 * @seq: RTNL message seq #
8056 * @dev: the netdev being configured
8057 * @filter_mask: unused
8058 *
8059 * Return the mode in which the hardware bridge is operating,
8060 * i.e. VEB or VEPA.
8061 **/
8062 #ifdef HAVE_BRIDGE_FILTER
8063 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8064 struct net_device *dev,
8065 u32 __always_unused filter_mask, int nlflags)
8066 #else
8067 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8068 struct net_device *dev, int nlflags)
8069 #endif /* HAVE_BRIDGE_FILTER */
8070 {
8071 struct i40e_netdev_priv *np = netdev_priv(dev);
8072 struct i40e_vsi *vsi = np->vsi;
8073 struct i40e_pf *pf = vsi->back;
8074 struct i40e_veb *veb = NULL;
8075 int i;
8076
8077 /* Only for PF VSI for now */
8078 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8079 return -EOPNOTSUPP;
8080
8081 /* Find the HW bridge for the PF VSI */
8082 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8083 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8084 veb = pf->veb[i];
8085 }
8086
8087 if (!veb)
8088 return 0;
8089
8090 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
8091 nlflags);
8092 }
8093 #endif /* HAVE_BRIDGE_ATTRIBS */
8094
8095 static const struct net_device_ops i40e_netdev_ops = {
8096 .ndo_open = i40e_open,
8097 .ndo_stop = i40e_close,
8098 .ndo_start_xmit = i40e_lan_xmit_frame,
8099 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
8100 .ndo_set_rx_mode = i40e_set_rx_mode,
8101 .ndo_validate_addr = eth_validate_addr,
8102 .ndo_set_mac_address = i40e_set_mac,
8103 .ndo_change_mtu = i40e_change_mtu,
8104 .ndo_do_ioctl = i40e_ioctl,
8105 .ndo_tx_timeout = i40e_tx_timeout,
8106 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
8107 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
8108 #ifdef CONFIG_NET_POLL_CONTROLLER
8109 .ndo_poll_controller = i40e_netpoll,
8110 #endif
8111 .ndo_setup_tc = i40e_setup_tc,
8112 #ifdef I40E_FCOE
8113 .ndo_fcoe_enable = i40e_fcoe_enable,
8114 .ndo_fcoe_disable = i40e_fcoe_disable,
8115 #endif
8116 .ndo_set_features = i40e_set_features,
8117 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
8118 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
8119 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
8120 .ndo_get_vf_config = i40e_ndo_get_vf_config,
8121 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
8122 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
8123 #ifdef CONFIG_I40E_VXLAN
8124 .ndo_add_vxlan_port = i40e_add_vxlan_port,
8125 .ndo_del_vxlan_port = i40e_del_vxlan_port,
8126 #endif
8127 .ndo_get_phys_port_id = i40e_get_phys_port_id,
8128 .ndo_fdb_add = i40e_ndo_fdb_add,
8129 #ifdef HAVE_BRIDGE_ATTRIBS
8130 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
8131 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
8132 #endif /* HAVE_BRIDGE_ATTRIBS */
8133 };
8134
8135 /**
8136 * i40e_config_netdev - Setup the netdev flags
8137 * @vsi: the VSI being configured
8138 *
8139 * Returns 0 on success, negative value on failure
8140 **/
8141 static int i40e_config_netdev(struct i40e_vsi *vsi)
8142 {
8143 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
8144 struct i40e_pf *pf = vsi->back;
8145 struct i40e_hw *hw = &pf->hw;
8146 struct i40e_netdev_priv *np;
8147 struct net_device *netdev;
8148 u8 mac_addr[ETH_ALEN];
8149 int etherdev_size;
8150
8151 etherdev_size = sizeof(struct i40e_netdev_priv);
8152 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
8153 if (!netdev)
8154 return -ENOMEM;
8155
8156 vsi->netdev = netdev;
8157 np = netdev_priv(netdev);
8158 np->vsi = vsi;
8159
8160 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
8161 NETIF_F_GSO_UDP_TUNNEL |
8162 NETIF_F_TSO;
8163
8164 netdev->features = NETIF_F_SG |
8165 NETIF_F_IP_CSUM |
8166 NETIF_F_SCTP_CSUM |
8167 NETIF_F_HIGHDMA |
8168 NETIF_F_GSO_UDP_TUNNEL |
8169 NETIF_F_HW_VLAN_CTAG_TX |
8170 NETIF_F_HW_VLAN_CTAG_RX |
8171 NETIF_F_HW_VLAN_CTAG_FILTER |
8172 NETIF_F_IPV6_CSUM |
8173 NETIF_F_TSO |
8174 NETIF_F_TSO_ECN |
8175 NETIF_F_TSO6 |
8176 NETIF_F_RXCSUM |
8177 NETIF_F_RXHASH |
8178 0;
8179
8180 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
8181 netdev->features |= NETIF_F_NTUPLE;
8182
8183 /* copy netdev features into list of user selectable features */
8184 netdev->hw_features |= netdev->features;
8185
8186 if (vsi->type == I40E_VSI_MAIN) {
8187 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
8188 ether_addr_copy(mac_addr, hw->mac.perm_addr);
8189 /* The following steps are necessary to prevent reception
8190 * of tagged packets - some older NVM configurations load a
8191 * default MAC-VLAN filter that accepts any tagged packet
8192 * which must be replaced by a normal filter.
8193 */
8194 if (!i40e_rm_default_mac_filter(vsi, mac_addr))
8195 i40e_add_filter(vsi, mac_addr,
8196 I40E_VLAN_ANY, false, true);
8197 } else {
8198 /* relate the VSI_VMDQ name to the VSI_MAIN name */
8199 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
8200 pf->vsi[pf->lan_vsi]->netdev->name);
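/* e.g. if the main interface is "eth0", this leaves the template
 * "eth0v%d" in netdev->name, and register_netdev() later expands
 * the %d to give eth0v0, eth0v1, ... (names illustrative)
 */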
8201 random_ether_addr(mac_addr);
8202 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
8203 }
8204 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
8205
8206 ether_addr_copy(netdev->dev_addr, mac_addr);
8207 ether_addr_copy(netdev->perm_addr, mac_addr);
8208 /* vlan gets same features (except vlan offload)
8209 * after any tweaks for specific VSI types
8210 */
8211 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
8212 NETIF_F_HW_VLAN_CTAG_RX |
8213 NETIF_F_HW_VLAN_CTAG_FILTER);
8214 netdev->priv_flags |= IFF_UNICAST_FLT;
8215 netdev->priv_flags |= IFF_SUPP_NOFCS;
8216 /* Setup netdev TC information */
8217 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
8218
8219 netdev->netdev_ops = &i40e_netdev_ops;
8220 netdev->watchdog_timeo = 5 * HZ;
8221 i40e_set_ethtool_ops(netdev);
8222 #ifdef I40E_FCOE
8223 i40e_fcoe_config_netdev(netdev, vsi);
8224 #endif
8225
8226 return 0;
8227 }
8228
8229 /**
8230 * i40e_vsi_delete - Delete a VSI from the switch
8231 * @vsi: the VSI being removed
8234 **/
8235 static void i40e_vsi_delete(struct i40e_vsi *vsi)
8236 {
8237 /* remove default VSI is not allowed */
8238 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
8239 return;
8240
8241 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
8242 }
8243
8244 /**
8245 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
8246 * @vsi: the VSI being queried
8247 *
8248 * Returns 1 if the HW bridge mode is VEB; returns 0 in case of VEPA mode
8249 **/
8250 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
8251 {
8252 struct i40e_veb *veb;
8253 struct i40e_pf *pf = vsi->back;
8254
8255 /* Uplink is not a bridge so default to VEB */
8256 if (vsi->veb_idx == I40E_NO_VEB)
8257 return 1;
8258
8259 veb = pf->veb[vsi->veb_idx];
8260 /* Uplink is a bridge in VEPA mode */
8261 if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
8262 return 0;
8263
8264 /* Uplink is a bridge in VEB mode */
8265 return 1;
8266 }
8267
8268 /**
8269 * i40e_add_vsi - Add a VSI to the switch
8270 * @vsi: the VSI being configured
8271 *
8272 * This initializes a VSI context depending on the VSI type to be added and
8273 * passes it down to the add_vsi aq command.
8274 **/
8275 static int i40e_add_vsi(struct i40e_vsi *vsi)
8276 {
8277 int ret = -ENODEV;
8278 struct i40e_mac_filter *f, *ftmp;
8279 struct i40e_pf *pf = vsi->back;
8280 struct i40e_hw *hw = &pf->hw;
8281 struct i40e_vsi_context ctxt;
8282 u8 enabled_tc = 0x1; /* TC0 enabled */
8283 int f_count = 0;
8284
8285 memset(&ctxt, 0, sizeof(ctxt));
8286 switch (vsi->type) {
8287 case I40E_VSI_MAIN:
8288 /* The PF's main VSI is already setup as part of the
8289 * device initialization, so we'll not bother with
8290 * the add_vsi call, but we will retrieve the current
8291 * VSI context.
8292 */
8293 ctxt.seid = pf->main_vsi_seid;
8294 ctxt.pf_num = pf->hw.pf_id;
8295 ctxt.vf_num = 0;
8296 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8297 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8298 if (ret) {
8299 dev_info(&pf->pdev->dev,
8300 "couldn't get PF vsi config, err %d, aq_err %d\n",
8301 ret, pf->hw.aq.asq_last_status);
8302 return -ENOENT;
8303 }
8304 vsi->info = ctxt.info;
8305 vsi->info.valid_sections = 0;
8306
8307 vsi->seid = ctxt.seid;
8308 vsi->id = ctxt.vsi_number;
8309
8310 enabled_tc = i40e_pf_get_tc_map(pf);
8311
8312 /* MFP mode setup queue map and update VSI */
8313 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
8314 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
8315 memset(&ctxt, 0, sizeof(ctxt));
8316 ctxt.seid = pf->main_vsi_seid;
8317 ctxt.pf_num = pf->hw.pf_id;
8318 ctxt.vf_num = 0;
8319 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
8320 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
8321 if (ret) {
8322 dev_info(&pf->pdev->dev,
8323 "update vsi failed, aq_err=%d\n",
8324 pf->hw.aq.asq_last_status);
8325 ret = -ENOENT;
8326 goto err;
8327 }
8328 /* update the local VSI info queue map */
8329 i40e_vsi_update_queue_map(vsi, &ctxt);
8330 vsi->info.valid_sections = 0;
8331 } else {
8332 /* Default/Main VSI is only enabled for TC0
8333 * reconfigure it to enable all TCs that are
8334 * available on the port in SFP mode.
8335 * For MFP case the iSCSI PF would use this
8336 * flow to enable LAN+iSCSI TC.
8337 */
8338 ret = i40e_vsi_config_tc(vsi, enabled_tc);
8339 if (ret) {
8340 dev_info(&pf->pdev->dev,
8341 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
8342 enabled_tc, ret,
8343 pf->hw.aq.asq_last_status);
8344 ret = -ENOENT;
8345 }
8346 }
8347 break;
8348
8349 case I40E_VSI_FDIR:
8350 ctxt.pf_num = hw->pf_id;
8351 ctxt.vf_num = 0;
8352 ctxt.uplink_seid = vsi->uplink_seid;
8353 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8354 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8355 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
8356 (i40e_is_vsi_uplink_mode_veb(vsi))) {
8357 ctxt.info.valid_sections |=
8358 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8359 ctxt.info.switch_id =
8360 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8361 }
8362 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8363 break;
8364
8365 case I40E_VSI_VMDQ2:
8366 ctxt.pf_num = hw->pf_id;
8367 ctxt.vf_num = 0;
8368 ctxt.uplink_seid = vsi->uplink_seid;
8369 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8370 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
8371
8372 /* This VSI is connected to VEB so the switch_id
8373 * should be set to zero by default.
8374 */
8375 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
8376 ctxt.info.valid_sections |=
8377 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8378 ctxt.info.switch_id =
8379 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8380 }
8381
8382 /* Setup the VSI tx/rx queue map for TC0 only for now */
8383 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8384 break;
8385
8386 case I40E_VSI_SRIOV:
8387 ctxt.pf_num = hw->pf_id;
8388 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
8389 ctxt.uplink_seid = vsi->uplink_seid;
8390 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
8391 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
8392
8393 /* This VSI is connected to VEB so the switch_id
8394 * should be set to zero by default.
8395 */
8396 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
8397 ctxt.info.valid_sections |=
8398 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8399 ctxt.info.switch_id =
8400 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8401 }
8402
8403 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
8404 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
8405 if (pf->vf[vsi->vf_id].spoofchk) {
8406 ctxt.info.valid_sections |=
8407 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
8408 ctxt.info.sec_flags |=
8409 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
8410 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
8411 }
8412 /* Setup the VSI tx/rx queue map for TC0 only for now */
8413 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
8414 break;
8415
8416 #ifdef I40E_FCOE
8417 case I40E_VSI_FCOE:
8418 ret = i40e_fcoe_vsi_init(vsi, &ctxt);
8419 if (ret) {
8420 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
8421 return ret;
8422 }
8423 break;
8424
8425 #endif /* I40E_FCOE */
8426 default:
8427 return -ENODEV;
8428 }
8429
8430 if (vsi->type != I40E_VSI_MAIN) {
8431 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
8432 if (ret) {
8433 dev_info(&vsi->back->pdev->dev,
8434 "add vsi failed, aq_err=%d\n",
8435 vsi->back->hw.aq.asq_last_status);
8436 ret = -ENOENT;
8437 goto err;
8438 }
8439 vsi->info = ctxt.info;
8440 vsi->info.valid_sections = 0;
8441 vsi->seid = ctxt.seid;
8442 vsi->id = ctxt.vsi_number;
8443 }
8444
8445 /* If macvlan filters already exist, force them to get loaded */
8446 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
8447 f->changed = true;
8448 f_count++;
8449
8450 if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
8451 struct i40e_aqc_remove_macvlan_element_data element;
8452
8453 memset(&element, 0, sizeof(element));
8454 ether_addr_copy(element.mac_addr, f->macaddr);
8455 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
8456 ret = i40e_aq_remove_macvlan(hw, vsi->seid,
8457 &element, 1, NULL);
8458 if (ret) {
8459 /* some older FW has a different default */
8460 element.flags |=
8461 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
8462 i40e_aq_remove_macvlan(hw, vsi->seid,
8463 &element, 1, NULL);
8464 }
8465
8466 i40e_aq_mac_address_write(hw,
8467 I40E_AQC_WRITE_TYPE_LAA_WOL,
8468 f->macaddr, NULL);
8469 }
8470 }
8471 if (f_count) {
8472 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
8473 pf->flags |= I40E_FLAG_FILTER_SYNC;
8474 }
8475
8476 /* Update VSI BW information */
8477 ret = i40e_vsi_get_bw_info(vsi);
8478 if (ret) {
8479 dev_info(&pf->pdev->dev,
8480 "couldn't get vsi bw info, err %d, aq_err %d\n",
8481 ret, pf->hw.aq.asq_last_status);
8482 /* VSI is already added so not tearing that up */
8483 ret = 0;
8484 }
8485
8486 err:
8487 return ret;
8488 }
8489
8490 /**
8491 * i40e_vsi_release - Delete a VSI and free its resources
8492 * @vsi: the VSI being removed
8493 *
8494 * Returns 0 on success or < 0 on error
8495 **/
8496 int i40e_vsi_release(struct i40e_vsi *vsi)
8497 {
8498 struct i40e_mac_filter *f, *ftmp;
8499 struct i40e_veb *veb = NULL;
8500 struct i40e_pf *pf;
8501 u16 uplink_seid;
8502 int i, n;
8503
8504 pf = vsi->back;
8505
8506 /* release of a VEB-owner or last VSI is not allowed */
8507 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
8508 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
8509 vsi->seid, vsi->uplink_seid);
8510 return -ENODEV;
8511 }
8512 if (vsi == pf->vsi[pf->lan_vsi] &&
8513 !test_bit(__I40E_DOWN, &pf->state)) {
8514 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
8515 return -ENODEV;
8516 }
8517
8518 uplink_seid = vsi->uplink_seid;
8519 if (vsi->type != I40E_VSI_SRIOV) {
8520 if (vsi->netdev_registered) {
8521 vsi->netdev_registered = false;
8522 if (vsi->netdev) {
8523 /* results in a call to i40e_close() */
8524 unregister_netdev(vsi->netdev);
8525 }
8526 } else {
8527 i40e_vsi_close(vsi);
8528 }
8529 i40e_vsi_disable_irq(vsi);
8530 }
8531
8532 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
8533 i40e_del_filter(vsi, f->macaddr, f->vlan,
8534 f->is_vf, f->is_netdev);
8535 i40e_sync_vsi_filters(vsi);
8536
8537 i40e_vsi_delete(vsi);
8538 i40e_vsi_free_q_vectors(vsi);
8539 if (vsi->netdev) {
8540 free_netdev(vsi->netdev);
8541 vsi->netdev = NULL;
8542 }
8543 i40e_vsi_clear_rings(vsi);
8544 i40e_vsi_clear(vsi);
8545
8546 /* If this was the last thing on the VEB, except for the
8547 * controlling VSI, remove the VEB, which puts the controlling
8548 * VSI onto the next level down in the switch.
8549 *
8550 * Well, okay, there's one more exception here: don't remove
8551 * the orphan VEBs yet. We'll wait for an explicit remove request
8552 * from up the network stack.
8553 */
8554 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
8555 if (pf->vsi[i] &&
8556 pf->vsi[i]->uplink_seid == uplink_seid &&
8557 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
8558 n++; /* count the VSIs */
8559 }
8560 }
8561 for (i = 0; i < I40E_MAX_VEB; i++) {
8562 if (!pf->veb[i])
8563 continue;
8564 if (pf->veb[i]->uplink_seid == uplink_seid)
8565 n++; /* count the VEBs */
8566 if (pf->veb[i]->seid == uplink_seid)
8567 veb = pf->veb[i];
8568 }
8569 if (n == 0 && veb && veb->uplink_seid != 0)
8570 i40e_veb_release(veb);
8571
8572 return 0;
8573 }
8574
8575 /**
8576 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
8577 * @vsi: ptr to the VSI
8578 *
8579 * This should only be called after i40e_vsi_mem_alloc() which allocates the
8580 * corresponding SW VSI structure and initializes num_queue_pairs for the
8581 * newly allocated VSI.
8582 *
8583 * Returns 0 on success or negative on failure
8584 **/
8585 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
8586 {
8587 int ret = -ENOENT;
8588 struct i40e_pf *pf = vsi->back;
8589
8590 if (vsi->q_vectors[0]) {
8591 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
8592 vsi->seid);
8593 return -EEXIST;
8594 }
8595
8596 if (vsi->base_vector) {
8597 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
8598 vsi->seid, vsi->base_vector);
8599 return -EEXIST;
8600 }
8601
8602 ret = i40e_vsi_alloc_q_vectors(vsi);
8603 if (ret) {
8604 dev_info(&pf->pdev->dev,
8605 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
8606 vsi->num_q_vectors, vsi->seid, ret);
8607 vsi->num_q_vectors = 0;
8608 goto vector_setup_out;
8609 }
8610
8611 if (vsi->num_q_vectors)
8612 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
8613 vsi->num_q_vectors, vsi->idx);
8614 if (vsi->base_vector < 0) {
8615 dev_info(&pf->pdev->dev,
8616 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
8617 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
8618 i40e_vsi_free_q_vectors(vsi);
8619 ret = -ENOENT;
8620 goto vector_setup_out;
8621 }
8622
8623 vector_setup_out:
8624 return ret;
8625 }
8626
8627 /**
8628 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
8629 * @vsi: pointer to the vsi.
8630 *
8631 * This re-allocates a vsi's queue resources.
8632 *
8633 * Returns pointer to the successfully allocated and configured VSI sw struct
8634 * on success, otherwise returns NULL on failure.
8635 **/
8636 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
8637 {
8638 struct i40e_pf *pf = vsi->back;
8639 u8 enabled_tc;
8640 int ret;
8641
8642 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
8643 i40e_vsi_clear_rings(vsi);
8644
8645 i40e_vsi_free_arrays(vsi, false);
8646 i40e_set_num_rings_in_vsi(vsi);
8647 ret = i40e_vsi_alloc_arrays(vsi, false);
8648 if (ret)
8649 goto err_vsi;
8650
8651 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
8652 if (ret < 0) {
8653 dev_info(&pf->pdev->dev,
8654 "failed to get tracking for %d queues for VSI %d err=%d\n",
8655 vsi->alloc_queue_pairs, vsi->seid, ret);
8656 goto err_vsi;
8657 }
8658 vsi->base_queue = ret;
8659
8660 /* Update the FW view of the VSI. Force a reset of TC and queue
8661 * layout configurations.
8662 */
8663 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
8664 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
8665 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
8666 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
8667
8668 /* assign it some queues */
8669 ret = i40e_alloc_rings(vsi);
8670 if (ret)
8671 goto err_rings;
8672
8673 /* map all of the rings to the q_vectors */
8674 i40e_vsi_map_rings_to_vectors(vsi);
8675 return vsi;
8676
8677 err_rings:
8678 i40e_vsi_free_q_vectors(vsi);
8679 if (vsi->netdev_registered) {
8680 vsi->netdev_registered = false;
8681 unregister_netdev(vsi->netdev);
8682 free_netdev(vsi->netdev);
8683 vsi->netdev = NULL;
8684 }
8685 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
8686 err_vsi:
8687 i40e_vsi_clear(vsi);
8688 return NULL;
8689 }
8690
8691 /**
8692 * i40e_vsi_setup - Set up a VSI by a given type
8693 * @pf: board private structure
8694 * @type: VSI type
8695 * @uplink_seid: the switch element to link to
8696 * @param1: usage depends upon VSI type. For VF types, indicates VF id
8697 *
8698 * This allocates the sw VSI structure and its queue resources, then adds the
8699 * VSI to the identified VEB.
8700 *
8701 * Returns pointer to the successfully allocated and configured VSI sw struct on
8702 * success, otherwise returns NULL on failure.
8703 **/
8704 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
8705 u16 uplink_seid, u32 param1)
8706 {
8707 struct i40e_vsi *vsi = NULL;
8708 struct i40e_veb *veb = NULL;
8709 int ret, i;
8710 int v_idx;
8711
8712 /* The requested uplink_seid must be either
8713 * - the PF's port seid
8714 * no VEB is needed because this is the PF
8715 * or this is a Flow Director special case VSI
8716 * - seid of an existing VEB
8717 * - seid of a VSI that owns an existing VEB
8718 * - seid of a VSI that doesn't own a VEB
8719 * a new VEB is created and the VSI becomes the owner
8720 * - seid of the PF VSI, which is what creates the first VEB
8721 * this is a special case of the previous
8722 *
8723 * Find which uplink_seid we were given and create a new VEB if needed
8724 */
8725 for (i = 0; i < I40E_MAX_VEB; i++) {
8726 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
8727 veb = pf->veb[i];
8728 break;
8729 }
8730 }
8731
8732 if (!veb && uplink_seid != pf->mac_seid) {
8733
8734 for (i = 0; i < pf->num_alloc_vsi; i++) {
8735 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
8736 vsi = pf->vsi[i];
8737 break;
8738 }
8739 }
8740 if (!vsi) {
8741 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
8742 uplink_seid);
8743 return NULL;
8744 }
8745
8746 if (vsi->uplink_seid == pf->mac_seid)
8747 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
8748 vsi->tc_config.enabled_tc);
8749 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
8750 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8751 vsi->tc_config.enabled_tc);
8752 if (veb) {
8753 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
8754 dev_info(&vsi->back->pdev->dev,
8755 "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
8756 __func__);
8757 return NULL;
8758 }
8759 /* We come up by default in VEPA mode if SRIOV is not
8760 * already enabled, in which case we can't force VEPA
8761 * mode.
8762 */
8763 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
8764 veb->bridge_mode = BRIDGE_MODE_VEPA;
8765 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8766 }
8767 i40e_config_bridge_mode(veb);
8768 }
8769 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8770 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8771 veb = pf->veb[i];
8772 }
8773 if (!veb) {
8774 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
8775 return NULL;
8776 }
8777
8778 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
8779 uplink_seid = veb->seid;
8780 }
8781
8782 /* get vsi sw struct */
8783 v_idx = i40e_vsi_mem_alloc(pf, type);
8784 if (v_idx < 0)
8785 goto err_alloc;
8786 vsi = pf->vsi[v_idx];
8787 if (!vsi)
8788 goto err_alloc;
8789 vsi->type = type;
8790 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
8791
8792 if (type == I40E_VSI_MAIN)
8793 pf->lan_vsi = v_idx;
8794 else if (type == I40E_VSI_SRIOV)
8795 vsi->vf_id = param1;
8796 /* assign it some queues */
8797 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
8798 vsi->idx);
8799 if (ret < 0) {
8800 dev_info(&pf->pdev->dev,
8801 "failed to get tracking for %d queues for VSI %d err=%d\n",
8802 vsi->alloc_queue_pairs, vsi->seid, ret);
8803 goto err_vsi;
8804 }
8805 vsi->base_queue = ret;
8806
8807 /* get a VSI from the hardware */
8808 vsi->uplink_seid = uplink_seid;
8809 ret = i40e_add_vsi(vsi);
8810 if (ret)
8811 goto err_vsi;
8812
8813 switch (vsi->type) {
8814 /* setup the netdev if needed */
8815 case I40E_VSI_MAIN:
8816 case I40E_VSI_VMDQ2:
8817 case I40E_VSI_FCOE:
8818 ret = i40e_config_netdev(vsi);
8819 if (ret)
8820 goto err_netdev;
8821 ret = register_netdev(vsi->netdev);
8822 if (ret)
8823 goto err_netdev;
8824 vsi->netdev_registered = true;
8825 netif_carrier_off(vsi->netdev);
8826 #ifdef CONFIG_I40E_DCB
8827 /* Setup DCB netlink interface */
8828 i40e_dcbnl_setup(vsi);
8829 #endif /* CONFIG_I40E_DCB */
8830 /* fall through */
8831
8832 case I40E_VSI_FDIR:
8833 /* set up vectors and rings if needed */
8834 ret = i40e_vsi_setup_vectors(vsi);
8835 if (ret)
8836 goto err_msix;
8837
8838 ret = i40e_alloc_rings(vsi);
8839 if (ret)
8840 goto err_rings;
8841
8842 /* map all of the rings to the q_vectors */
8843 i40e_vsi_map_rings_to_vectors(vsi);
8844
8845 i40e_vsi_reset_stats(vsi);
8846 break;
8847
8848 default:
8849 /* no netdev or rings for the other VSI types */
8850 break;
8851 }
8852
8853 return vsi;
8854
8855 err_rings:
8856 i40e_vsi_free_q_vectors(vsi);
8857 err_msix:
8858 if (vsi->netdev_registered) {
8859 vsi->netdev_registered = false;
8860 unregister_netdev(vsi->netdev);
8861 free_netdev(vsi->netdev);
8862 vsi->netdev = NULL;
8863 }
8864 err_netdev:
8865 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
8866 err_vsi:
8867 i40e_vsi_clear(vsi);
8868 err_alloc:
8869 return NULL;
8870 }
8871
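/* Hypothetical example call (illustrative only): a VMDq VSI hung off an
 * already-existing VEB would be created as
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, veb->seid, 0);
 *
 * which matches the "seid of an existing VEB" case in the uplink_seid
 * list documented above; param1 is only consumed here for
 * I40E_VSI_SRIOV, where it carries the VF id.
 */
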
8872 /**
8873 * i40e_veb_get_bw_info - Query VEB BW information
8874 * @veb: the veb to query
8875 *
8876 * Query the Tx scheduler BW configuration data for given VEB
8877 **/
8878 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
8879 {
8880 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
8881 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
8882 struct i40e_pf *pf = veb->pf;
8883 struct i40e_hw *hw = &pf->hw;
8884 u32 tc_bw_max;
8885 int ret = 0;
8886 int i;
8887
8888 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
8889 &bw_data, NULL);
8890 if (ret) {
8891 dev_info(&pf->pdev->dev,
8892 "query veb bw config failed, aq_err=%d\n",
8893 hw->aq.asq_last_status);
8894 goto out;
8895 }
8896
8897 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
8898 &ets_data, NULL);
8899 if (ret) {
8900 dev_info(&pf->pdev->dev,
8901 "query veb bw ets config failed, aq_err=%d\n",
8902 hw->aq.asq_last_status);
8903 goto out;
8904 }
8905
8906 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
8907 veb->bw_max_quanta = ets_data.tc_bw_max;
8908 veb->is_abs_credits = bw_data.absolute_credits_enable;
8909 veb->enabled_tc = ets_data.tc_valid_bits;
8910 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
8911 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
8912 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
8913 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
8914 veb->bw_tc_limit_credits[i] =
8915 le16_to_cpu(bw_data.tc_bw_limits[i]);
8916 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
8917 }
8918
8919 out:
8920 return ret;
8921 }
8922
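/* Layout note (informational): tc_bw_max above concatenates two LE16
 * words into one u32 in which each traffic class owns a 4-bit nibble,
 * of which only the low 3 bits are used - hence the
 * ((tc_bw_max >> (i * 4)) & 0x7) extraction. For example, a value of
 * 0x00000321 yields max quanta 1, 2 and 3 for TC0, TC1 and TC2.
 */
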
8923 /**
8924 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
8925 * @pf: board private structure
8926 *
8927 * On error: returns error code (negative)
8928 * On success: returns veb index in PF (positive)
8929 **/
8930 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
8931 {
8932 int ret = -ENOENT;
8933 struct i40e_veb *veb;
8934 int i;
8935
8936 /* Need to protect the allocation of switch elements at the PF level */
8937 mutex_lock(&pf->switch_mutex);
8938
8939 /* VEB list may be fragmented if VEB creation/destruction has
8940 * been happening. We can afford to do a quick scan to look
8941 * for any free slots in the list.
8942 *
8943 * find the first empty veb slot (simple linear scan, no wrap-around)
8944 */
8945 i = 0;
8946 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
8947 i++;
8948 if (i >= I40E_MAX_VEB) {
8949 ret = -ENOMEM;
8950 goto err_alloc_veb; /* out of VEB slots! */
8951 }
8952
8953 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
8954 if (!veb) {
8955 ret = -ENOMEM;
8956 goto err_alloc_veb;
8957 }
8958 veb->pf = pf;
8959 veb->idx = i;
8960 veb->enabled_tc = 1;
8961
8962 pf->veb[i] = veb;
8963 ret = i;
8964 err_alloc_veb:
8965 mutex_unlock(&pf->switch_mutex);
8966 return ret;
8967 }
8968
8969 /**
8970 * i40e_switch_branch_release - Delete a branch of the switch tree
8971 * @branch: where to start deleting
8972 *
8973 * This uses recursion to find the tips of the branch to be
8974 * removed, deleting until we get back to and can delete this VEB.
8975 **/
8976 static void i40e_switch_branch_release(struct i40e_veb *branch)
8977 {
8978 struct i40e_pf *pf = branch->pf;
8979 u16 branch_seid = branch->seid;
8980 u16 veb_idx = branch->idx;
8981 int i;
8982
8983 /* release any VEBs on this VEB - RECURSION */
8984 for (i = 0; i < I40E_MAX_VEB; i++) {
8985 if (!pf->veb[i])
8986 continue;
8987 if (pf->veb[i]->uplink_seid == branch->seid)
8988 i40e_switch_branch_release(pf->veb[i]);
8989 }
8990
8991 /* Release the VSIs on this VEB, but not the owner VSI.
8992 *
8993 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
8994 * the VEB itself, so don't use (*branch) after this loop.
8995 */
8996 for (i = 0; i < pf->num_alloc_vsi; i++) {
8997 if (!pf->vsi[i])
8998 continue;
8999 if (pf->vsi[i]->uplink_seid == branch_seid &&
9000 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
9001 i40e_vsi_release(pf->vsi[i]);
9002 }
9003 }
9004
9005 /* There's one corner case where the VEB might not have been
9006 * removed, so double check it here and remove it if needed.
9007 * This case happens if the veb was created from the debugfs
9008 * commands and no VSIs were added to it.
9009 */
9010 if (pf->veb[veb_idx])
9011 i40e_veb_release(pf->veb[veb_idx]);
9012 }
9013
9014 /**
9015 * i40e_veb_clear - remove veb struct
9016 * @veb: the veb to remove
9017 **/
9018 static void i40e_veb_clear(struct i40e_veb *veb)
9019 {
9020 if (!veb)
9021 return;
9022
9023 if (veb->pf) {
9024 struct i40e_pf *pf = veb->pf;
9025
9026 mutex_lock(&pf->switch_mutex);
9027 if (pf->veb[veb->idx] == veb)
9028 pf->veb[veb->idx] = NULL;
9029 mutex_unlock(&pf->switch_mutex);
9030 }
9031
9032 kfree(veb);
9033 }
9034
9035 /**
9036 * i40e_veb_release - Delete a VEB and free its resources
9037 * @veb: the VEB being removed
9038 **/
9039 void i40e_veb_release(struct i40e_veb *veb)
9040 {
9041 struct i40e_vsi *vsi = NULL;
9042 struct i40e_pf *pf;
9043 int i, n = 0;
9044
9045 pf = veb->pf;
9046
9047 /* find the remaining VSI and check for extras */
9048 for (i = 0; i < pf->num_alloc_vsi; i++) {
9049 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
9050 n++;
9051 vsi = pf->vsi[i];
9052 }
9053 }
9054 if (n != 1) {
9055 dev_info(&pf->pdev->dev,
9056 "can't remove VEB %d with %d VSIs left\n",
9057 veb->seid, n);
9058 return;
9059 }
9060
9061 /* move the remaining VSI to uplink veb */
9062 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
9063 if (veb->uplink_seid) {
9064 vsi->uplink_seid = veb->uplink_seid;
9065 if (veb->uplink_seid == pf->mac_seid)
9066 vsi->veb_idx = I40E_NO_VEB;
9067 else
9068 vsi->veb_idx = veb->veb_idx;
9069 } else {
9070 /* floating VEB */
9071 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
9072 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
9073 }
9074
9075 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
9076 i40e_veb_clear(veb);
9077 }
9078
9079 /**
9080 * i40e_add_veb - create the VEB in the switch
9081 * @veb: the VEB to be instantiated
9082 * @vsi: the controlling VSI
9083 **/
9084 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
9085 {
9086 bool is_default = false;
9087 bool is_cloud = false;
9088 int ret;
9089
9090 /* get a VEB from the hardware */
9091 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
9092 veb->enabled_tc, is_default,
9093 is_cloud, &veb->seid, NULL);
9094 if (ret) {
9095 dev_info(&veb->pf->pdev->dev,
9096 "couldn't add VEB, err %d, aq_err %d\n",
9097 ret, veb->pf->hw.aq.asq_last_status);
9098 return -EPERM;
9099 }
9100
9101 /* get statistics counter */
9102 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
9103 &veb->stats_idx, NULL, NULL, NULL);
9104 if (ret) {
9105 dev_info(&veb->pf->pdev->dev,
9106 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
9107 ret, veb->pf->hw.aq.asq_last_status);
9108 return -EPERM;
9109 }
9110 ret = i40e_veb_get_bw_info(veb);
9111 if (ret) {
9112 dev_info(&veb->pf->pdev->dev,
9113 "couldn't get VEB bw info, err %d, aq_err %d\n",
9114 ret, veb->pf->hw.aq.asq_last_status);
9115 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
9116 return -ENOENT;
9117 }
9118
9119 vsi->uplink_seid = veb->seid;
9120 vsi->veb_idx = veb->idx;
9121 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
9122
9123 return 0;
9124 }
9125
9126 /**
9127 * i40e_veb_setup - Set up a VEB
9128 * @pf: board private structure
9129 * @flags: VEB setup flags
9130 * @uplink_seid: the switch element to link to
9131 * @vsi_seid: the initial VSI seid
9132 * @enabled_tc: Enabled TC bit-map
9133 *
9134 * This allocates the sw VEB structure and links it into the switch.
9135 * It is possible and legal for this to be a duplicate of an already
9136 * existing VEB. It is also possible for both uplink and vsi seids
9137 * to be zero, in order to create a floating VEB.
9138 *
9139 * Returns pointer to the successfully allocated VEB sw struct on
9140 * success, otherwise returns NULL on failure.
9141 **/
9142 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
9143 u16 uplink_seid, u16 vsi_seid,
9144 u8 enabled_tc)
9145 {
9146 struct i40e_veb *veb, *uplink_veb = NULL;
9147 int vsi_idx, veb_idx;
9148 int ret;
9149
9150 /* if one seid is 0, the other must be 0 to create a floating relay */
9151 if ((uplink_seid == 0 || vsi_seid == 0) &&
9152 (uplink_seid + vsi_seid != 0)) {
9153 dev_info(&pf->pdev->dev,
9154 "one, not both seid's are 0: uplink=%d vsi=%d\n",
9155 uplink_seid, vsi_seid);
9156 return NULL;
9157 }
9158
9159 /* make sure there is such a vsi and uplink */
9160 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
9161 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
9162 break;
9163 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
9164 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
9165 vsi_seid);
9166 return NULL;
9167 }
9168
9169 if (uplink_seid && uplink_seid != pf->mac_seid) {
9170 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
9171 if (pf->veb[veb_idx] &&
9172 pf->veb[veb_idx]->seid == uplink_seid) {
9173 uplink_veb = pf->veb[veb_idx];
9174 break;
9175 }
9176 }
9177 if (!uplink_veb) {
9178 dev_info(&pf->pdev->dev,
9179 "uplink seid %d not found\n", uplink_seid);
9180 return NULL;
9181 }
9182 }
9183
9184 /* get veb sw struct */
9185 veb_idx = i40e_veb_mem_alloc(pf);
9186 if (veb_idx < 0)
9187 goto err_alloc;
9188 veb = pf->veb[veb_idx];
9189 veb->flags = flags;
9190 veb->uplink_seid = uplink_seid;
9191 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
9192 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
9193
9194 /* create the VEB in the switch */
9195 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
9196 if (ret)
9197 goto err_veb;
9198 if (vsi_idx == pf->lan_vsi)
9199 pf->lan_veb = veb->idx;
9200
9201 return veb;
9202
9203 err_veb:
9204 i40e_veb_clear(veb);
9205 err_alloc:
9206 return NULL;
9207 }
9208
9209 /**
9210 * i40e_setup_pf_switch_element - set PF vars based on switch type
9211 * @pf: board private structure
9212 * @ele: element we are building info from
9213 * @num_reported: total number of elements
9214 * @printconfig: should we print the contents
9215 *
9216 * Helper function for extracting a few useful SEID values.
9217 **/
9218 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
9219 struct i40e_aqc_switch_config_element_resp *ele,
9220 u16 num_reported, bool printconfig)
9221 {
9222 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
9223 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
9224 u8 element_type = ele->element_type;
9225 u16 seid = le16_to_cpu(ele->seid);
9226
9227 if (printconfig)
9228 dev_info(&pf->pdev->dev,
9229 "type=%d seid=%d uplink=%d downlink=%d\n",
9230 element_type, seid, uplink_seid, downlink_seid);
9231
9232 switch (element_type) {
9233 case I40E_SWITCH_ELEMENT_TYPE_MAC:
9234 pf->mac_seid = seid;
9235 break;
9236 case I40E_SWITCH_ELEMENT_TYPE_VEB:
9237 /* Main VEB? */
9238 if (uplink_seid != pf->mac_seid)
9239 break;
9240 if (pf->lan_veb == I40E_NO_VEB) {
9241 int v;
9242
9243 /* find existing or else empty VEB */
9244 for (v = 0; v < I40E_MAX_VEB; v++) {
9245 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
9246 pf->lan_veb = v;
9247 break;
9248 }
9249 }
9250 if (pf->lan_veb == I40E_NO_VEB) {
9251 v = i40e_veb_mem_alloc(pf);
9252 if (v < 0)
9253 break;
9254 pf->lan_veb = v;
9255 }
9256 }
9257
9258 pf->veb[pf->lan_veb]->seid = seid;
9259 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
9260 pf->veb[pf->lan_veb]->pf = pf;
9261 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
9262 break;
9263 case I40E_SWITCH_ELEMENT_TYPE_VSI:
9264 if (num_reported != 1)
9265 break;
9266 /* This is immediately after a reset so we can assume this is
9267 * the PF's VSI
9268 */
9269 pf->mac_seid = uplink_seid;
9270 pf->pf_seid = downlink_seid;
9271 pf->main_vsi_seid = seid;
9272 if (printconfig)
9273 dev_info(&pf->pdev->dev,
9274 "pf_seid=%d main_vsi_seid=%d\n",
9275 pf->pf_seid, pf->main_vsi_seid);
9276 break;
9277 case I40E_SWITCH_ELEMENT_TYPE_PF:
9278 case I40E_SWITCH_ELEMENT_TYPE_VF:
9279 case I40E_SWITCH_ELEMENT_TYPE_EMP:
9280 case I40E_SWITCH_ELEMENT_TYPE_BMC:
9281 case I40E_SWITCH_ELEMENT_TYPE_PE:
9282 case I40E_SWITCH_ELEMENT_TYPE_PA:
9283 /* ignore these for now */
9284 break;
9285 default:
9286 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
9287 element_type, seid);
9288 break;
9289 }
9290 }
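
/* Illustrative sketch (hypothetical SEID values): a hand-built VSI element,
 * like the single element firmware reports right after a PF reset, would be
 * folded into pf->mac_seid, pf->pf_seid and pf->main_vsi_seid by the VSI
 * case above.
 */
#if 0
struct i40e_aqc_switch_config_element_resp ele = {
.element_type = I40E_SWITCH_ELEMENT_TYPE_VSI,
.seid = cpu_to_le16(390),
.uplink_seid = cpu_to_le16(2),
.downlink_seid = cpu_to_le16(16),
};

i40e_setup_pf_switch_element(pf, &ele, 1, true);
#endif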
9291
9292 /**
9293 * i40e_fetch_switch_configuration - Get switch config from firmware
9294 * @pf: board private structure
9295 * @printconfig: should we print the contents
9296 *
9297 * Get the current switch configuration from the device and
9298 * extract a few useful SEID values.
9299 **/
9300 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
9301 {
9302 struct i40e_aqc_get_switch_config_resp *sw_config;
9303 u16 next_seid = 0;
9304 int ret = 0;
9305 u8 *aq_buf;
9306 int i;
9307
9308 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
9309 if (!aq_buf)
9310 return -ENOMEM;
9311
9312 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
9313 do {
9314 u16 num_reported, num_total;
9315
9316 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
9317 I40E_AQ_LARGE_BUF,
9318 &next_seid, NULL);
9319 if (ret) {
9320 dev_info(&pf->pdev->dev,
9321 "get switch config failed %d aq_err=%x\n",
9322 ret, pf->hw.aq.asq_last_status);
9323 kfree(aq_buf);
9324 return -ENOENT;
9325 }
9326
9327 num_reported = le16_to_cpu(sw_config->header.num_reported);
9328 num_total = le16_to_cpu(sw_config->header.num_total);
9329
9330 if (printconfig)
9331 dev_info(&pf->pdev->dev,
9332 "header: %d reported %d total\n",
9333 num_reported, num_total);
9334
9335 for (i = 0; i < num_reported; i++) {
9336 struct i40e_aqc_switch_config_element_resp *ele =
9337 &sw_config->element[i];
9338
9339 i40e_setup_pf_switch_element(pf, ele, num_reported,
9340 printconfig);
9341 }
9342 } while (next_seid != 0);
9343
9344 kfree(aq_buf);
9345 return ret;
9346 }
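
/* Usage sketch: the same fetch with printconfig set dumps one line per
 * switch element, which is how the debug facilities show the topology.
 */
#if 0
if (i40e_fetch_switch_configuration(pf, true))
dev_info(&pf->pdev->dev, "couldn't fetch switch config\n");
#endif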
9347
9348 /**
9349 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
9350 * @pf: board private structure
9351 * @reinit: if the Main VSI needs to be re-initialized
9352 *
9353 * Returns 0 on success, negative value on failure
9354 **/
9355 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
9356 {
9357 int ret;
9358
9359 /* find out what's out there already */
9360 ret = i40e_fetch_switch_configuration(pf, false);
9361 if (ret) {
9362 dev_info(&pf->pdev->dev,
9363 "couldn't fetch switch config, err %d, aq_err %d\n",
9364 ret, pf->hw.aq.asq_last_status);
9365 return ret;
9366 }
9367 i40e_pf_reset_stats(pf);
9368
9369 /* first time setup */
9370 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
9371 struct i40e_vsi *vsi = NULL;
9372 u16 uplink_seid;
9373
9374 /* Set up the PF VSI associated with the PF's main VSI
9375 * that is already in the HW switch
9376 */
9377 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
9378 uplink_seid = pf->veb[pf->lan_veb]->seid;
9379 else
9380 uplink_seid = pf->mac_seid;
9381 if (pf->lan_vsi == I40E_NO_VSI)
9382 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
9383 else if (reinit)
9384 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
9385 if (!vsi) {
9386 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
9387 i40e_fdir_teardown(pf);
9388 return -EAGAIN;
9389 }
9390 } else {
9391 /* force a reset of TC and queue layout configurations */
9392 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
9393 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
9394 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
9395 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
9396 }
9397 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
9398
9399 i40e_fdir_sb_setup(pf);
9400
9401 /* Setup static PF queue filter control settings */
9402 ret = i40e_setup_pf_filter_control(pf);
9403 if (ret) {
9404 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
9405 ret);
9406 /* Failure here is not fatal; continue with the remaining setup steps */
9407 }
9408
9409 /* enable RSS in the HW, even for only one queue, as the stack can use
9410 * the hash
9411 */
9412 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
9413 i40e_config_rss(pf);
9414
9415 /* fill in link information and enable LSE reporting */
9416 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
9417 i40e_link_event(pf);
9418
9419 /* Initialize user-specific link properties */
9420 pf->fc_autoneg_status = !!(pf->hw.phy.link_info.an_info &
9421 I40E_AQ_AN_COMPLETED);
9422
9423 i40e_ptp_init(pf);
9424
9425 return ret;
9426 }
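
/* Usage sketch: the reset/rebuild path calls this again with reinit set so
 * the existing MAIN VSI is re-registered instead of re-allocated.
 */
#if 0
int ret = i40e_setup_pf_switch(pf, true);

if (ret)
dev_info(&pf->pdev->dev, "switch rebuild failed: %d\n", ret);
#endif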
9427
9428 /**
9429 * i40e_determine_queue_usage - Work out queue distribution
9430 * @pf: board private structure
9431 **/
9432 static void i40e_determine_queue_usage(struct i40e_pf *pf)
9433 {
9434 int queues_left;
9435
9436 pf->num_lan_qps = 0;
9437 #ifdef I40E_FCOE
9438 pf->num_fcoe_qps = 0;
9439 #endif
9440
9441 /* Find the max queues to be put into basic use. We'll always be
9442 * using TC0, whether or not DCB is running, and TC0 will get the
9443 * big RSS set.
9444 */
9445 queues_left = pf->hw.func_caps.num_tx_qp;
9446
9447 if ((queues_left == 1) ||
9448 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
9449 /* one qp for PF, no queues for anything else */
9450 queues_left = 0;
9451 pf->rss_size = pf->num_lan_qps = 1;
9452
9453 /* make sure all the fancies are disabled */
9454 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
9455 #ifdef I40E_FCOE
9456 I40E_FLAG_FCOE_ENABLED |
9457 #endif
9458 I40E_FLAG_FD_SB_ENABLED |
9459 I40E_FLAG_FD_ATR_ENABLED |
9460 I40E_FLAG_DCB_CAPABLE |
9461 I40E_FLAG_SRIOV_ENABLED |
9462 I40E_FLAG_VMDQ_ENABLED);
9463 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
9464 I40E_FLAG_FD_SB_ENABLED |
9465 I40E_FLAG_FD_ATR_ENABLED |
9466 I40E_FLAG_DCB_CAPABLE))) {
9467 /* one qp for PF */
9468 pf->rss_size = pf->num_lan_qps = 1;
9469 queues_left -= pf->num_lan_qps;
9470
9471 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
9472 #ifdef I40E_FCOE
9473 I40E_FLAG_FCOE_ENABLED |
9474 #endif
9475 I40E_FLAG_FD_SB_ENABLED |
9476 I40E_FLAG_FD_ATR_ENABLED |
9477 I40E_FLAG_DCB_ENABLED |
9478 I40E_FLAG_VMDQ_ENABLED);
9479 } else {
9480 /* Not enough queues for all TCs */
9481 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
9482 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
9483 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9484 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
9485 }
9486 pf->num_lan_qps = max_t(int, pf->rss_size_max,
9487 num_online_cpus());
9488 pf->num_lan_qps = min_t(int, pf->num_lan_qps,
9489 pf->hw.func_caps.num_tx_qp);
9490
9491 queues_left -= pf->num_lan_qps;
9492 }
9493
9494 #ifdef I40E_FCOE
9495 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
9496 if (I40E_DEFAULT_FCOE <= queues_left) {
9497 pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
9498 } else if (I40E_MINIMUM_FCOE <= queues_left) {
9499 pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
9500 } else {
9501 pf->num_fcoe_qps = 0;
9502 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
9503 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
9504 }
9505
9506 queues_left -= pf->num_fcoe_qps;
9507 }
9508
9509 #endif
9510 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9511 if (queues_left > 1) {
9512 queues_left -= 1; /* save 1 queue for FD */
9513 } else {
9514 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9515 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
9516 }
9517 }
9518
9519 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9520 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
9521 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
9522 (queues_left / pf->num_vf_qps));
9523 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
9524 }
9525
9526 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
9527 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
9528 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
9529 (queues_left / pf->num_vmdq_qps));
9530 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
9531 }
9532
9533 pf->queues_left = queues_left;
9534 #ifdef I40E_FCOE
9535 dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
9536 #endif
9537 }
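
/* Worked example (hypothetical capabilities): num_tx_qp = 64,
 * rss_size_max = 16, 8 online CPUs, FD_SB enabled, 4 requested VFs at
 * 4 qps each, no DCB and no VMDq:
 *
 *   num_lan_qps = min(max(16, 8), 64) = 16   -> queues_left = 64 - 16 = 48
 *   Flow Director sideband reserves one      -> queues_left = 47
 *   VFs get min(4, 47 / 4) = 4 VSIs * 4 qps  -> queues_left = 47 - 16 = 31
 *
 * so pf->queues_left ends up at 31 for later VMDq/VF growth.
 */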
9538
9539 /**
9540 * i40e_setup_pf_filter_control - Setup PF static filter control
9541 * @pf: PF to be setup
9542 *
9543 * i40e_setup_pf_filter_control sets up a PF's initial filter control
9544 * settings. If PE/FCoE are enabled, it also sets the per-PF filter
9545 * sizes they require, and it enables the Flow Director, ethertype and
9546 * macvlan filter settings for the PF.
9547 *
9548 * Returns 0 on success, negative on failure
9549 **/
9550 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
9551 {
9552 struct i40e_filter_control_settings *settings = &pf->filter_settings;
9553
9554 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
9555
9556 /* Flow Director is enabled */
9557 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
9558 settings->enable_fdir = true;
9559
9560 /* Ethtype and MACVLAN filters enabled for PF */
9561 settings->enable_ethtype = true;
9562 settings->enable_macvlan = true;
9563
9564 if (i40e_set_filter_control(&pf->hw, settings))
9565 return -ENOENT;
9566
9567 return 0;
9568 }
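
/* Note: with the defaults above, a PF comes up with a 128-entry RSS hash
 * LUT and ethertype/macvlan filtering always on, while FDIR support simply
 * tracks the FD_SB/FD_ATR flags chosen earlier.
 */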
9569
9570 #define INFO_STRING_LEN 255
9571 static void i40e_print_features(struct i40e_pf *pf)
9572 {
9573 struct i40e_hw *hw = &pf->hw;
9574 char *buf, *string;
9575
9576 string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
9577 if (!string) {
9578 dev_err(&pf->pdev->dev, "Features string allocation failed\n");
9579 return;
9580 }
9581
9582 buf = string;
9583
9584 buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
9585 #ifdef CONFIG_PCI_IOV
9586 buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
9587 #endif
9588 buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ",
9589 pf->hw.func_caps.num_vsis,
9590 pf->vsi[pf->lan_vsi]->num_queue_pairs,
9591 pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
9592
9593 if (pf->flags & I40E_FLAG_RSS_ENABLED)
9594 buf += sprintf(buf, "RSS ");
9595 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
9596 buf += sprintf(buf, "FD_ATR ");
9597 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9598 buf += sprintf(buf, "FD_SB ");
9599 buf += sprintf(buf, "NTUPLE ");
9600 }
9601 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
9602 buf += sprintf(buf, "DCB ");
9603 if (pf->flags & I40E_FLAG_PTP)
9604 buf += sprintf(buf, "PTP ");
9605 #ifdef I40E_FCOE
9606 if (pf->flags & I40E_FLAG_FCOE_ENABLED)
9607 buf += sprintf(buf, "FCOE ");
9608 #endif
9609
9610 BUG_ON(buf > (string + INFO_STRING_LEN));
9611 dev_info(&pf->pdev->dev, "%s\n", string);
9612 kfree(string);
9613 }
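
/* Illustrative alternative (sketch only, not driver code): the same summary
 * built with bounded snprintf() calls would make the BUG_ON() overflow
 * check above unnecessary, e.g.:
 */
#if 0
char str[INFO_STRING_LEN];
int len = 0;

len += snprintf(str + len, sizeof(str) - len,
"Features: PF-id[%d] ", hw->pf_id);
if (pf->flags & I40E_FLAG_RSS_ENABLED)
len += snprintf(str + len, sizeof(str) - len, "RSS ");
dev_info(&pf->pdev->dev, "%s\n", str);
#endif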
9614
9615 /**
9616 * i40e_probe - Device initialization routine
9617 * @pdev: PCI device information struct
9618 * @ent: entry in i40e_pci_tbl
9619 *
9620 * i40e_probe initializes a PF identified by a pci_dev structure.
9621 * The OS initialization, configuring of the PF private structure,
9622 * and a hardware reset occur.
9623 *
9624 * Returns 0 on success, negative on failure
9625 **/
9626 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9627 {
9628 struct i40e_aq_get_phy_abilities_resp abilities;
9629 unsigned long ioremap_len;
9630 struct i40e_pf *pf;
9631 struct i40e_hw *hw;
9632 static u16 pfs_found;
9633 u16 link_status;
9634 int err = 0;
9635 u32 len;
9636 u32 i;
9637
9638 err = pci_enable_device_mem(pdev);
9639 if (err)
9640 return err;
9641
9642 /* set up for high or low dma */
9643 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9644 if (err) {
9645 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9646 if (err) {
9647 dev_err(&pdev->dev,
9648 "DMA configuration failed: 0x%x\n", err);
9649 goto err_dma;
9650 }
9651 }
9652
9653 /* set up pci connections */
9654 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
9655 IORESOURCE_MEM), i40e_driver_name);
9656 if (err) {
9657 dev_info(&pdev->dev,
9658 "pci_request_selected_regions failed %d\n", err);
9659 goto err_pci_reg;
9660 }
9661
9662 pci_enable_pcie_error_reporting(pdev);
9663 pci_set_master(pdev);
9664
9665 /* Now that we have a PCI connection, we need to do the
9666 * low level device setup. This is primarily setting up
9667 * the Admin Queue structures and then querying for the
9668 * device's current profile information.
9669 */
9670 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
9671 if (!pf) {
9672 err = -ENOMEM;
9673 goto err_pf_alloc;
9674 }
9675 pf->next_vsi = 0;
9676 pf->pdev = pdev;
9677 set_bit(__I40E_DOWN, &pf->state);
9678
9679 hw = &pf->hw;
9680 hw->back = pf;
9681
9682 ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
9683 I40E_MAX_CSR_SPACE);
9684
9685 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
9686 if (!hw->hw_addr) {
9687 err = -EIO;
9688 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
9689 (unsigned int)pci_resource_start(pdev, 0),
9690 (unsigned int)pci_resource_len(pdev, 0), err);
9691 goto err_ioremap;
9692 }
9693 hw->vendor_id = pdev->vendor;
9694 hw->device_id = pdev->device;
9695 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
9696 hw->subsystem_vendor_id = pdev->subsystem_vendor;
9697 hw->subsystem_device_id = pdev->subsystem_device;
9698 hw->bus.device = PCI_SLOT(pdev->devfn);
9699 hw->bus.func = PCI_FUNC(pdev->devfn);
9700 pf->instance = pfs_found;
9701
9702 if (debug != -1)
9703 pf->msg_enable = debug;
9706
9707 /* do a special CORER for clearing PXE mode once at init */
9708 if (hw->revision_id == 0 &&
9709 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
9710 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
9711 i40e_flush(hw);
9712 msleep(200);
9713 pf->corer_count++;
9714
9715 i40e_clear_pxe_mode(hw);
9716 }
9717
9718 /* Reset here to make sure all is clean and to define PF 'n' */
9719 i40e_clear_hw(hw);
9720 err = i40e_pf_reset(hw);
9721 if (err) {
9722 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
9723 goto err_pf_reset;
9724 }
9725 pf->pfr_count++;
9726
9727 hw->aq.num_arq_entries = I40E_AQ_LEN;
9728 hw->aq.num_asq_entries = I40E_AQ_LEN;
9729 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9730 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9731 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
9732
9733 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
9734 "%s-%s:misc",
9735 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
9736
9737 err = i40e_init_shared_code(hw);
9738 if (err) {
9739 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
9740 goto err_pf_reset;
9741 }
9742
9743 /* set up a default setting for link flow control */
9744 pf->hw.fc.requested_mode = I40E_FC_NONE;
9745
9746 err = i40e_init_adminq(hw);
9747 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
9748 if (err) {
9749 dev_info(&pdev->dev,
9750 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
9751 goto err_pf_reset;
9752 }
9753
9754 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
9755 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
9756 dev_info(&pdev->dev,
9757 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
9758 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
9759 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
9760 dev_info(&pdev->dev,
9761 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
9762
9763 i40e_verify_eeprom(pf);
9764
9765 /* Rev 0 hardware was never productized */
9766 if (hw->revision_id < 1)
9767 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
9768
9769 i40e_clear_pxe_mode(hw);
9770 err = i40e_get_capabilities(pf);
9771 if (err)
9772 goto err_adminq_setup;
9773
9774 err = i40e_sw_init(pf);
9775 if (err) {
9776 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
9777 goto err_sw_init;
9778 }
9779
9780 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9781 hw->func_caps.num_rx_qp,
9782 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
9783 if (err) {
9784 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
9785 goto err_init_lan_hmc;
9786 }
9787
9788 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9789 if (err) {
9790 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
9791 err = -ENOENT;
9792 goto err_configure_lan_hmc;
9793 }
9794
9795 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
9796 * Ignore error return codes because if it was already disabled via
9797 * hardware settings this will fail
9798 */
9799 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
9800 (pf->hw.aq.fw_maj_ver < 4)) {
9801 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
9802 i40e_aq_stop_lldp(hw, true, NULL);
9803 }
9804
9805 i40e_get_mac_addr(hw, hw->mac.addr);
9806 if (!is_valid_ether_addr(hw->mac.addr)) {
9807 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
9808 err = -EIO;
9809 goto err_mac_addr;
9810 }
9811 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
9812 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
9813 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
9814 if (is_valid_ether_addr(hw->mac.port_addr))
9815 pf->flags |= I40E_FLAG_PORT_ID_VALID;
9816 #ifdef I40E_FCOE
9817 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
9818 if (err)
9819 dev_info(&pdev->dev,
9820 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
9821 if (!is_valid_ether_addr(hw->mac.san_addr)) {
9822 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
9823 hw->mac.san_addr);
9824 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
9825 }
9826 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
9827 #endif /* I40E_FCOE */
9828
9829 pci_set_drvdata(pdev, pf);
9830 pci_save_state(pdev);
9831 #ifdef CONFIG_I40E_DCB
9832 err = i40e_init_pf_dcb(pf);
9833 if (err) {
9834 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
9835 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9836 /* Continue without DCB enabled */
9837 }
9838 #endif /* CONFIG_I40E_DCB */
9839
9840 /* set up periodic task facility */
9841 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
9842 pf->service_timer_period = HZ;
9843
9844 INIT_WORK(&pf->service_task, i40e_service_task);
9845 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
9846 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
9847 pf->link_check_timeout = jiffies;
9848
9849 /* WoL defaults to disabled */
9850 pf->wol_en = false;
9851 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
9852
9853 /* set up the main switch operations */
9854 i40e_determine_queue_usage(pf);
9855 err = i40e_init_interrupt_scheme(pf);
9856 if (err)
9857 goto err_switch_setup;
9858
9859 /* The number of VSIs reported by the FW is the minimum guaranteed
9860 * to us; HW supports far more and we share the remaining pool with
9861 * the other PFs. We allocate space for more than the guarantee with
9862 * the understanding that we might not get them all later.
9863 */
9864 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
9865 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
9866 else
9867 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
9868
9869 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
9870 len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
9871 pf->vsi = kzalloc(len, GFP_KERNEL);
9872 if (!pf->vsi) {
9873 err = -ENOMEM;
9874 goto err_switch_setup;
9875 }
9876
9877 #ifdef CONFIG_PCI_IOV
9878 /* prep for VF support */
9879 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9880 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
9881 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
9882 if (pci_num_vf(pdev))
9883 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
9884 }
9885 #endif
9886 err = i40e_setup_pf_switch(pf, false);
9887 if (err) {
9888 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
9889 goto err_vsis;
9890 }
9891 /* if FDIR VSI was set up, start it now */
9892 for (i = 0; i < pf->num_alloc_vsi; i++) {
9893 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
9894 i40e_vsi_open(pf->vsi[i]);
9895 break;
9896 }
9897 }
9898
9899 /* driver is only interested in link up/down and module qualification
9900 * reports from firmware
9901 */
9902 err = i40e_aq_set_phy_int_mask(&pf->hw,
9903 I40E_AQ_EVENT_LINK_UPDOWN |
9904 I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
9905 if (err)
9906 dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
9907
9908 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
9909 (pf->hw.aq.fw_maj_ver < 4)) {
9910 msleep(75);
9911 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
9912 if (err)
9913 dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
9914 pf->hw.aq.asq_last_status);
9915 }
9916 /* The main driver is (mostly) up and happy. We need to set this state
9917 * before setting up the misc vector or we get a race and the vector
9918 * ends up disabled forever.
9919 */
9920 clear_bit(__I40E_DOWN, &pf->state);
9921
9922 /* In case of MSIX we are going to setup the misc vector right here
9923 * to handle admin queue events etc. In case of legacy and MSI
9924 * the misc functionality and queue processing is combined in
9925 * the same vector and that gets setup at open.
9926 */
9927 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
9928 err = i40e_setup_misc_vector(pf);
9929 if (err) {
9930 dev_info(&pdev->dev,
9931 "setup of misc vector failed: %d\n", err);
9932 goto err_vsis;
9933 }
9934 }
9935
9936 #ifdef CONFIG_PCI_IOV
9937 /* prep for VF support */
9938 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9939 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
9940 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
9941 u32 val;
9942
9943 /* disable link interrupts for VFs */
9944 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
9945 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
9946 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
9947 i40e_flush(hw);
9948
9949 if (pci_num_vf(pdev)) {
9950 dev_info(&pdev->dev,
9951 "Active VFs found, allocating resources.\n");
9952 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
9953 if (err)
9954 dev_info(&pdev->dev,
9955 "Error %d allocating resources for existing VFs\n",
9956 err);
9957 }
9958 }
9959 #endif /* CONFIG_PCI_IOV */
9960
9961 pfs_found++;
9962
9963 i40e_dbg_pf_init(pf);
9964
9965 /* tell the firmware that we're starting */
9966 i40e_send_version(pf);
9967
9968 /* since everything's happy, start the service_task timer */
9969 mod_timer(&pf->service_timer,
9970 round_jiffies(jiffies + pf->service_timer_period));
9971
9972 #ifdef I40E_FCOE
9973 /* create FCoE interface */
9974 i40e_fcoe_vsi_setup(pf);
9975
9976 #endif
9977 /* Get the negotiated link width and speed from PCI config space */
9978 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
9979
9980 i40e_set_pci_config_data(hw, link_status);
9981
9982 dev_info(&pdev->dev, "PCI-Express: %s %s\n",
9983 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
9984 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
9985 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
9986 "Unknown"),
9987 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
9988 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
9989 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
9990 hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
9991 "Unknown"));
9992
9993 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
9994 hw->bus.speed < i40e_bus_speed_8000) {
9995 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
9996 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
9997 }
9998
9999 /* get the requested speeds from the fw */
10000 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
10001 if (err)
10002 dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n",
10003 err);
10004 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
10005
10006 /* print a string summarizing features */
10007 i40e_print_features(pf);
10008
10009 return 0;
10010
10011 /* Unwind what we've done if something failed in the setup */
10012 err_vsis:
10013 set_bit(__I40E_DOWN, &pf->state);
10014 i40e_clear_interrupt_scheme(pf);
10015 kfree(pf->vsi);
10016 err_switch_setup:
10017 i40e_reset_interrupt_capability(pf);
10018 del_timer_sync(&pf->service_timer);
10019 err_mac_addr:
10020 err_configure_lan_hmc:
10021 (void)i40e_shutdown_lan_hmc(hw);
10022 err_init_lan_hmc:
10023 kfree(pf->qp_pile);
10024 err_sw_init:
10025 err_adminq_setup:
10026 (void)i40e_shutdown_adminq(hw);
10027 err_pf_reset:
10028 iounmap(hw->hw_addr);
10029 err_ioremap:
10030 kfree(pf);
10031 err_pf_alloc:
10032 pci_disable_pcie_error_reporting(pdev);
10033 pci_release_selected_regions(pdev,
10034 pci_select_bars(pdev, IORESOURCE_MEM));
10035 err_pci_reg:
10036 err_dma:
10037 pci_disable_device(pdev);
10038 return err;
10039 }
10040
10041 /**
10042 * i40e_remove - Device removal routine
10043 * @pdev: PCI device information struct
10044 *
10045 * i40e_remove is called by the PCI subsystem to alert the driver
10046 * that it should release a PCI device. This could be caused by a
10047 * Hot-Plug event, or because the driver is going to be removed from
10048 * memory.
10049 **/
10050 static void i40e_remove(struct pci_dev *pdev)
10051 {
10052 struct i40e_pf *pf = pci_get_drvdata(pdev);
10053 i40e_status ret_code;
10054 int i;
10055
10056 i40e_dbg_pf_exit(pf);
10057
10058 i40e_ptp_stop(pf);
10059
10060 /* no more scheduling of any task */
10061 set_bit(__I40E_DOWN, &pf->state);
10062 del_timer_sync(&pf->service_timer);
10063 cancel_work_sync(&pf->service_task);
10064 i40e_fdir_teardown(pf);
10065
10066 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
10067 i40e_free_vfs(pf);
10068 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
10069 }
10070
10073 /* If there is a switch structure or any orphans, remove them.
10074 * This will leave only the PF's VSI remaining.
10075 */
10076 for (i = 0; i < I40E_MAX_VEB; i++) {
10077 if (!pf->veb[i])
10078 continue;
10079
10080 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
10081 pf->veb[i]->uplink_seid == 0)
10082 i40e_switch_branch_release(pf->veb[i]);
10083 }
10084
10085 /* Now we can shutdown the PF's VSI, just before we kill
10086 * adminq and hmc.
10087 */
10088 if (pf->vsi[pf->lan_vsi])
10089 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
10090
10091 /* shutdown and destroy the HMC */
10092 if (pf->hw.hmc.hmc_obj) {
10093 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
10094 if (ret_code)
10095 dev_warn(&pdev->dev,
10096 "Failed to destroy the HMC resources: %d\n",
10097 ret_code);
10098 }
10099
10100 /* shutdown the adminq */
10101 ret_code = i40e_shutdown_adminq(&pf->hw);
10102 if (ret_code)
10103 dev_warn(&pdev->dev,
10104 "Failed to destroy the Admin Queue resources: %d\n",
10105 ret_code);
10106
10107 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
10108 i40e_clear_interrupt_scheme(pf);
10109 for (i = 0; i < pf->num_alloc_vsi; i++) {
10110 if (pf->vsi[i]) {
10111 i40e_vsi_clear_rings(pf->vsi[i]);
10112 i40e_vsi_clear(pf->vsi[i]);
10113 pf->vsi[i] = NULL;
10114 }
10115 }
10116
10117 for (i = 0; i < I40E_MAX_VEB; i++) {
10118 kfree(pf->veb[i]);
10119 pf->veb[i] = NULL;
10120 }
10121
10122 kfree(pf->qp_pile);
10123 kfree(pf->vsi);
10124
10125 iounmap(pf->hw.hw_addr);
10126 kfree(pf);
10127 pci_release_selected_regions(pdev,
10128 pci_select_bars(pdev, IORESOURCE_MEM));
10129
10130 pci_disable_pcie_error_reporting(pdev);
10131 pci_disable_device(pdev);
10132 }
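
/* Note: the teardown order above deliberately mirrors i40e_probe() in
 * reverse -- tasks and VFs stop first, then VEBs and VSIs leave the switch,
 * then the HMC and admin queue shut down, and the PCI resources are
 * released last.
 */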
10133
10134 /**
10135 * i40e_pci_error_detected - warning that something funky happened in PCI land
10136 * @pdev: PCI device information struct
10137 *
10138 * Called to warn that something happened and the error handling steps
10139 * are in progress. Allows the driver to quiesce things, be ready for
10140 * remediation.
10141 **/
10142 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
10143 enum pci_channel_state error)
10144 {
10145 struct i40e_pf *pf = pci_get_drvdata(pdev);
10146
10147 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
10148
10149 /* shutdown all operations */
10150 if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
10151 rtnl_lock();
10152 i40e_prep_for_reset(pf);
10153 rtnl_unlock();
10154 }
10155
10156 /* Request a slot reset */
10157 return PCI_ERS_RESULT_NEED_RESET;
10158 }
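
/* For reference: the PCI AER core drives the recovery sequence --
 * error_detected() quiesces the device, slot_reset() re-enables it after
 * the link reset, and resume() rebuilds state via
 * i40e_handle_reset_warning(). Nothing in the driver calls these handlers
 * directly.
 */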
10159
10160 /**
10161 * i40e_pci_error_slot_reset - a PCI slot reset just happened
10162 * @pdev: PCI device information struct
10163 *
10164 * Called to find if the driver can work with the device now that
10165 * the pci slot has been reset. If a basic connection seems good
10166 * (registers are readable and have sane content) then return a
10167 * happy little PCI_ERS_RESULT_xxx.
10168 **/
10169 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
10170 {
10171 struct i40e_pf *pf = pci_get_drvdata(pdev);
10172 pci_ers_result_t result;
10173 int err;
10174 u32 reg;
10175
10176 dev_info(&pdev->dev, "%s\n", __func__);
10177 if (pci_enable_device_mem(pdev)) {
10178 dev_info(&pdev->dev,
10179 "Cannot re-enable PCI device after reset.\n");
10180 result = PCI_ERS_RESULT_DISCONNECT;
10181 } else {
10182 pci_set_master(pdev);
10183 pci_restore_state(pdev);
10184 pci_save_state(pdev);
10185 pci_wake_from_d3(pdev, false);
10186
10187 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
10188 if (reg == 0)
10189 result = PCI_ERS_RESULT_RECOVERED;
10190 else
10191 result = PCI_ERS_RESULT_DISCONNECT;
10192 }
10193
10194 err = pci_cleanup_aer_uncorrect_error_status(pdev);
10195 if (err) {
10196 dev_info(&pdev->dev,
10197 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
10198 err);
10199 /* non-fatal, continue */
10200 }
10201
10202 return result;
10203 }
10204
10205 /**
10206 * i40e_pci_error_resume - restart operations after PCI error recovery
10207 * @pdev: PCI device information struct
10208 *
10209 * Called to allow the driver to bring things back up after PCI error
10210 * and/or reset recovery has finished.
10211 **/
10212 static void i40e_pci_error_resume(struct pci_dev *pdev)
10213 {
10214 struct i40e_pf *pf = pci_get_drvdata(pdev);
10215
10216 dev_info(&pdev->dev, "%s\n", __func__);
10217 if (test_bit(__I40E_SUSPENDED, &pf->state))
10218 return;
10219
10220 rtnl_lock();
10221 i40e_handle_reset_warning(pf);
10222 rtnl_unlock();
10223 }
10224
10225 /**
10226 * i40e_shutdown - PCI callback for shutting down
10227 * @pdev: PCI device information struct
10228 **/
10229 static void i40e_shutdown(struct pci_dev *pdev)
10230 {
10231 struct i40e_pf *pf = pci_get_drvdata(pdev);
10232 struct i40e_hw *hw = &pf->hw;
10233
10234 set_bit(__I40E_SUSPENDED, &pf->state);
10235 set_bit(__I40E_DOWN, &pf->state);
10236 rtnl_lock();
10237 i40e_prep_for_reset(pf);
10238 rtnl_unlock();
10239
10240 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10241 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10242
10243 i40e_clear_interrupt_scheme(pf);
10244
10245 if (system_state == SYSTEM_POWER_OFF) {
10246 pci_wake_from_d3(pdev, pf->wol_en);
10247 pci_set_power_state(pdev, PCI_D3hot);
10248 }
10249 }
10250
10251 #ifdef CONFIG_PM
10252 /**
10253 * i40e_suspend - PCI callback for moving to D3
10254 * @pdev: PCI device information struct
10255 **/
10256 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
10257 {
10258 struct i40e_pf *pf = pci_get_drvdata(pdev);
10259 struct i40e_hw *hw = &pf->hw;
10260
10261 set_bit(__I40E_SUSPENDED, &pf->state);
10262 set_bit(__I40E_DOWN, &pf->state);
10263 del_timer_sync(&pf->service_timer);
10264 cancel_work_sync(&pf->service_task);
10265 i40e_fdir_teardown(pf);
10266
10267 rtnl_lock();
10268 i40e_prep_for_reset(pf);
10269 rtnl_unlock();
10270
10271 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
10272 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
10273
10274 pci_wake_from_d3(pdev, pf->wol_en);
10275 pci_set_power_state(pdev, PCI_D3hot);
10276
10277 return 0;
10278 }
10279
10280 /**
10281 * i40e_resume - PCI callback for waking up from D3
10282 * @pdev: PCI device information struct
10283 **/
10284 static int i40e_resume(struct pci_dev *pdev)
10285 {
10286 struct i40e_pf *pf = pci_get_drvdata(pdev);
10287 int err;
10288
10289 pci_set_power_state(pdev, PCI_D0);
10290 pci_restore_state(pdev);
10291 /* pci_restore_state() clears dev->state_saved, so
10292 * call pci_save_state() again to restore it.
10293 */
10294 pci_save_state(pdev);
10295
10296 err = pci_enable_device_mem(pdev);
10297 if (err) {
10298 dev_err(&pdev->dev,
10299 "%s: Cannot enable PCI device from suspend\n",
10300 __func__);
10301 return err;
10302 }
10303 pci_set_master(pdev);
10304
10305 /* no wakeup events while running */
10306 pci_wake_from_d3(pdev, false);
10307
10308 /* handling the reset will rebuild the device state */
10309 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
10310 clear_bit(__I40E_DOWN, &pf->state);
10311 rtnl_lock();
10312 i40e_reset_and_rebuild(pf, false);
10313 rtnl_unlock();
10314 }
10315
10316 return 0;
10317 }
10318
10319 #endif
10320 static const struct pci_error_handlers i40e_err_handler = {
10321 .error_detected = i40e_pci_error_detected,
10322 .slot_reset = i40e_pci_error_slot_reset,
10323 .resume = i40e_pci_error_resume,
10324 };
10325
10326 static struct pci_driver i40e_driver = {
10327 .name = i40e_driver_name,
10328 .id_table = i40e_pci_tbl,
10329 .probe = i40e_probe,
10330 .remove = i40e_remove,
10331 #ifdef CONFIG_PM
10332 .suspend = i40e_suspend,
10333 .resume = i40e_resume,
10334 #endif
10335 .shutdown = i40e_shutdown,
10336 .err_handler = &i40e_err_handler,
10337 .sriov_configure = i40e_pci_sriov_configure,
10338 };
10339
10340 /**
10341 * i40e_init_module - Driver registration routine
10342 *
10343 * i40e_init_module is the first routine called when the driver is
10344 * loaded. All it does is register with the PCI subsystem.
10345 **/
10346 static int __init i40e_init_module(void)
10347 {
10348 pr_info("%s: %s - version %s\n", i40e_driver_name,
10349 i40e_driver_string, i40e_driver_version_str);
10350 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
10351
10352 i40e_dbg_init();
10353 return pci_register_driver(&i40e_driver);
10354 }
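
/* Usage note: "modprobe i40e" runs i40e_init_module(), which logs the
 * driver banner and registers i40e_driver with the PCI core; i40e_probe()
 * is then invoked once per matching PCI function.
 */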
10355 module_init(i40e_init_module);
10356
10357 /**
10358 * i40e_exit_module - Driver exit cleanup routine
10359 *
10360 * i40e_exit_module is called just before the driver is removed
10361 * from memory.
10362 **/
10363 static void __exit i40e_exit_module(void)
10364 {
10365 pci_unregister_driver(&i40e_driver);
10366 i40e_dbg_exit();
10367 }
10368 module_exit(i40e_exit_module);