]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/ethernet/intel/i40e/i40e_main.c
i40e: disconnect irqs on shutdown
[mirror_ubuntu-artful-kernel.git] / drivers / net / ethernet / intel / i40e / i40e_main.c
CommitLineData
41c445ff
JB
1/*******************************************************************************
2 *
3 * Intel Ethernet Controller XL710 Family Linux Driver
dc641b73 4 * Copyright(c) 2013 - 2014 Intel Corporation.
41c445ff
JB
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
dc641b73
GR
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
41c445ff
JB
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 *
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 ******************************************************************************/
26
27/* Local includes */
28#include "i40e.h"
4eb3f768 29#include "i40e_diag.h"
a1c9a9d9
JK
30#ifdef CONFIG_I40E_VXLAN
31#include <net/vxlan.h>
32#endif
41c445ff
JB
33
34const char i40e_driver_name[] = "i40e";
35static const char i40e_driver_string[] =
36 "Intel(R) Ethernet Connection XL710 Network Driver";
37
38#define DRV_KERN "-k"
39
e8e724db 40#define DRV_VERSION_MAJOR 1
a36fdd8e 41#define DRV_VERSION_MINOR 2
300c34c1 42#define DRV_VERSION_BUILD 6
41c445ff
JB
43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44 __stringify(DRV_VERSION_MINOR) "." \
45 __stringify(DRV_VERSION_BUILD) DRV_KERN
46const char i40e_driver_version_str[] = DRV_VERSION;
8fb905b3 47static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
41c445ff
JB
48
49/* a bit of forward declarations */
50static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
51static void i40e_handle_reset_warning(struct i40e_pf *pf);
52static int i40e_add_vsi(struct i40e_vsi *vsi);
53static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
bc7d338f 54static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
41c445ff
JB
55static int i40e_setup_misc_vector(struct i40e_pf *pf);
56static void i40e_determine_queue_usage(struct i40e_pf *pf);
57static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
cbf61325 58static void i40e_fdir_sb_setup(struct i40e_pf *pf);
4e3b35b0 59static int i40e_veb_get_bw_info(struct i40e_veb *veb);
41c445ff
JB
60
61/* i40e_pci_tbl - PCI Device ID Table
62 *
63 * Last entry must be all 0s
64 *
65 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
66 * Class, Class Mask, private data (not used) }
67 */
9baa3c34 68static const struct pci_device_id i40e_pci_tbl[] = {
ab60085e 69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
ab60085e
SN
70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
ab60085e
SN
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
5960d33f 77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
41c445ff
JB
78 /* required last entry */
79 {0, }
80};
81MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
82
83#define I40E_MAX_VF_COUNT 128
84static int debug = -1;
85module_param(debug, int, 0);
86MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
87
88MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
89MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
90MODULE_LICENSE("GPL");
91MODULE_VERSION(DRV_VERSION);
92
93/**
94 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
95 * @hw: pointer to the HW structure
96 * @mem: ptr to mem struct to fill out
97 * @size: size of memory requested
98 * @alignment: what to align the allocation to
99 **/
100int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
101 u64 size, u32 alignment)
102{
103 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
104
105 mem->size = ALIGN(size, alignment);
106 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
107 &mem->pa, GFP_KERNEL);
93bc73b8
JB
108 if (!mem->va)
109 return -ENOMEM;
41c445ff 110
93bc73b8 111 return 0;
41c445ff
JB
112}
113
114/**
115 * i40e_free_dma_mem_d - OS specific memory free for shared code
116 * @hw: pointer to the HW structure
117 * @mem: ptr to mem struct to free
118 **/
119int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
120{
121 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
122
123 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
124 mem->va = NULL;
125 mem->pa = 0;
126 mem->size = 0;
127
128 return 0;
129}
130
131/**
132 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
133 * @hw: pointer to the HW structure
134 * @mem: ptr to mem struct to fill out
135 * @size: size of memory requested
136 **/
137int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
138 u32 size)
139{
140 mem->size = size;
141 mem->va = kzalloc(size, GFP_KERNEL);
142
93bc73b8
JB
143 if (!mem->va)
144 return -ENOMEM;
41c445ff 145
93bc73b8 146 return 0;
41c445ff
JB
147}
148
149/**
150 * i40e_free_virt_mem_d - OS specific memory free for shared code
151 * @hw: pointer to the HW structure
152 * @mem: ptr to mem struct to free
153 **/
154int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
155{
156 /* it's ok to kfree a NULL pointer */
157 kfree(mem->va);
158 mem->va = NULL;
159 mem->size = 0;
160
161 return 0;
162}
163
164/**
165 * i40e_get_lump - find a lump of free generic resource
166 * @pf: board private structure
167 * @pile: the pile of resource to search
168 * @needed: the number of items needed
169 * @id: an owner id to stick on the items assigned
170 *
171 * Returns the base item index of the lump, or negative for error
172 *
173 * The search_hint trick and lack of advanced fit-finding only work
174 * because we're highly likely to have all the same size lump requests.
175 * Linear search time and any fragmentation should be minimal.
176 **/
177static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
178 u16 needed, u16 id)
179{
180 int ret = -ENOMEM;
ddf434ac 181 int i, j;
41c445ff
JB
182
183 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
184 dev_info(&pf->pdev->dev,
185 "param err: pile=%p needed=%d id=0x%04x\n",
186 pile, needed, id);
187 return -EINVAL;
188 }
189
190 /* start the linear search with an imperfect hint */
191 i = pile->search_hint;
ddf434ac 192 while (i < pile->num_entries) {
41c445ff
JB
193 /* skip already allocated entries */
194 if (pile->list[i] & I40E_PILE_VALID_BIT) {
195 i++;
196 continue;
197 }
198
199 /* do we have enough in this lump? */
200 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
201 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
202 break;
203 }
204
205 if (j == needed) {
206 /* there was enough, so assign it to the requestor */
207 for (j = 0; j < needed; j++)
208 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
209 ret = i;
210 pile->search_hint = i + j;
ddf434ac 211 break;
41c445ff
JB
212 } else {
213 /* not enough, so skip over it and continue looking */
214 i += j;
215 }
216 }
217
218 return ret;
219}
220
221/**
222 * i40e_put_lump - return a lump of generic resource
223 * @pile: the pile of resource to search
224 * @index: the base item index
225 * @id: the owner id of the items assigned
226 *
227 * Returns the count of items in the lump
228 **/
229static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
230{
231 int valid_id = (id | I40E_PILE_VALID_BIT);
232 int count = 0;
233 int i;
234
235 if (!pile || index >= pile->num_entries)
236 return -EINVAL;
237
238 for (i = index;
239 i < pile->num_entries && pile->list[i] == valid_id;
240 i++) {
241 pile->list[i] = 0;
242 count++;
243 }
244
245 if (count && index < pile->search_hint)
246 pile->search_hint = index;
247
248 return count;
249}
250
251/**
252 * i40e_service_event_schedule - Schedule the service task to wake up
253 * @pf: board private structure
254 *
255 * If not already scheduled, this puts the task into the work queue
256 **/
257static void i40e_service_event_schedule(struct i40e_pf *pf)
258{
259 if (!test_bit(__I40E_DOWN, &pf->state) &&
260 !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
261 !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
262 schedule_work(&pf->service_task);
263}
264
265/**
266 * i40e_tx_timeout - Respond to a Tx Hang
267 * @netdev: network interface device structure
268 *
269 * If any port has noticed a Tx timeout, it is likely that the whole
270 * device is munged, not just the one netdev port, so go for the full
271 * reset.
272 **/
38e00438
VD
273#ifdef I40E_FCOE
274void i40e_tx_timeout(struct net_device *netdev)
275#else
41c445ff 276static void i40e_tx_timeout(struct net_device *netdev)
38e00438 277#endif
41c445ff
JB
278{
279 struct i40e_netdev_priv *np = netdev_priv(netdev);
280 struct i40e_vsi *vsi = np->vsi;
281 struct i40e_pf *pf = vsi->back;
282
283 pf->tx_timeout_count++;
284
285 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
327fe04b 286 pf->tx_timeout_recovery_level = 1;
41c445ff
JB
287 pf->tx_timeout_last_recovery = jiffies;
288 netdev_info(netdev, "tx_timeout recovery level %d\n",
289 pf->tx_timeout_recovery_level);
290
291 switch (pf->tx_timeout_recovery_level) {
292 case 0:
293 /* disable and re-enable queues for the VSI */
294 if (in_interrupt()) {
295 set_bit(__I40E_REINIT_REQUESTED, &pf->state);
296 set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
297 } else {
298 i40e_vsi_reinit_locked(vsi);
299 }
300 break;
301 case 1:
302 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
303 break;
304 case 2:
305 set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
306 break;
307 case 3:
308 set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
309 break;
310 default:
311 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
b5d06f05
NP
312 set_bit(__I40E_DOWN_REQUESTED, &pf->state);
313 set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
41c445ff
JB
314 break;
315 }
316 i40e_service_event_schedule(pf);
317 pf->tx_timeout_recovery_level++;
318}
319
320/**
321 * i40e_release_rx_desc - Store the new tail and head values
322 * @rx_ring: ring to bump
323 * @val: new head index
324 **/
325static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
326{
327 rx_ring->next_to_use = val;
328
329 /* Force memory writes to complete before letting h/w
330 * know there are new descriptors to fetch. (Only
331 * applicable for weak-ordered memory model archs,
332 * such as IA-64).
333 */
334 wmb();
335 writel(val, rx_ring->tail);
336}
337
338/**
339 * i40e_get_vsi_stats_struct - Get System Network Statistics
340 * @vsi: the VSI we care about
341 *
342 * Returns the address of the device statistics structure.
343 * The statistics are actually updated from the service task.
344 **/
345struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
346{
347 return &vsi->net_stats;
348}
349
350/**
351 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
352 * @netdev: network interface device structure
353 *
354 * Returns the address of the device statistics structure.
355 * The statistics are actually updated from the service task.
356 **/
38e00438
VD
357#ifdef I40E_FCOE
358struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
359 struct net_device *netdev,
360 struct rtnl_link_stats64 *stats)
361#else
41c445ff
JB
362static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
363 struct net_device *netdev,
980e9b11 364 struct rtnl_link_stats64 *stats)
38e00438 365#endif
41c445ff
JB
366{
367 struct i40e_netdev_priv *np = netdev_priv(netdev);
e7046ee1 368 struct i40e_ring *tx_ring, *rx_ring;
41c445ff 369 struct i40e_vsi *vsi = np->vsi;
980e9b11
AD
370 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
371 int i;
372
bc7d338f
ASJ
373 if (test_bit(__I40E_DOWN, &vsi->state))
374 return stats;
375
3c325ced
JB
376 if (!vsi->tx_rings)
377 return stats;
378
980e9b11
AD
379 rcu_read_lock();
380 for (i = 0; i < vsi->num_queue_pairs; i++) {
980e9b11
AD
381 u64 bytes, packets;
382 unsigned int start;
383
384 tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
385 if (!tx_ring)
386 continue;
387
388 do {
57a7744e 389 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
980e9b11
AD
390 packets = tx_ring->stats.packets;
391 bytes = tx_ring->stats.bytes;
57a7744e 392 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
980e9b11
AD
393
394 stats->tx_packets += packets;
395 stats->tx_bytes += bytes;
396 rx_ring = &tx_ring[1];
397
398 do {
57a7744e 399 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
980e9b11
AD
400 packets = rx_ring->stats.packets;
401 bytes = rx_ring->stats.bytes;
57a7744e 402 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
41c445ff 403
980e9b11
AD
404 stats->rx_packets += packets;
405 stats->rx_bytes += bytes;
406 }
407 rcu_read_unlock();
408
a5282f44 409 /* following stats updated by i40e_watchdog_subtask() */
980e9b11
AD
410 stats->multicast = vsi_stats->multicast;
411 stats->tx_errors = vsi_stats->tx_errors;
412 stats->tx_dropped = vsi_stats->tx_dropped;
413 stats->rx_errors = vsi_stats->rx_errors;
414 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
415 stats->rx_length_errors = vsi_stats->rx_length_errors;
41c445ff 416
980e9b11 417 return stats;
41c445ff
JB
418}
419
420/**
421 * i40e_vsi_reset_stats - Resets all stats of the given vsi
422 * @vsi: the VSI to have its stats reset
423 **/
424void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
425{
426 struct rtnl_link_stats64 *ns;
427 int i;
428
429 if (!vsi)
430 return;
431
432 ns = i40e_get_vsi_stats_struct(vsi);
433 memset(ns, 0, sizeof(*ns));
434 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
435 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
436 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
8e9dca53 437 if (vsi->rx_rings && vsi->rx_rings[0]) {
41c445ff 438 for (i = 0; i < vsi->num_queue_pairs; i++) {
9f65e15b
AD
439 memset(&vsi->rx_rings[i]->stats, 0 ,
440 sizeof(vsi->rx_rings[i]->stats));
441 memset(&vsi->rx_rings[i]->rx_stats, 0 ,
442 sizeof(vsi->rx_rings[i]->rx_stats));
443 memset(&vsi->tx_rings[i]->stats, 0 ,
444 sizeof(vsi->tx_rings[i]->stats));
445 memset(&vsi->tx_rings[i]->tx_stats, 0,
446 sizeof(vsi->tx_rings[i]->tx_stats));
41c445ff 447 }
8e9dca53 448 }
41c445ff
JB
449 vsi->stat_offsets_loaded = false;
450}
451
452/**
453 * i40e_pf_reset_stats - Reset all of the stats for the given pf
454 * @pf: the PF to be reset
455 **/
456void i40e_pf_reset_stats(struct i40e_pf *pf)
457{
e91fdf76
SN
458 int i;
459
41c445ff
JB
460 memset(&pf->stats, 0, sizeof(pf->stats));
461 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
462 pf->stat_offsets_loaded = false;
e91fdf76
SN
463
464 for (i = 0; i < I40E_MAX_VEB; i++) {
465 if (pf->veb[i]) {
466 memset(&pf->veb[i]->stats, 0,
467 sizeof(pf->veb[i]->stats));
468 memset(&pf->veb[i]->stats_offsets, 0,
469 sizeof(pf->veb[i]->stats_offsets));
470 pf->veb[i]->stat_offsets_loaded = false;
471 }
472 }
41c445ff
JB
473}
474
475/**
476 * i40e_stat_update48 - read and update a 48 bit stat from the chip
477 * @hw: ptr to the hardware info
478 * @hireg: the high 32 bit reg to read
479 * @loreg: the low 32 bit reg to read
480 * @offset_loaded: has the initial offset been loaded yet
481 * @offset: ptr to current offset value
482 * @stat: ptr to the stat
483 *
484 * Since the device stats are not reset at PFReset, they likely will not
485 * be zeroed when the driver starts. We'll save the first values read
486 * and use them as offsets to be subtracted from the raw values in order
487 * to report stats that count from zero. In the process, we also manage
488 * the potential roll-over.
489 **/
490static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
491 bool offset_loaded, u64 *offset, u64 *stat)
492{
493 u64 new_data;
494
ab60085e 495 if (hw->device_id == I40E_DEV_ID_QEMU) {
41c445ff
JB
496 new_data = rd32(hw, loreg);
497 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
498 } else {
499 new_data = rd64(hw, loreg);
500 }
501 if (!offset_loaded)
502 *offset = new_data;
503 if (likely(new_data >= *offset))
504 *stat = new_data - *offset;
505 else
506 *stat = (new_data + ((u64)1 << 48)) - *offset;
507 *stat &= 0xFFFFFFFFFFFFULL;
508}
509
510/**
511 * i40e_stat_update32 - read and update a 32 bit stat from the chip
512 * @hw: ptr to the hardware info
513 * @reg: the hw reg to read
514 * @offset_loaded: has the initial offset been loaded yet
515 * @offset: ptr to current offset value
516 * @stat: ptr to the stat
517 **/
518static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
519 bool offset_loaded, u64 *offset, u64 *stat)
520{
521 u32 new_data;
522
523 new_data = rd32(hw, reg);
524 if (!offset_loaded)
525 *offset = new_data;
526 if (likely(new_data >= *offset))
527 *stat = (u32)(new_data - *offset);
528 else
529 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
530}
531
532/**
533 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
534 * @vsi: the VSI to be updated
535 **/
536void i40e_update_eth_stats(struct i40e_vsi *vsi)
537{
538 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
539 struct i40e_pf *pf = vsi->back;
540 struct i40e_hw *hw = &pf->hw;
541 struct i40e_eth_stats *oes;
542 struct i40e_eth_stats *es; /* device's eth stats */
543
544 es = &vsi->eth_stats;
545 oes = &vsi->eth_stats_offsets;
546
547 /* Gather up the stats that the hw collects */
548 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
549 vsi->stat_offsets_loaded,
550 &oes->tx_errors, &es->tx_errors);
551 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
552 vsi->stat_offsets_loaded,
553 &oes->rx_discards, &es->rx_discards);
41a9e55c
SN
554 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
555 vsi->stat_offsets_loaded,
556 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
557 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
558 vsi->stat_offsets_loaded,
559 &oes->tx_errors, &es->tx_errors);
41c445ff
JB
560
561 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
562 I40E_GLV_GORCL(stat_idx),
563 vsi->stat_offsets_loaded,
564 &oes->rx_bytes, &es->rx_bytes);
565 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
566 I40E_GLV_UPRCL(stat_idx),
567 vsi->stat_offsets_loaded,
568 &oes->rx_unicast, &es->rx_unicast);
569 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
570 I40E_GLV_MPRCL(stat_idx),
571 vsi->stat_offsets_loaded,
572 &oes->rx_multicast, &es->rx_multicast);
573 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
574 I40E_GLV_BPRCL(stat_idx),
575 vsi->stat_offsets_loaded,
576 &oes->rx_broadcast, &es->rx_broadcast);
577
578 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
579 I40E_GLV_GOTCL(stat_idx),
580 vsi->stat_offsets_loaded,
581 &oes->tx_bytes, &es->tx_bytes);
582 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
583 I40E_GLV_UPTCL(stat_idx),
584 vsi->stat_offsets_loaded,
585 &oes->tx_unicast, &es->tx_unicast);
586 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
587 I40E_GLV_MPTCL(stat_idx),
588 vsi->stat_offsets_loaded,
589 &oes->tx_multicast, &es->tx_multicast);
590 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
591 I40E_GLV_BPTCL(stat_idx),
592 vsi->stat_offsets_loaded,
593 &oes->tx_broadcast, &es->tx_broadcast);
594 vsi->stat_offsets_loaded = true;
595}
596
597/**
598 * i40e_update_veb_stats - Update Switch component statistics
599 * @veb: the VEB being updated
600 **/
601static void i40e_update_veb_stats(struct i40e_veb *veb)
602{
603 struct i40e_pf *pf = veb->pf;
604 struct i40e_hw *hw = &pf->hw;
605 struct i40e_eth_stats *oes;
606 struct i40e_eth_stats *es; /* device's eth stats */
607 int idx = 0;
608
609 idx = veb->stats_idx;
610 es = &veb->stats;
611 oes = &veb->stats_offsets;
612
613 /* Gather up the stats that the hw collects */
614 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
615 veb->stat_offsets_loaded,
616 &oes->tx_discards, &es->tx_discards);
7134f9ce
JB
617 if (hw->revision_id > 0)
618 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
619 veb->stat_offsets_loaded,
620 &oes->rx_unknown_protocol,
621 &es->rx_unknown_protocol);
41c445ff
JB
622 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
623 veb->stat_offsets_loaded,
624 &oes->rx_bytes, &es->rx_bytes);
625 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
626 veb->stat_offsets_loaded,
627 &oes->rx_unicast, &es->rx_unicast);
628 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
629 veb->stat_offsets_loaded,
630 &oes->rx_multicast, &es->rx_multicast);
631 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
632 veb->stat_offsets_loaded,
633 &oes->rx_broadcast, &es->rx_broadcast);
634
635 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
636 veb->stat_offsets_loaded,
637 &oes->tx_bytes, &es->tx_bytes);
638 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
639 veb->stat_offsets_loaded,
640 &oes->tx_unicast, &es->tx_unicast);
641 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
642 veb->stat_offsets_loaded,
643 &oes->tx_multicast, &es->tx_multicast);
644 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
645 veb->stat_offsets_loaded,
646 &oes->tx_broadcast, &es->tx_broadcast);
647 veb->stat_offsets_loaded = true;
648}
649
38e00438
VD
650#ifdef I40E_FCOE
651/**
652 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
653 * @vsi: the VSI that is capable of doing FCoE
654 **/
655static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
656{
657 struct i40e_pf *pf = vsi->back;
658 struct i40e_hw *hw = &pf->hw;
659 struct i40e_fcoe_stats *ofs;
660 struct i40e_fcoe_stats *fs; /* device's eth stats */
661 int idx;
662
663 if (vsi->type != I40E_VSI_FCOE)
664 return;
665
666 idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
667 fs = &vsi->fcoe_stats;
668 ofs = &vsi->fcoe_stats_offsets;
669
670 i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
671 vsi->fcoe_stat_offsets_loaded,
672 &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
673 i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
674 vsi->fcoe_stat_offsets_loaded,
675 &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
676 i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
677 vsi->fcoe_stat_offsets_loaded,
678 &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
679 i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
680 vsi->fcoe_stat_offsets_loaded,
681 &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
682 i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
683 vsi->fcoe_stat_offsets_loaded,
684 &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
685 i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
686 vsi->fcoe_stat_offsets_loaded,
687 &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
688 i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
689 vsi->fcoe_stat_offsets_loaded,
690 &ofs->fcoe_last_error, &fs->fcoe_last_error);
691 i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
692 vsi->fcoe_stat_offsets_loaded,
693 &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
694
695 vsi->fcoe_stat_offsets_loaded = true;
696}
697
698#endif
41c445ff
JB
699/**
700 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
701 * @pf: the corresponding PF
702 *
703 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
704 **/
705static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
706{
707 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
708 struct i40e_hw_port_stats *nsd = &pf->stats;
709 struct i40e_hw *hw = &pf->hw;
710 u64 xoff = 0;
711 u16 i, v;
712
713 if ((hw->fc.current_mode != I40E_FC_FULL) &&
714 (hw->fc.current_mode != I40E_FC_RX_PAUSE))
715 return;
716
717 xoff = nsd->link_xoff_rx;
718 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
719 pf->stat_offsets_loaded,
720 &osd->link_xoff_rx, &nsd->link_xoff_rx);
721
722 /* No new LFC xoff rx */
723 if (!(nsd->link_xoff_rx - xoff))
724 return;
725
726 /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
505682cd 727 for (v = 0; v < pf->num_alloc_vsi; v++) {
41c445ff
JB
728 struct i40e_vsi *vsi = pf->vsi[v];
729
ddfda80f 730 if (!vsi || !vsi->tx_rings[0])
41c445ff
JB
731 continue;
732
733 for (i = 0; i < vsi->num_queue_pairs; i++) {
9f65e15b 734 struct i40e_ring *ring = vsi->tx_rings[i];
41c445ff
JB
735 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
736 }
737 }
738}
739
740/**
741 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
742 * @pf: the corresponding PF
743 *
744 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
745 **/
746static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
747{
748 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
749 struct i40e_hw_port_stats *nsd = &pf->stats;
750 bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
751 struct i40e_dcbx_config *dcb_cfg;
752 struct i40e_hw *hw = &pf->hw;
753 u16 i, v;
754 u8 tc;
755
756 dcb_cfg = &hw->local_dcbx_config;
757
758 /* See if DCB enabled with PFC TC */
759 if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
760 !(dcb_cfg->pfc.pfcenable)) {
761 i40e_update_link_xoff_rx(pf);
762 return;
763 }
764
765 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
766 u64 prio_xoff = nsd->priority_xoff_rx[i];
767 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
768 pf->stat_offsets_loaded,
769 &osd->priority_xoff_rx[i],
770 &nsd->priority_xoff_rx[i]);
771
772 /* No new PFC xoff rx */
773 if (!(nsd->priority_xoff_rx[i] - prio_xoff))
774 continue;
775 /* Get the TC for given priority */
776 tc = dcb_cfg->etscfg.prioritytable[i];
777 xoff[tc] = true;
778 }
779
780 /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
505682cd 781 for (v = 0; v < pf->num_alloc_vsi; v++) {
41c445ff
JB
782 struct i40e_vsi *vsi = pf->vsi[v];
783
ddfda80f 784 if (!vsi || !vsi->tx_rings[0])
41c445ff
JB
785 continue;
786
787 for (i = 0; i < vsi->num_queue_pairs; i++) {
9f65e15b 788 struct i40e_ring *ring = vsi->tx_rings[i];
41c445ff
JB
789
790 tc = ring->dcb_tc;
791 if (xoff[tc])
792 clear_bit(__I40E_HANG_CHECK_ARMED,
793 &ring->state);
794 }
795 }
796}
797
798/**
7812fddc 799 * i40e_update_vsi_stats - Update the vsi statistics counters.
41c445ff
JB
800 * @vsi: the VSI to be updated
801 *
802 * There are a few instances where we store the same stat in a
803 * couple of different structs. This is partly because we have
804 * the netdev stats that need to be filled out, which is slightly
805 * different from the "eth_stats" defined by the chip and used in
7812fddc 806 * VF communications. We sort it out here.
41c445ff 807 **/
7812fddc 808static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
41c445ff
JB
809{
810 struct i40e_pf *pf = vsi->back;
41c445ff
JB
811 struct rtnl_link_stats64 *ons;
812 struct rtnl_link_stats64 *ns; /* netdev stats */
813 struct i40e_eth_stats *oes;
814 struct i40e_eth_stats *es; /* device's eth stats */
815 u32 tx_restart, tx_busy;
bf00b376 816 struct i40e_ring *p;
41c445ff 817 u32 rx_page, rx_buf;
bf00b376
AA
818 u64 bytes, packets;
819 unsigned int start;
41c445ff
JB
820 u64 rx_p, rx_b;
821 u64 tx_p, tx_b;
41c445ff
JB
822 u16 q;
823
824 if (test_bit(__I40E_DOWN, &vsi->state) ||
825 test_bit(__I40E_CONFIG_BUSY, &pf->state))
826 return;
827
828 ns = i40e_get_vsi_stats_struct(vsi);
829 ons = &vsi->net_stats_offsets;
830 es = &vsi->eth_stats;
831 oes = &vsi->eth_stats_offsets;
832
833 /* Gather up the netdev and vsi stats that the driver collects
834 * on the fly during packet processing
835 */
836 rx_b = rx_p = 0;
837 tx_b = tx_p = 0;
838 tx_restart = tx_busy = 0;
839 rx_page = 0;
840 rx_buf = 0;
980e9b11 841 rcu_read_lock();
41c445ff 842 for (q = 0; q < vsi->num_queue_pairs; q++) {
980e9b11
AD
843 /* locate Tx ring */
844 p = ACCESS_ONCE(vsi->tx_rings[q]);
845
846 do {
57a7744e 847 start = u64_stats_fetch_begin_irq(&p->syncp);
980e9b11
AD
848 packets = p->stats.packets;
849 bytes = p->stats.bytes;
57a7744e 850 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
980e9b11
AD
851 tx_b += bytes;
852 tx_p += packets;
853 tx_restart += p->tx_stats.restart_queue;
854 tx_busy += p->tx_stats.tx_busy;
41c445ff 855
980e9b11
AD
856 /* Rx queue is part of the same block as Tx queue */
857 p = &p[1];
858 do {
57a7744e 859 start = u64_stats_fetch_begin_irq(&p->syncp);
980e9b11
AD
860 packets = p->stats.packets;
861 bytes = p->stats.bytes;
57a7744e 862 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
980e9b11
AD
863 rx_b += bytes;
864 rx_p += packets;
420136cc
MW
865 rx_buf += p->rx_stats.alloc_buff_failed;
866 rx_page += p->rx_stats.alloc_page_failed;
41c445ff 867 }
980e9b11 868 rcu_read_unlock();
41c445ff
JB
869 vsi->tx_restart = tx_restart;
870 vsi->tx_busy = tx_busy;
871 vsi->rx_page_failed = rx_page;
872 vsi->rx_buf_failed = rx_buf;
873
874 ns->rx_packets = rx_p;
875 ns->rx_bytes = rx_b;
876 ns->tx_packets = tx_p;
877 ns->tx_bytes = tx_b;
878
41c445ff 879 /* update netdev stats from eth stats */
7812fddc 880 i40e_update_eth_stats(vsi);
41c445ff
JB
881 ons->tx_errors = oes->tx_errors;
882 ns->tx_errors = es->tx_errors;
883 ons->multicast = oes->rx_multicast;
884 ns->multicast = es->rx_multicast;
41a9e55c
SN
885 ons->rx_dropped = oes->rx_discards;
886 ns->rx_dropped = es->rx_discards;
41c445ff
JB
887 ons->tx_dropped = oes->tx_discards;
888 ns->tx_dropped = es->tx_discards;
889
7812fddc 890 /* pull in a couple PF stats if this is the main vsi */
41c445ff 891 if (vsi == pf->vsi[pf->lan_vsi]) {
7812fddc
SN
892 ns->rx_crc_errors = pf->stats.crc_errors;
893 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
894 ns->rx_length_errors = pf->stats.rx_length_errors;
895 }
896}
41c445ff 897
7812fddc
SN
898/**
899 * i40e_update_pf_stats - Update the pf statistics counters.
900 * @pf: the PF to be updated
901 **/
902static void i40e_update_pf_stats(struct i40e_pf *pf)
903{
904 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
905 struct i40e_hw_port_stats *nsd = &pf->stats;
906 struct i40e_hw *hw = &pf->hw;
907 u32 val;
908 int i;
41c445ff 909
7812fddc
SN
910 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
911 I40E_GLPRT_GORCL(hw->port),
912 pf->stat_offsets_loaded,
913 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
914 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
915 I40E_GLPRT_GOTCL(hw->port),
916 pf->stat_offsets_loaded,
917 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
918 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
919 pf->stat_offsets_loaded,
920 &osd->eth.rx_discards,
921 &nsd->eth.rx_discards);
922 i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
923 pf->stat_offsets_loaded,
924 &osd->eth.tx_discards,
925 &nsd->eth.tx_discards);
41c445ff 926
532d283d
SN
927 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
928 I40E_GLPRT_UPRCL(hw->port),
929 pf->stat_offsets_loaded,
930 &osd->eth.rx_unicast,
931 &nsd->eth.rx_unicast);
7812fddc
SN
932 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
933 I40E_GLPRT_MPRCL(hw->port),
934 pf->stat_offsets_loaded,
935 &osd->eth.rx_multicast,
936 &nsd->eth.rx_multicast);
532d283d
SN
937 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
938 I40E_GLPRT_BPRCL(hw->port),
939 pf->stat_offsets_loaded,
940 &osd->eth.rx_broadcast,
941 &nsd->eth.rx_broadcast);
942 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
943 I40E_GLPRT_UPTCL(hw->port),
944 pf->stat_offsets_loaded,
945 &osd->eth.tx_unicast,
946 &nsd->eth.tx_unicast);
947 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
948 I40E_GLPRT_MPTCL(hw->port),
949 pf->stat_offsets_loaded,
950 &osd->eth.tx_multicast,
951 &nsd->eth.tx_multicast);
952 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
953 I40E_GLPRT_BPTCL(hw->port),
954 pf->stat_offsets_loaded,
955 &osd->eth.tx_broadcast,
956 &nsd->eth.tx_broadcast);
41c445ff 957
7812fddc
SN
958 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
959 pf->stat_offsets_loaded,
960 &osd->tx_dropped_link_down,
961 &nsd->tx_dropped_link_down);
41c445ff 962
7812fddc
SN
963 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
964 pf->stat_offsets_loaded,
965 &osd->crc_errors, &nsd->crc_errors);
41c445ff 966
7812fddc
SN
967 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
968 pf->stat_offsets_loaded,
969 &osd->illegal_bytes, &nsd->illegal_bytes);
41c445ff 970
7812fddc
SN
971 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
972 pf->stat_offsets_loaded,
973 &osd->mac_local_faults,
974 &nsd->mac_local_faults);
975 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
976 pf->stat_offsets_loaded,
977 &osd->mac_remote_faults,
978 &nsd->mac_remote_faults);
41c445ff 979
7812fddc
SN
980 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
981 pf->stat_offsets_loaded,
982 &osd->rx_length_errors,
983 &nsd->rx_length_errors);
41c445ff 984
7812fddc
SN
985 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
986 pf->stat_offsets_loaded,
987 &osd->link_xon_rx, &nsd->link_xon_rx);
988 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
989 pf->stat_offsets_loaded,
990 &osd->link_xon_tx, &nsd->link_xon_tx);
991 i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
992 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
993 pf->stat_offsets_loaded,
994 &osd->link_xoff_tx, &nsd->link_xoff_tx);
41c445ff 995
7812fddc
SN
996 for (i = 0; i < 8; i++) {
997 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
41c445ff 998 pf->stat_offsets_loaded,
7812fddc
SN
999 &osd->priority_xon_rx[i],
1000 &nsd->priority_xon_rx[i]);
1001 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
41c445ff 1002 pf->stat_offsets_loaded,
7812fddc
SN
1003 &osd->priority_xon_tx[i],
1004 &nsd->priority_xon_tx[i]);
1005 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
41c445ff 1006 pf->stat_offsets_loaded,
7812fddc
SN
1007 &osd->priority_xoff_tx[i],
1008 &nsd->priority_xoff_tx[i]);
1009 i40e_stat_update32(hw,
1010 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
bee5af7e 1011 pf->stat_offsets_loaded,
7812fddc
SN
1012 &osd->priority_xon_2_xoff[i],
1013 &nsd->priority_xon_2_xoff[i]);
41c445ff
JB
1014 }
1015
7812fddc
SN
1016 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1017 I40E_GLPRT_PRC64L(hw->port),
1018 pf->stat_offsets_loaded,
1019 &osd->rx_size_64, &nsd->rx_size_64);
1020 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1021 I40E_GLPRT_PRC127L(hw->port),
1022 pf->stat_offsets_loaded,
1023 &osd->rx_size_127, &nsd->rx_size_127);
1024 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1025 I40E_GLPRT_PRC255L(hw->port),
1026 pf->stat_offsets_loaded,
1027 &osd->rx_size_255, &nsd->rx_size_255);
1028 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1029 I40E_GLPRT_PRC511L(hw->port),
1030 pf->stat_offsets_loaded,
1031 &osd->rx_size_511, &nsd->rx_size_511);
1032 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1033 I40E_GLPRT_PRC1023L(hw->port),
1034 pf->stat_offsets_loaded,
1035 &osd->rx_size_1023, &nsd->rx_size_1023);
1036 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1037 I40E_GLPRT_PRC1522L(hw->port),
1038 pf->stat_offsets_loaded,
1039 &osd->rx_size_1522, &nsd->rx_size_1522);
1040 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1041 I40E_GLPRT_PRC9522L(hw->port),
1042 pf->stat_offsets_loaded,
1043 &osd->rx_size_big, &nsd->rx_size_big);
1044
1045 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1046 I40E_GLPRT_PTC64L(hw->port),
1047 pf->stat_offsets_loaded,
1048 &osd->tx_size_64, &nsd->tx_size_64);
1049 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1050 I40E_GLPRT_PTC127L(hw->port),
1051 pf->stat_offsets_loaded,
1052 &osd->tx_size_127, &nsd->tx_size_127);
1053 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1054 I40E_GLPRT_PTC255L(hw->port),
1055 pf->stat_offsets_loaded,
1056 &osd->tx_size_255, &nsd->tx_size_255);
1057 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1058 I40E_GLPRT_PTC511L(hw->port),
1059 pf->stat_offsets_loaded,
1060 &osd->tx_size_511, &nsd->tx_size_511);
1061 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1062 I40E_GLPRT_PTC1023L(hw->port),
1063 pf->stat_offsets_loaded,
1064 &osd->tx_size_1023, &nsd->tx_size_1023);
1065 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1066 I40E_GLPRT_PTC1522L(hw->port),
1067 pf->stat_offsets_loaded,
1068 &osd->tx_size_1522, &nsd->tx_size_1522);
1069 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1070 I40E_GLPRT_PTC9522L(hw->port),
1071 pf->stat_offsets_loaded,
1072 &osd->tx_size_big, &nsd->tx_size_big);
1073
1074 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1075 pf->stat_offsets_loaded,
1076 &osd->rx_undersize, &nsd->rx_undersize);
1077 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1078 pf->stat_offsets_loaded,
1079 &osd->rx_fragments, &nsd->rx_fragments);
1080 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1081 pf->stat_offsets_loaded,
1082 &osd->rx_oversize, &nsd->rx_oversize);
1083 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1084 pf->stat_offsets_loaded,
1085 &osd->rx_jabber, &nsd->rx_jabber);
1086
433c47de
ASJ
1087 /* FDIR stats */
1088 i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
1089 pf->stat_offsets_loaded,
1090 &osd->fd_atr_match, &nsd->fd_atr_match);
1091 i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
1092 pf->stat_offsets_loaded,
1093 &osd->fd_sb_match, &nsd->fd_sb_match);
1094
7812fddc
SN
1095 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1096 nsd->tx_lpi_status =
1097 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1098 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1099 nsd->rx_lpi_status =
1100 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1101 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1102 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1103 pf->stat_offsets_loaded,
1104 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1105 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1106 pf->stat_offsets_loaded,
1107 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1108
41c445ff
JB
1109 pf->stat_offsets_loaded = true;
1110}
1111
7812fddc
SN
1112/**
1113 * i40e_update_stats - Update the various statistics counters.
1114 * @vsi: the VSI to be updated
1115 *
1116 * Update the various stats for this VSI and its related entities.
1117 **/
1118void i40e_update_stats(struct i40e_vsi *vsi)
1119{
1120 struct i40e_pf *pf = vsi->back;
1121
1122 if (vsi == pf->vsi[pf->lan_vsi])
1123 i40e_update_pf_stats(pf);
1124
1125 i40e_update_vsi_stats(vsi);
38e00438
VD
1126#ifdef I40E_FCOE
1127 i40e_update_fcoe_stats(vsi);
1128#endif
7812fddc
SN
1129}
1130
41c445ff
JB
1131/**
1132 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1133 * @vsi: the VSI to be searched
1134 * @macaddr: the MAC address
1135 * @vlan: the vlan
1136 * @is_vf: make sure its a vf filter, else doesn't matter
1137 * @is_netdev: make sure its a netdev filter, else doesn't matter
1138 *
1139 * Returns ptr to the filter object or NULL
1140 **/
1141static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1142 u8 *macaddr, s16 vlan,
1143 bool is_vf, bool is_netdev)
1144{
1145 struct i40e_mac_filter *f;
1146
1147 if (!vsi || !macaddr)
1148 return NULL;
1149
1150 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1151 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1152 (vlan == f->vlan) &&
1153 (!is_vf || f->is_vf) &&
1154 (!is_netdev || f->is_netdev))
1155 return f;
1156 }
1157 return NULL;
1158}
1159
1160/**
1161 * i40e_find_mac - Find a mac addr in the macvlan filters list
1162 * @vsi: the VSI to be searched
1163 * @macaddr: the MAC address we are searching for
1164 * @is_vf: make sure its a vf filter, else doesn't matter
1165 * @is_netdev: make sure its a netdev filter, else doesn't matter
1166 *
1167 * Returns the first filter with the provided MAC address or NULL if
1168 * MAC address was not found
1169 **/
1170struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1171 bool is_vf, bool is_netdev)
1172{
1173 struct i40e_mac_filter *f;
1174
1175 if (!vsi || !macaddr)
1176 return NULL;
1177
1178 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1179 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1180 (!is_vf || f->is_vf) &&
1181 (!is_netdev || f->is_netdev))
1182 return f;
1183 }
1184 return NULL;
1185}
1186
1187/**
1188 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1189 * @vsi: the VSI to be searched
1190 *
1191 * Returns true if VSI is in vlan mode or false otherwise
1192 **/
1193bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1194{
1195 struct i40e_mac_filter *f;
1196
1197 /* Only -1 for all the filters denotes not in vlan mode
1198 * so we have to go through all the list in order to make sure
1199 */
1200 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1201 if (f->vlan >= 0)
1202 return true;
1203 }
1204
1205 return false;
1206}
1207
1208/**
1209 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
1210 * @vsi: the VSI to be searched
1211 * @macaddr: the mac address to be filtered
1212 * @is_vf: true if it is a vf
1213 * @is_netdev: true if it is a netdev
1214 *
1215 * Goes through all the macvlan filters and adds a
1216 * macvlan filter for each unique vlan that already exists
1217 *
1218 * Returns first filter found on success, else NULL
1219 **/
1220struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1221 bool is_vf, bool is_netdev)
1222{
1223 struct i40e_mac_filter *f;
1224
1225 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1226 if (!i40e_find_filter(vsi, macaddr, f->vlan,
1227 is_vf, is_netdev)) {
1228 if (!i40e_add_filter(vsi, macaddr, f->vlan,
8fb905b3 1229 is_vf, is_netdev))
41c445ff
JB
1230 return NULL;
1231 }
1232 }
1233
1234 return list_first_entry_or_null(&vsi->mac_filter_list,
1235 struct i40e_mac_filter, list);
1236}
1237
8c27d42e
GR
1238/**
1239 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1240 * @vsi: the PF Main VSI - inappropriate for any other VSI
1241 * @macaddr: the MAC address
30650cc5
SN
1242 *
1243 * Some older firmware configurations set up a default promiscuous VLAN
1244 * filter that needs to be removed.
8c27d42e 1245 **/
30650cc5 1246static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
8c27d42e
GR
1247{
1248 struct i40e_aqc_remove_macvlan_element_data element;
1249 struct i40e_pf *pf = vsi->back;
1250 i40e_status aq_ret;
1251
1252 /* Only appropriate for the PF main VSI */
1253 if (vsi->type != I40E_VSI_MAIN)
30650cc5 1254 return -EINVAL;
8c27d42e 1255
30650cc5 1256 memset(&element, 0, sizeof(element));
8c27d42e
GR
1257 ether_addr_copy(element.mac_addr, macaddr);
1258 element.vlan_tag = 0;
1259 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1260 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1261 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1262 if (aq_ret)
30650cc5
SN
1263 return -ENOENT;
1264
1265 return 0;
8c27d42e
GR
1266}
1267
41c445ff
JB
1268/**
1269 * i40e_add_filter - Add a mac/vlan filter to the VSI
1270 * @vsi: the VSI to be searched
1271 * @macaddr: the MAC address
1272 * @vlan: the vlan
1273 * @is_vf: make sure its a vf filter, else doesn't matter
1274 * @is_netdev: make sure its a netdev filter, else doesn't matter
1275 *
1276 * Returns ptr to the filter object or NULL when no memory available.
1277 **/
1278struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1279 u8 *macaddr, s16 vlan,
1280 bool is_vf, bool is_netdev)
1281{
1282 struct i40e_mac_filter *f;
1283
1284 if (!vsi || !macaddr)
1285 return NULL;
1286
1287 f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1288 if (!f) {
1289 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1290 if (!f)
1291 goto add_filter_out;
1292
9a173901 1293 ether_addr_copy(f->macaddr, macaddr);
41c445ff
JB
1294 f->vlan = vlan;
1295 f->changed = true;
1296
1297 INIT_LIST_HEAD(&f->list);
1298 list_add(&f->list, &vsi->mac_filter_list);
1299 }
1300
1301 /* increment counter and add a new flag if needed */
1302 if (is_vf) {
1303 if (!f->is_vf) {
1304 f->is_vf = true;
1305 f->counter++;
1306 }
1307 } else if (is_netdev) {
1308 if (!f->is_netdev) {
1309 f->is_netdev = true;
1310 f->counter++;
1311 }
1312 } else {
1313 f->counter++;
1314 }
1315
1316 /* changed tells sync_filters_subtask to
1317 * push the filter down to the firmware
1318 */
1319 if (f->changed) {
1320 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1321 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1322 }
1323
1324add_filter_out:
1325 return f;
1326}
1327
1328/**
1329 * i40e_del_filter - Remove a mac/vlan filter from the VSI
1330 * @vsi: the VSI to be searched
1331 * @macaddr: the MAC address
1332 * @vlan: the vlan
1333 * @is_vf: make sure it's a vf filter, else doesn't matter
1334 * @is_netdev: make sure it's a netdev filter, else doesn't matter
1335 **/
1336void i40e_del_filter(struct i40e_vsi *vsi,
1337 u8 *macaddr, s16 vlan,
1338 bool is_vf, bool is_netdev)
1339{
1340 struct i40e_mac_filter *f;
1341
1342 if (!vsi || !macaddr)
1343 return;
1344
1345 f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1346 if (!f || f->counter == 0)
1347 return;
1348
1349 if (is_vf) {
1350 if (f->is_vf) {
1351 f->is_vf = false;
1352 f->counter--;
1353 }
1354 } else if (is_netdev) {
1355 if (f->is_netdev) {
1356 f->is_netdev = false;
1357 f->counter--;
1358 }
1359 } else {
1360 /* make sure we don't remove a filter in use by vf or netdev */
1361 int min_f = 0;
1362 min_f += (f->is_vf ? 1 : 0);
1363 min_f += (f->is_netdev ? 1 : 0);
1364
1365 if (f->counter > min_f)
1366 f->counter--;
1367 }
1368
1369 /* counter == 0 tells sync_filters_subtask to
1370 * remove the filter from the firmware's list
1371 */
1372 if (f->counter == 0) {
1373 f->changed = true;
1374 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1375 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1376 }
1377}
1378
1379/**
1380 * i40e_set_mac - NDO callback to set mac address
1381 * @netdev: network interface device structure
1382 * @p: pointer to an address structure
1383 *
1384 * Returns 0 on success, negative on failure
1385 **/
38e00438
VD
1386#ifdef I40E_FCOE
1387int i40e_set_mac(struct net_device *netdev, void *p)
1388#else
41c445ff 1389static int i40e_set_mac(struct net_device *netdev, void *p)
38e00438 1390#endif
41c445ff
JB
1391{
1392 struct i40e_netdev_priv *np = netdev_priv(netdev);
1393 struct i40e_vsi *vsi = np->vsi;
30650cc5
SN
1394 struct i40e_pf *pf = vsi->back;
1395 struct i40e_hw *hw = &pf->hw;
41c445ff
JB
1396 struct sockaddr *addr = p;
1397 struct i40e_mac_filter *f;
1398
1399 if (!is_valid_ether_addr(addr->sa_data))
1400 return -EADDRNOTAVAIL;
1401
30650cc5
SN
1402 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1403 netdev_info(netdev, "already using mac address %pM\n",
1404 addr->sa_data);
1405 return 0;
1406 }
41c445ff 1407
80f6428f
ASJ
1408 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1409 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1410 return -EADDRNOTAVAIL;
1411
30650cc5
SN
1412 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1413 netdev_info(netdev, "returning to hw mac address %pM\n",
1414 hw->mac.addr);
1415 else
1416 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1417
41c445ff
JB
1418 if (vsi->type == I40E_VSI_MAIN) {
1419 i40e_status ret;
1420 ret = i40e_aq_mac_address_write(&vsi->back->hw,
cc41222c 1421 I40E_AQC_WRITE_TYPE_LAA_WOL,
41c445ff
JB
1422 addr->sa_data, NULL);
1423 if (ret) {
1424 netdev_info(netdev,
1425 "Addr change for Main VSI failed: %d\n",
1426 ret);
1427 return -EADDRNOTAVAIL;
1428 }
41c445ff
JB
1429 }
1430
30650cc5
SN
1431 if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
1432 struct i40e_aqc_remove_macvlan_element_data element;
6c8ad1ba 1433
30650cc5
SN
1434 memset(&element, 0, sizeof(element));
1435 ether_addr_copy(element.mac_addr, netdev->dev_addr);
1436 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1437 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1438 } else {
6c8ad1ba
SN
1439 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1440 false, false);
6c8ad1ba 1441 }
41c445ff 1442
30650cc5
SN
1443 if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
1444 struct i40e_aqc_add_macvlan_element_data element;
1445
1446 memset(&element, 0, sizeof(element));
1447 ether_addr_copy(element.mac_addr, hw->mac.addr);
1448 element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
1449 i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1450 } else {
1451 f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
1452 false, false);
1453 if (f)
1454 f->is_laa = true;
1455 }
1456
1457 i40e_sync_vsi_filters(vsi);
1458 ether_addr_copy(netdev->dev_addr, addr->sa_data);
41c445ff
JB
1459
1460 return 0;
1461}
1462
1463/**
1464 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1465 * @vsi: the VSI being setup
1466 * @ctxt: VSI context structure
1467 * @enabled_tc: Enabled TCs bitmap
1468 * @is_add: True if called before Add VSI
1469 *
1470 * Setup VSI queue mapping for enabled traffic classes.
1471 **/
38e00438
VD
1472#ifdef I40E_FCOE
1473void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1474 struct i40e_vsi_context *ctxt,
1475 u8 enabled_tc,
1476 bool is_add)
1477#else
41c445ff
JB
1478static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1479 struct i40e_vsi_context *ctxt,
1480 u8 enabled_tc,
1481 bool is_add)
38e00438 1482#endif
41c445ff
JB
1483{
1484 struct i40e_pf *pf = vsi->back;
1485 u16 sections = 0;
1486 u8 netdev_tc = 0;
1487 u16 numtc = 0;
1488 u16 qcount;
1489 u8 offset;
1490 u16 qmap;
1491 int i;
4e3b35b0 1492 u16 num_tc_qps = 0;
41c445ff
JB
1493
1494 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1495 offset = 0;
1496
1497 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1498 /* Find numtc from enabled TC bitmap */
1499 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1500 if (enabled_tc & (1 << i)) /* TC is enabled */
1501 numtc++;
1502 }
1503 if (!numtc) {
1504 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1505 numtc = 1;
1506 }
1507 } else {
1508 /* At least TC0 is enabled in case of non-DCB case */
1509 numtc = 1;
1510 }
1511
1512 vsi->tc_config.numtc = numtc;
1513 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
4e3b35b0 1514 /* Number of queues per enabled TC */
7f9ff476
AS
1515 /* In MFP case we can have a much lower count of MSIx
1516 * vectors available and so we need to lower the used
1517 * q count.
1518 */
1519 qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
1520 num_tc_qps = qcount / numtc;
4e3b35b0 1521 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
41c445ff
JB
1522
1523 /* Setup queue offset/count for all TCs for given VSI */
1524 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1525 /* See if the given TC is enabled for the given VSI */
1526 if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
1527 int pow, num_qps;
1528
41c445ff
JB
1529 switch (vsi->type) {
1530 case I40E_VSI_MAIN:
4e3b35b0 1531 qcount = min_t(int, pf->rss_size, num_tc_qps);
41c445ff 1532 break;
38e00438
VD
1533#ifdef I40E_FCOE
1534 case I40E_VSI_FCOE:
1535 qcount = num_tc_qps;
1536 break;
1537#endif
41c445ff
JB
1538 case I40E_VSI_FDIR:
1539 case I40E_VSI_SRIOV:
1540 case I40E_VSI_VMDQ2:
1541 default:
4e3b35b0 1542 qcount = num_tc_qps;
41c445ff
JB
1543 WARN_ON(i != 0);
1544 break;
1545 }
4e3b35b0
NP
1546 vsi->tc_config.tc_info[i].qoffset = offset;
1547 vsi->tc_config.tc_info[i].qcount = qcount;
41c445ff
JB
1548
1549 /* find the power-of-2 of the number of queue pairs */
4e3b35b0 1550 num_qps = qcount;
41c445ff 1551 pow = 0;
4e3b35b0 1552 while (num_qps && ((1 << pow) < qcount)) {
41c445ff
JB
1553 pow++;
1554 num_qps >>= 1;
1555 }
1556
1557 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1558 qmap =
1559 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1560 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1561
4e3b35b0 1562 offset += qcount;
41c445ff
JB
1563 } else {
1564 /* TC is not enabled so set the offset to
1565 * default queue and allocate one queue
1566 * for the given TC.
1567 */
1568 vsi->tc_config.tc_info[i].qoffset = 0;
1569 vsi->tc_config.tc_info[i].qcount = 1;
1570 vsi->tc_config.tc_info[i].netdev_tc = 0;
1571
1572 qmap = 0;
1573 }
1574 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1575 }
1576
1577 /* Set actual Tx/Rx queue pairs */
1578 vsi->num_queue_pairs = offset;
1579
1580 /* Scheduler section valid can only be set for ADD VSI */
1581 if (is_add) {
1582 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1583
1584 ctxt->info.up_enable_bits = enabled_tc;
1585 }
1586 if (vsi->type == I40E_VSI_SRIOV) {
1587 ctxt->info.mapping_flags |=
1588 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1589 for (i = 0; i < vsi->num_queue_pairs; i++)
1590 ctxt->info.queue_mapping[i] =
1591 cpu_to_le16(vsi->base_queue + i);
1592 } else {
1593 ctxt->info.mapping_flags |=
1594 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1595 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1596 }
1597 ctxt->info.valid_sections |= cpu_to_le16(sections);
1598}
1599
1600/**
1601 * i40e_set_rx_mode - NDO callback to set the netdev filters
1602 * @netdev: network interface device structure
1603 **/
38e00438
VD
1604#ifdef I40E_FCOE
1605void i40e_set_rx_mode(struct net_device *netdev)
1606#else
41c445ff 1607static void i40e_set_rx_mode(struct net_device *netdev)
38e00438 1608#endif
41c445ff
JB
1609{
1610 struct i40e_netdev_priv *np = netdev_priv(netdev);
1611 struct i40e_mac_filter *f, *ftmp;
1612 struct i40e_vsi *vsi = np->vsi;
1613 struct netdev_hw_addr *uca;
1614 struct netdev_hw_addr *mca;
1615 struct netdev_hw_addr *ha;
1616
1617 /* add addr if not already in the filter list */
1618 netdev_for_each_uc_addr(uca, netdev) {
1619 if (!i40e_find_mac(vsi, uca->addr, false, true)) {
1620 if (i40e_is_vsi_in_vlan(vsi))
1621 i40e_put_mac_in_vlan(vsi, uca->addr,
1622 false, true);
1623 else
1624 i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
1625 false, true);
1626 }
1627 }
1628
1629 netdev_for_each_mc_addr(mca, netdev) {
1630 if (!i40e_find_mac(vsi, mca->addr, false, true)) {
1631 if (i40e_is_vsi_in_vlan(vsi))
1632 i40e_put_mac_in_vlan(vsi, mca->addr,
1633 false, true);
1634 else
1635 i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
1636 false, true);
1637 }
1638 }
1639
1640 /* remove filter if not in netdev list */
1641 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1642 bool found = false;
1643
1644 if (!f->is_netdev)
1645 continue;
1646
1647 if (is_multicast_ether_addr(f->macaddr)) {
1648 netdev_for_each_mc_addr(mca, netdev) {
1649 if (ether_addr_equal(mca->addr, f->macaddr)) {
1650 found = true;
1651 break;
1652 }
1653 }
1654 } else {
1655 netdev_for_each_uc_addr(uca, netdev) {
1656 if (ether_addr_equal(uca->addr, f->macaddr)) {
1657 found = true;
1658 break;
1659 }
1660 }
1661
1662 for_each_dev_addr(netdev, ha) {
1663 if (ether_addr_equal(ha->addr, f->macaddr)) {
1664 found = true;
1665 break;
1666 }
1667 }
1668 }
1669 if (!found)
1670 i40e_del_filter(
1671 vsi, f->macaddr, I40E_VLAN_ANY, false, true);
1672 }
1673
1674 /* check for other flag changes */
1675 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1676 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1677 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1678 }
1679}
1680
1681/**
1682 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1683 * @vsi: ptr to the VSI
1684 *
1685 * Push any outstanding VSI filter changes through the AdminQ.
1686 *
1687 * Returns 0 or error value
1688 **/
1689int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1690{
1691 struct i40e_mac_filter *f, *ftmp;
1692 bool promisc_forced_on = false;
1693 bool add_happened = false;
1694 int filter_list_len = 0;
1695 u32 changed_flags = 0;
dcae29be 1696 i40e_status aq_ret = 0;
41c445ff
JB
1697 struct i40e_pf *pf;
1698 int num_add = 0;
1699 int num_del = 0;
1700 u16 cmd_flags;
1701
1702 /* empty array typed pointers, kcalloc later */
1703 struct i40e_aqc_add_macvlan_element_data *add_list;
1704 struct i40e_aqc_remove_macvlan_element_data *del_list;
1705
1706 while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1707 usleep_range(1000, 2000);
1708 pf = vsi->back;
1709
1710 if (vsi->netdev) {
1711 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1712 vsi->current_netdev_flags = vsi->netdev->flags;
1713 }
1714
1715 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1716 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1717
1718 filter_list_len = pf->hw.aq.asq_buf_size /
1719 sizeof(struct i40e_aqc_remove_macvlan_element_data);
1720 del_list = kcalloc(filter_list_len,
1721 sizeof(struct i40e_aqc_remove_macvlan_element_data),
1722 GFP_KERNEL);
1723 if (!del_list)
1724 return -ENOMEM;
1725
1726 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1727 if (!f->changed)
1728 continue;
1729
1730 if (f->counter != 0)
1731 continue;
1732 f->changed = false;
1733 cmd_flags = 0;
1734
1735 /* add to delete list */
9a173901 1736 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
41c445ff
JB
1737 del_list[num_del].vlan_tag =
1738 cpu_to_le16((u16)(f->vlan ==
1739 I40E_VLAN_ANY ? 0 : f->vlan));
1740
41c445ff
JB
1741 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1742 del_list[num_del].flags = cmd_flags;
1743 num_del++;
1744
1745 /* unlink from filter list */
1746 list_del(&f->list);
1747 kfree(f);
1748
1749 /* flush a full buffer */
1750 if (num_del == filter_list_len) {
dcae29be 1751 aq_ret = i40e_aq_remove_macvlan(&pf->hw,
41c445ff
JB
1752 vsi->seid, del_list, num_del,
1753 NULL);
1754 num_del = 0;
1755 memset(del_list, 0, sizeof(*del_list));
1756
fdfe9cbe
SN
1757 if (aq_ret &&
1758 pf->hw.aq.asq_last_status !=
1759 I40E_AQ_RC_ENOENT)
41c445ff
JB
1760 dev_info(&pf->pdev->dev,
1761 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
dcae29be 1762 aq_ret,
41c445ff
JB
1763 pf->hw.aq.asq_last_status);
1764 }
1765 }
1766 if (num_del) {
dcae29be 1767 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
41c445ff
JB
1768 del_list, num_del, NULL);
1769 num_del = 0;
1770
fdfe9cbe
SN
1771 if (aq_ret &&
1772 pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
41c445ff
JB
1773 dev_info(&pf->pdev->dev,
1774 "ignoring delete macvlan error, err %d, aq_err %d\n",
dcae29be 1775 aq_ret, pf->hw.aq.asq_last_status);
41c445ff
JB
1776 }
1777
1778 kfree(del_list);
1779 del_list = NULL;
1780
1781 /* do all the adds now */
1782 filter_list_len = pf->hw.aq.asq_buf_size /
1783 sizeof(struct i40e_aqc_add_macvlan_element_data),
1784 add_list = kcalloc(filter_list_len,
1785 sizeof(struct i40e_aqc_add_macvlan_element_data),
1786 GFP_KERNEL);
1787 if (!add_list)
1788 return -ENOMEM;
1789
1790 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1791 if (!f->changed)
1792 continue;
1793
1794 if (f->counter == 0)
1795 continue;
1796 f->changed = false;
1797 add_happened = true;
1798 cmd_flags = 0;
1799
1800 /* add to add array */
9a173901 1801 ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
41c445ff
JB
1802 add_list[num_add].vlan_tag =
1803 cpu_to_le16(
1804 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1805 add_list[num_add].queue_number = 0;
1806
1807 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
41c445ff
JB
1808 add_list[num_add].flags = cpu_to_le16(cmd_flags);
1809 num_add++;
1810
1811 /* flush a full buffer */
1812 if (num_add == filter_list_len) {
dcae29be
JB
1813 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1814 add_list, num_add,
1815 NULL);
41c445ff
JB
1816 num_add = 0;
1817
dcae29be 1818 if (aq_ret)
41c445ff
JB
1819 break;
1820 memset(add_list, 0, sizeof(*add_list));
1821 }
1822 }
1823 if (num_add) {
dcae29be
JB
1824 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1825 add_list, num_add, NULL);
41c445ff
JB
1826 num_add = 0;
1827 }
1828 kfree(add_list);
1829 add_list = NULL;
1830
30650cc5
SN
1831 if (add_happened && aq_ret &&
1832 pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
41c445ff
JB
1833 dev_info(&pf->pdev->dev,
1834 "add filter failed, err %d, aq_err %d\n",
dcae29be 1835 aq_ret, pf->hw.aq.asq_last_status);
41c445ff
JB
1836 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1837 !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1838 &vsi->state)) {
1839 promisc_forced_on = true;
1840 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1841 &vsi->state);
1842 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1843 }
1844 }
1845 }
1846
1847 /* check for changes in promiscuous modes */
1848 if (changed_flags & IFF_ALLMULTI) {
1849 bool cur_multipromisc;
1850 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
dcae29be
JB
1851 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1852 vsi->seid,
1853 cur_multipromisc,
1854 NULL);
1855 if (aq_ret)
41c445ff
JB
1856 dev_info(&pf->pdev->dev,
1857 "set multi promisc failed, err %d, aq_err %d\n",
dcae29be 1858 aq_ret, pf->hw.aq.asq_last_status);
41c445ff
JB
1859 }
1860 if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1861 bool cur_promisc;
1862 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1863 test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1864 &vsi->state));
dcae29be
JB
1865 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1866 vsi->seid,
1867 cur_promisc, NULL);
1868 if (aq_ret)
41c445ff
JB
1869 dev_info(&pf->pdev->dev,
1870 "set uni promisc failed, err %d, aq_err %d\n",
dcae29be 1871 aq_ret, pf->hw.aq.asq_last_status);
1a10370a
GR
1872 aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
1873 vsi->seid,
1874 cur_promisc, NULL);
1875 if (aq_ret)
1876 dev_info(&pf->pdev->dev,
1877 "set brdcast promisc failed, err %d, aq_err %d\n",
1878 aq_ret, pf->hw.aq.asq_last_status);
41c445ff
JB
1879 }
1880
1881 clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1882 return 0;
1883}
1884
1885/**
1886 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1887 * @pf: board private structure
1888 **/
1889static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1890{
1891 int v;
1892
1893 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1894 return;
1895 pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1896
505682cd 1897 for (v = 0; v < pf->num_alloc_vsi; v++) {
41c445ff
JB
1898 if (pf->vsi[v] &&
1899 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1900 i40e_sync_vsi_filters(pf->vsi[v]);
1901 }
1902}
1903
1904/**
1905 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
1906 * @netdev: network interface device structure
1907 * @new_mtu: new value for maximum frame size
1908 *
1909 * Returns 0 on success, negative on failure
1910 **/
1911static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1912{
1913 struct i40e_netdev_priv *np = netdev_priv(netdev);
61a46a4c 1914 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
41c445ff
JB
1915 struct i40e_vsi *vsi = np->vsi;
1916
1917 /* MTU < 68 is an error and causes problems on some kernels */
1918 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
1919 return -EINVAL;
1920
1921 netdev_info(netdev, "changing MTU from %d to %d\n",
1922 netdev->mtu, new_mtu);
1923 netdev->mtu = new_mtu;
1924 if (netif_running(netdev))
1925 i40e_vsi_reinit_locked(vsi);
1926
1927 return 0;
1928}
1929
beb0dff1
JK
1930/**
1931 * i40e_ioctl - Access the hwtstamp interface
1932 * @netdev: network interface device structure
1933 * @ifr: interface request data
1934 * @cmd: ioctl command
1935 **/
1936int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1937{
1938 struct i40e_netdev_priv *np = netdev_priv(netdev);
1939 struct i40e_pf *pf = np->vsi->back;
1940
1941 switch (cmd) {
1942 case SIOCGHWTSTAMP:
1943 return i40e_ptp_get_ts_config(pf, ifr);
1944 case SIOCSHWTSTAMP:
1945 return i40e_ptp_set_ts_config(pf, ifr);
1946 default:
1947 return -EOPNOTSUPP;
1948 }
1949}
1950
41c445ff
JB
1951/**
1952 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
1953 * @vsi: the vsi being adjusted
1954 **/
1955void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
1956{
1957 struct i40e_vsi_context ctxt;
1958 i40e_status ret;
1959
1960 if ((vsi->info.valid_sections &
1961 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1962 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
1963 return; /* already enabled */
1964
1965 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1966 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1967 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1968
1969 ctxt.seid = vsi->seid;
1970 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1971 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1972 if (ret) {
1973 dev_info(&vsi->back->pdev->dev,
1974 "%s: update vsi failed, aq_err=%d\n",
1975 __func__, vsi->back->hw.aq.asq_last_status);
1976 }
1977}
1978
1979/**
1980 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
1981 * @vsi: the vsi being adjusted
1982 **/
1983void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
1984{
1985 struct i40e_vsi_context ctxt;
1986 i40e_status ret;
1987
1988 if ((vsi->info.valid_sections &
1989 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1990 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
1991 I40E_AQ_VSI_PVLAN_EMOD_MASK))
1992 return; /* already disabled */
1993
1994 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1995 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1996 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1997
1998 ctxt.seid = vsi->seid;
1999 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2000 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2001 if (ret) {
2002 dev_info(&vsi->back->pdev->dev,
2003 "%s: update vsi failed, aq_err=%d\n",
2004 __func__, vsi->back->hw.aq.asq_last_status);
2005 }
2006}
2007
2008/**
2009 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2010 * @netdev: network interface to be adjusted
2011 * @features: netdev features to test if VLAN offload is enabled or not
2012 **/
2013static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2014{
2015 struct i40e_netdev_priv *np = netdev_priv(netdev);
2016 struct i40e_vsi *vsi = np->vsi;
2017
2018 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2019 i40e_vlan_stripping_enable(vsi);
2020 else
2021 i40e_vlan_stripping_disable(vsi);
2022}
2023
2024/**
2025 * i40e_vsi_add_vlan - Add vsi membership for given vlan
2026 * @vsi: the vsi being configured
2027 * @vid: vlan id to be added (0 = untagged only , -1 = any)
2028 **/
2029int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2030{
2031 struct i40e_mac_filter *f, *add_f;
2032 bool is_netdev, is_vf;
41c445ff
JB
2033
2034 is_vf = (vsi->type == I40E_VSI_SRIOV);
2035 is_netdev = !!(vsi->netdev);
2036
2037 if (is_netdev) {
2038 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
2039 is_vf, is_netdev);
2040 if (!add_f) {
2041 dev_info(&vsi->back->pdev->dev,
2042 "Could not add vlan filter %d for %pM\n",
2043 vid, vsi->netdev->dev_addr);
2044 return -ENOMEM;
2045 }
2046 }
2047
2048 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2049 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2050 if (!add_f) {
2051 dev_info(&vsi->back->pdev->dev,
2052 "Could not add vlan filter %d for %pM\n",
2053 vid, f->macaddr);
2054 return -ENOMEM;
2055 }
2056 }
2057
41c445ff
JB
2058 /* Now if we add a vlan tag, make sure to check if it is the first
2059 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
2060 * with 0, so we now accept untagged and specified tagged traffic
2061 * (and not any taged and untagged)
2062 */
2063 if (vid > 0) {
2064 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
2065 I40E_VLAN_ANY,
2066 is_vf, is_netdev)) {
2067 i40e_del_filter(vsi, vsi->netdev->dev_addr,
2068 I40E_VLAN_ANY, is_vf, is_netdev);
2069 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
2070 is_vf, is_netdev);
2071 if (!add_f) {
2072 dev_info(&vsi->back->pdev->dev,
2073 "Could not add filter 0 for %pM\n",
2074 vsi->netdev->dev_addr);
2075 return -ENOMEM;
2076 }
2077 }
8d82a7c5 2078 }
41c445ff 2079
8d82a7c5
GR
2080 /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2081 if (vid > 0 && !vsi->info.pvid) {
41c445ff
JB
2082 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2083 if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2084 is_vf, is_netdev)) {
2085 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2086 is_vf, is_netdev);
2087 add_f = i40e_add_filter(vsi, f->macaddr,
2088 0, is_vf, is_netdev);
2089 if (!add_f) {
2090 dev_info(&vsi->back->pdev->dev,
2091 "Could not add filter 0 for %pM\n",
2092 f->macaddr);
2093 return -ENOMEM;
2094 }
2095 }
2096 }
41c445ff
JB
2097 }
2098
80f6428f
ASJ
2099 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2100 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2101 return 0;
2102
2103 return i40e_sync_vsi_filters(vsi);
41c445ff
JB
2104}
2105
2106/**
2107 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2108 * @vsi: the vsi being configured
2109 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
078b5876
JB
2110 *
2111 * Return: 0 on success or negative otherwise
41c445ff
JB
2112 **/
2113int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2114{
2115 struct net_device *netdev = vsi->netdev;
2116 struct i40e_mac_filter *f, *add_f;
2117 bool is_vf, is_netdev;
2118 int filter_count = 0;
41c445ff
JB
2119
2120 is_vf = (vsi->type == I40E_VSI_SRIOV);
2121 is_netdev = !!(netdev);
2122
2123 if (is_netdev)
2124 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
2125
2126 list_for_each_entry(f, &vsi->mac_filter_list, list)
2127 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2128
41c445ff
JB
2129 /* go through all the filters for this VSI and if there is only
2130 * vid == 0 it means there are no other filters, so vid 0 must
2131 * be replaced with -1. This signifies that we should from now
2132 * on accept any traffic (with any tag present, or untagged)
2133 */
2134 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2135 if (is_netdev) {
2136 if (f->vlan &&
2137 ether_addr_equal(netdev->dev_addr, f->macaddr))
2138 filter_count++;
2139 }
2140
2141 if (f->vlan)
2142 filter_count++;
2143 }
2144
2145 if (!filter_count && is_netdev) {
2146 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
2147 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
2148 is_vf, is_netdev);
2149 if (!f) {
2150 dev_info(&vsi->back->pdev->dev,
2151 "Could not add filter %d for %pM\n",
2152 I40E_VLAN_ANY, netdev->dev_addr);
2153 return -ENOMEM;
2154 }
2155 }
2156
2157 if (!filter_count) {
2158 list_for_each_entry(f, &vsi->mac_filter_list, list) {
2159 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
2160 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2161 is_vf, is_netdev);
2162 if (!add_f) {
2163 dev_info(&vsi->back->pdev->dev,
2164 "Could not add filter %d for %pM\n",
2165 I40E_VLAN_ANY, f->macaddr);
2166 return -ENOMEM;
2167 }
2168 }
2169 }
2170
80f6428f
ASJ
2171 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2172 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2173 return 0;
2174
41c445ff
JB
2175 return i40e_sync_vsi_filters(vsi);
2176}
2177
2178/**
2179 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2180 * @netdev: network interface to be adjusted
2181 * @vid: vlan id to be added
078b5876
JB
2182 *
2183 * net_device_ops implementation for adding vlan ids
41c445ff 2184 **/
38e00438
VD
2185#ifdef I40E_FCOE
2186int i40e_vlan_rx_add_vid(struct net_device *netdev,
2187 __always_unused __be16 proto, u16 vid)
2188#else
41c445ff
JB
2189static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2190 __always_unused __be16 proto, u16 vid)
38e00438 2191#endif
41c445ff
JB
2192{
2193 struct i40e_netdev_priv *np = netdev_priv(netdev);
2194 struct i40e_vsi *vsi = np->vsi;
078b5876 2195 int ret = 0;
41c445ff
JB
2196
2197 if (vid > 4095)
078b5876
JB
2198 return -EINVAL;
2199
2200 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
41c445ff 2201
6982d429
ASJ
2202 /* If the network stack called us with vid = 0 then
2203 * it is asking to receive priority tagged packets with
2204 * vlan id 0. Our HW receives them by default when configured
2205 * to receive untagged packets so there is no need to add an
2206 * extra filter for vlan 0 tagged packets.
41c445ff 2207 */
6982d429
ASJ
2208 if (vid)
2209 ret = i40e_vsi_add_vlan(vsi, vid);
41c445ff 2210
078b5876
JB
2211 if (!ret && (vid < VLAN_N_VID))
2212 set_bit(vid, vsi->active_vlans);
41c445ff 2213
078b5876 2214 return ret;
41c445ff
JB
2215}
2216
2217/**
2218 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2219 * @netdev: network interface to be adjusted
2220 * @vid: vlan id to be removed
078b5876 2221 *
fdfd943e 2222 * net_device_ops implementation for removing vlan ids
41c445ff 2223 **/
38e00438
VD
2224#ifdef I40E_FCOE
2225int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2226 __always_unused __be16 proto, u16 vid)
2227#else
41c445ff
JB
2228static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2229 __always_unused __be16 proto, u16 vid)
38e00438 2230#endif
41c445ff
JB
2231{
2232 struct i40e_netdev_priv *np = netdev_priv(netdev);
2233 struct i40e_vsi *vsi = np->vsi;
2234
078b5876
JB
2235 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2236
41c445ff
JB
2237 /* return code is ignored as there is nothing a user
2238 * can do about failure to remove and a log message was
078b5876 2239 * already printed from the other function
41c445ff
JB
2240 */
2241 i40e_vsi_kill_vlan(vsi, vid);
2242
2243 clear_bit(vid, vsi->active_vlans);
078b5876 2244
41c445ff
JB
2245 return 0;
2246}
2247
2248/**
2249 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2250 * @vsi: the vsi being brought back up
2251 **/
2252static void i40e_restore_vlan(struct i40e_vsi *vsi)
2253{
2254 u16 vid;
2255
2256 if (!vsi->netdev)
2257 return;
2258
2259 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2260
2261 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2262 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2263 vid);
2264}
2265
2266/**
2267 * i40e_vsi_add_pvid - Add pvid for the VSI
2268 * @vsi: the vsi being adjusted
2269 * @vid: the vlan id to set as a PVID
2270 **/
dcae29be 2271int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
41c445ff
JB
2272{
2273 struct i40e_vsi_context ctxt;
dcae29be 2274 i40e_status aq_ret;
41c445ff
JB
2275
2276 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2277 vsi->info.pvid = cpu_to_le16(vid);
6c12fcbf
GR
2278 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2279 I40E_AQ_VSI_PVLAN_INSERT_PVID |
b774c7dd 2280 I40E_AQ_VSI_PVLAN_EMOD_STR;
41c445ff
JB
2281
2282 ctxt.seid = vsi->seid;
2283 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
dcae29be
JB
2284 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2285 if (aq_ret) {
41c445ff
JB
2286 dev_info(&vsi->back->pdev->dev,
2287 "%s: update vsi failed, aq_err=%d\n",
2288 __func__, vsi->back->hw.aq.asq_last_status);
dcae29be 2289 return -ENOENT;
41c445ff
JB
2290 }
2291
dcae29be 2292 return 0;
41c445ff
JB
2293}
2294
2295/**
2296 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2297 * @vsi: the vsi being adjusted
2298 *
2299 * Just use the vlan_rx_register() service to put it back to normal
2300 **/
2301void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2302{
6c12fcbf
GR
2303 i40e_vlan_stripping_disable(vsi);
2304
41c445ff 2305 vsi->info.pvid = 0;
41c445ff
JB
2306}
2307
2308/**
2309 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2310 * @vsi: ptr to the VSI
2311 *
2312 * If this function returns with an error, then it's possible one or
2313 * more of the rings is populated (while the rest are not). It is the
2314 * callers duty to clean those orphaned rings.
2315 *
2316 * Return 0 on success, negative on failure
2317 **/
2318static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2319{
2320 int i, err = 0;
2321
2322 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
9f65e15b 2323 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
41c445ff
JB
2324
2325 return err;
2326}
2327
2328/**
2329 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2330 * @vsi: ptr to the VSI
2331 *
2332 * Free VSI's transmit software resources
2333 **/
2334static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2335{
2336 int i;
2337
8e9dca53
GR
2338 if (!vsi->tx_rings)
2339 return;
2340
41c445ff 2341 for (i = 0; i < vsi->num_queue_pairs; i++)
8e9dca53 2342 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
9f65e15b 2343 i40e_free_tx_resources(vsi->tx_rings[i]);
41c445ff
JB
2344}
2345
2346/**
2347 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2348 * @vsi: ptr to the VSI
2349 *
2350 * If this function returns with an error, then it's possible one or
2351 * more of the rings is populated (while the rest are not). It is the
2352 * callers duty to clean those orphaned rings.
2353 *
2354 * Return 0 on success, negative on failure
2355 **/
2356static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2357{
2358 int i, err = 0;
2359
2360 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
9f65e15b 2361 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
38e00438
VD
2362#ifdef I40E_FCOE
2363 i40e_fcoe_setup_ddp_resources(vsi);
2364#endif
41c445ff
JB
2365 return err;
2366}
2367
2368/**
2369 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2370 * @vsi: ptr to the VSI
2371 *
2372 * Free all receive software resources
2373 **/
2374static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2375{
2376 int i;
2377
8e9dca53
GR
2378 if (!vsi->rx_rings)
2379 return;
2380
41c445ff 2381 for (i = 0; i < vsi->num_queue_pairs; i++)
8e9dca53 2382 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
9f65e15b 2383 i40e_free_rx_resources(vsi->rx_rings[i]);
38e00438
VD
2384#ifdef I40E_FCOE
2385 i40e_fcoe_free_ddp_resources(vsi);
2386#endif
41c445ff
JB
2387}
2388
3ffa037d
NP
2389/**
2390 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2391 * @ring: The Tx ring to configure
2392 *
2393 * This enables/disables XPS for a given Tx descriptor ring
2394 * based on the TCs enabled for the VSI that ring belongs to.
2395 **/
2396static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2397{
2398 struct i40e_vsi *vsi = ring->vsi;
2399 cpumask_var_t mask;
2400
2401 if (ring->q_vector && ring->netdev) {
2402 /* Single TC mode enable XPS */
2403 if (vsi->tc_config.numtc <= 1 &&
2404 !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
2405 netif_set_xps_queue(ring->netdev,
2406 &ring->q_vector->affinity_mask,
2407 ring->queue_index);
2408 } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2409 /* Disable XPS to allow selection based on TC */
2410 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2411 netif_set_xps_queue(ring->netdev, mask,
2412 ring->queue_index);
2413 free_cpumask_var(mask);
2414 }
2415 }
2416}
2417
41c445ff
JB
2418/**
2419 * i40e_configure_tx_ring - Configure a transmit ring context and rest
2420 * @ring: The Tx ring to configure
2421 *
2422 * Configure the Tx descriptor ring in the HMC context.
2423 **/
2424static int i40e_configure_tx_ring(struct i40e_ring *ring)
2425{
2426 struct i40e_vsi *vsi = ring->vsi;
2427 u16 pf_q = vsi->base_queue + ring->queue_index;
2428 struct i40e_hw *hw = &vsi->back->hw;
2429 struct i40e_hmc_obj_txq tx_ctx;
2430 i40e_status err = 0;
2431 u32 qtx_ctl = 0;
2432
2433 /* some ATR related tx ring init */
60ea5f83 2434 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
41c445ff
JB
2435 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2436 ring->atr_count = 0;
2437 } else {
2438 ring->atr_sample_rate = 0;
2439 }
2440
3ffa037d
NP
2441 /* configure XPS */
2442 i40e_config_xps_tx_ring(ring);
41c445ff
JB
2443
2444 /* clear the context structure first */
2445 memset(&tx_ctx, 0, sizeof(tx_ctx));
2446
2447 tx_ctx.new_context = 1;
2448 tx_ctx.base = (ring->dma / 128);
2449 tx_ctx.qlen = ring->count;
60ea5f83
JB
2450 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2451 I40E_FLAG_FD_ATR_ENABLED));
38e00438
VD
2452#ifdef I40E_FCOE
2453 tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2454#endif
beb0dff1 2455 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
1943d8ba
JB
2456 /* FDIR VSI tx ring can still use RS bit and writebacks */
2457 if (vsi->type != I40E_VSI_FDIR)
2458 tx_ctx.head_wb_ena = 1;
2459 tx_ctx.head_wb_addr = ring->dma +
2460 (ring->count * sizeof(struct i40e_tx_desc));
41c445ff
JB
2461
2462 /* As part of VSI creation/update, FW allocates certain
2463 * Tx arbitration queue sets for each TC enabled for
2464 * the VSI. The FW returns the handles to these queue
2465 * sets as part of the response buffer to Add VSI,
2466 * Update VSI, etc. AQ commands. It is expected that
2467 * these queue set handles be associated with the Tx
2468 * queues by the driver as part of the TX queue context
2469 * initialization. This has to be done regardless of
2470 * DCB as by default everything is mapped to TC0.
2471 */
2472 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2473 tx_ctx.rdylist_act = 0;
2474
2475 /* clear the context in the HMC */
2476 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2477 if (err) {
2478 dev_info(&vsi->back->pdev->dev,
2479 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2480 ring->queue_index, pf_q, err);
2481 return -ENOMEM;
2482 }
2483
2484 /* set the context in the HMC */
2485 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2486 if (err) {
2487 dev_info(&vsi->back->pdev->dev,
2488 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2489 ring->queue_index, pf_q, err);
2490 return -ENOMEM;
2491 }
2492
2493 /* Now associate this queue with this PCI function */
7a28d885 2494 if (vsi->type == I40E_VSI_VMDQ2) {
9d8bf547 2495 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
7a28d885
MW
2496 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2497 I40E_QTX_CTL_VFVM_INDX_MASK;
2498 } else {
9d8bf547 2499 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
7a28d885
MW
2500 }
2501
13fd9774
SN
2502 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2503 I40E_QTX_CTL_PF_INDX_MASK);
41c445ff
JB
2504 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2505 i40e_flush(hw);
2506
2507 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2508
2509 /* cache tail off for easier writes later */
2510 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2511
2512 return 0;
2513}
2514
2515/**
2516 * i40e_configure_rx_ring - Configure a receive ring context
2517 * @ring: The Rx ring to configure
2518 *
2519 * Configure the Rx descriptor ring in the HMC context.
2520 **/
2521static int i40e_configure_rx_ring(struct i40e_ring *ring)
2522{
2523 struct i40e_vsi *vsi = ring->vsi;
2524 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2525 u16 pf_q = vsi->base_queue + ring->queue_index;
2526 struct i40e_hw *hw = &vsi->back->hw;
2527 struct i40e_hmc_obj_rxq rx_ctx;
2528 i40e_status err = 0;
2529
2530 ring->state = 0;
2531
2532 /* clear the context structure first */
2533 memset(&rx_ctx, 0, sizeof(rx_ctx));
2534
2535 ring->rx_buf_len = vsi->rx_buf_len;
2536 ring->rx_hdr_len = vsi->rx_hdr_len;
2537
2538 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2539 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2540
2541 rx_ctx.base = (ring->dma / 128);
2542 rx_ctx.qlen = ring->count;
2543
2544 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2545 set_ring_16byte_desc_enabled(ring);
2546 rx_ctx.dsize = 0;
2547 } else {
2548 rx_ctx.dsize = 1;
2549 }
2550
2551 rx_ctx.dtype = vsi->dtype;
2552 if (vsi->dtype) {
2553 set_ring_ps_enabled(ring);
2554 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
2555 I40E_RX_SPLIT_IP |
2556 I40E_RX_SPLIT_TCP_UDP |
2557 I40E_RX_SPLIT_SCTP;
2558 } else {
2559 rx_ctx.hsplit_0 = 0;
2560 }
2561
2562 rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2563 (chain_len * ring->rx_buf_len));
7134f9ce
JB
2564 if (hw->revision_id == 0)
2565 rx_ctx.lrxqthresh = 0;
2566 else
2567 rx_ctx.lrxqthresh = 2;
41c445ff
JB
2568 rx_ctx.crcstrip = 1;
2569 rx_ctx.l2tsel = 1;
2570 rx_ctx.showiv = 1;
38e00438
VD
2571#ifdef I40E_FCOE
2572 rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2573#endif
acb3676b
CS
2574 /* set the prefena field to 1 because the manual says to */
2575 rx_ctx.prefena = 1;
41c445ff
JB
2576
2577 /* clear the context in the HMC */
2578 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2579 if (err) {
2580 dev_info(&vsi->back->pdev->dev,
2581 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2582 ring->queue_index, pf_q, err);
2583 return -ENOMEM;
2584 }
2585
2586 /* set the context in the HMC */
2587 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2588 if (err) {
2589 dev_info(&vsi->back->pdev->dev,
2590 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2591 ring->queue_index, pf_q, err);
2592 return -ENOMEM;
2593 }
2594
2595 /* cache tail for quicker writes, and clear the reg before use */
2596 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2597 writel(0, ring->tail);
2598
2599 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2600
2601 return 0;
2602}
2603
2604/**
2605 * i40e_vsi_configure_tx - Configure the VSI for Tx
2606 * @vsi: VSI structure describing this set of rings and resources
2607 *
2608 * Configure the Tx VSI for operation.
2609 **/
2610static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2611{
2612 int err = 0;
2613 u16 i;
2614
9f65e15b
AD
2615 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2616 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
41c445ff
JB
2617
2618 return err;
2619}
2620
2621/**
2622 * i40e_vsi_configure_rx - Configure the VSI for Rx
2623 * @vsi: the VSI being configured
2624 *
2625 * Configure the Rx VSI for operation.
2626 **/
2627static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2628{
2629 int err = 0;
2630 u16 i;
2631
2632 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2633 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2634 + ETH_FCS_LEN + VLAN_HLEN;
2635 else
2636 vsi->max_frame = I40E_RXBUFFER_2048;
2637
2638 /* figure out correct receive buffer length */
2639 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2640 I40E_FLAG_RX_PS_ENABLED)) {
2641 case I40E_FLAG_RX_1BUF_ENABLED:
2642 vsi->rx_hdr_len = 0;
2643 vsi->rx_buf_len = vsi->max_frame;
2644 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2645 break;
2646 case I40E_FLAG_RX_PS_ENABLED:
2647 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2648 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2649 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2650 break;
2651 default:
2652 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2653 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2654 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2655 break;
2656 }
2657
38e00438
VD
2658#ifdef I40E_FCOE
2659 /* setup rx buffer for FCoE */
2660 if ((vsi->type == I40E_VSI_FCOE) &&
2661 (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
2662 vsi->rx_hdr_len = 0;
2663 vsi->rx_buf_len = I40E_RXBUFFER_3072;
2664 vsi->max_frame = I40E_RXBUFFER_3072;
2665 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2666 }
2667
2668#endif /* I40E_FCOE */
41c445ff
JB
2669 /* round up for the chip's needs */
2670 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2671 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2672 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2673 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2674
2675 /* set up individual rings */
2676 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
9f65e15b 2677 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
41c445ff
JB
2678
2679 return err;
2680}
2681
2682/**
2683 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2684 * @vsi: ptr to the VSI
2685 **/
2686static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2687{
e7046ee1 2688 struct i40e_ring *tx_ring, *rx_ring;
41c445ff
JB
2689 u16 qoffset, qcount;
2690 int i, n;
2691
cd238a3e
PN
2692 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2693 /* Reset the TC information */
2694 for (i = 0; i < vsi->num_queue_pairs; i++) {
2695 rx_ring = vsi->rx_rings[i];
2696 tx_ring = vsi->tx_rings[i];
2697 rx_ring->dcb_tc = 0;
2698 tx_ring->dcb_tc = 0;
2699 }
2700 }
41c445ff
JB
2701
2702 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2703 if (!(vsi->tc_config.enabled_tc & (1 << n)))
2704 continue;
2705
2706 qoffset = vsi->tc_config.tc_info[n].qoffset;
2707 qcount = vsi->tc_config.tc_info[n].qcount;
2708 for (i = qoffset; i < (qoffset + qcount); i++) {
e7046ee1
AA
2709 rx_ring = vsi->rx_rings[i];
2710 tx_ring = vsi->tx_rings[i];
41c445ff
JB
2711 rx_ring->dcb_tc = n;
2712 tx_ring->dcb_tc = n;
2713 }
2714 }
2715}
2716
2717/**
2718 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2719 * @vsi: ptr to the VSI
2720 **/
2721static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2722{
2723 if (vsi->netdev)
2724 i40e_set_rx_mode(vsi->netdev);
2725}
2726
17a73f6b
JG
2727/**
2728 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2729 * @vsi: Pointer to the targeted VSI
2730 *
2731 * This function replays the hlist on the hw where all the SB Flow Director
2732 * filters were saved.
2733 **/
2734static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2735{
2736 struct i40e_fdir_filter *filter;
2737 struct i40e_pf *pf = vsi->back;
2738 struct hlist_node *node;
2739
55a5e60b
ASJ
2740 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2741 return;
2742
17a73f6b
JG
2743 hlist_for_each_entry_safe(filter, node,
2744 &pf->fdir_filter_list, fdir_node) {
2745 i40e_add_del_fdir(vsi, filter, true);
2746 }
2747}
2748
41c445ff
JB
2749/**
2750 * i40e_vsi_configure - Set up the VSI for action
2751 * @vsi: the VSI being configured
2752 **/
2753static int i40e_vsi_configure(struct i40e_vsi *vsi)
2754{
2755 int err;
2756
2757 i40e_set_vsi_rx_mode(vsi);
2758 i40e_restore_vlan(vsi);
2759 i40e_vsi_config_dcb_rings(vsi);
2760 err = i40e_vsi_configure_tx(vsi);
2761 if (!err)
2762 err = i40e_vsi_configure_rx(vsi);
2763
2764 return err;
2765}
2766
2767/**
2768 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2769 * @vsi: the VSI being configured
2770 **/
2771static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2772{
2773 struct i40e_pf *pf = vsi->back;
2774 struct i40e_q_vector *q_vector;
2775 struct i40e_hw *hw = &pf->hw;
2776 u16 vector;
2777 int i, q;
2778 u32 val;
2779 u32 qp;
2780
2781 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2782 * and PFINT_LNKLSTn registers, e.g.:
2783 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
2784 */
2785 qp = vsi->base_queue;
2786 vector = vsi->base_vector;
493fb300
AD
2787 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2788 q_vector = vsi->q_vectors[i];
41c445ff
JB
2789 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2790 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2791 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2792 q_vector->rx.itr);
2793 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2794 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2795 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2796 q_vector->tx.itr);
2797
2798 /* Linked list for the queuepairs assigned to this vector */
2799 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2800 for (q = 0; q < q_vector->num_ringpairs; q++) {
2801 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2802 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2803 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2804 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2805 (I40E_QUEUE_TYPE_TX
2806 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2807
2808 wr32(hw, I40E_QINT_RQCTL(qp), val);
2809
2810 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2811 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2812 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2813 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2814 (I40E_QUEUE_TYPE_RX
2815 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2816
2817 /* Terminate the linked list */
2818 if (q == (q_vector->num_ringpairs - 1))
2819 val |= (I40E_QUEUE_END_OF_LIST
2820 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2821
2822 wr32(hw, I40E_QINT_TQCTL(qp), val);
2823 qp++;
2824 }
2825 }
2826
2827 i40e_flush(hw);
2828}
2829
2830/**
2831 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2832 * @hw: ptr to the hardware info
2833 **/
ab437b5a 2834static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
41c445ff 2835{
ab437b5a 2836 struct i40e_hw *hw = &pf->hw;
41c445ff
JB
2837 u32 val;
2838
2839 /* clear things first */
2840 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2841 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2842
2843 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2844 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2845 I40E_PFINT_ICR0_ENA_GRST_MASK |
2846 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2847 I40E_PFINT_ICR0_ENA_GPIO_MASK |
41c445ff
JB
2848 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2849 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2850 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2851
ab437b5a
JK
2852 if (pf->flags & I40E_FLAG_PTP)
2853 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
2854
41c445ff
JB
2855 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2856
2857 /* SW_ITR_IDX = 0, but don't change INTENA */
84ed40e7
ASJ
2858 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2859 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
41c445ff
JB
2860
2861 /* OTHER_ITR_IDX = 0 */
2862 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2863}
2864
2865/**
2866 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2867 * @vsi: the VSI being configured
2868 **/
2869static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2870{
493fb300 2871 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
41c445ff
JB
2872 struct i40e_pf *pf = vsi->back;
2873 struct i40e_hw *hw = &pf->hw;
2874 u32 val;
2875
2876 /* set the ITR configuration */
2877 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2878 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2879 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2880 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2881 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2882 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2883
ab437b5a 2884 i40e_enable_misc_int_causes(pf);
41c445ff
JB
2885
2886 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2887 wr32(hw, I40E_PFINT_LNKLST0, 0);
2888
f29eaa3d 2889 /* Associate the queue pair to the vector and enable the queue int */
41c445ff
JB
2890 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2891 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2892 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2893
2894 wr32(hw, I40E_QINT_RQCTL(0), val);
2895
2896 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2897 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2898 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2899
2900 wr32(hw, I40E_QINT_TQCTL(0), val);
2901 i40e_flush(hw);
2902}
2903
2ef28cfb
MW
2904/**
2905 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
2906 * @pf: board private structure
2907 **/
2908void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
2909{
2910 struct i40e_hw *hw = &pf->hw;
2911
2912 wr32(hw, I40E_PFINT_DYN_CTL0,
2913 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2914 i40e_flush(hw);
2915}
2916
41c445ff
JB
2917/**
2918 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2919 * @pf: board private structure
2920 **/
116a57d4 2921void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
41c445ff
JB
2922{
2923 struct i40e_hw *hw = &pf->hw;
2924 u32 val;
2925
2926 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2927 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2928 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2929
2930 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2931 i40e_flush(hw);
2932}
2933
2934/**
2935 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2936 * @vsi: pointer to a vsi
2937 * @vector: enable a particular Hw Interrupt vector
2938 **/
2939void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2940{
2941 struct i40e_pf *pf = vsi->back;
2942 struct i40e_hw *hw = &pf->hw;
2943 u32 val;
2944
2945 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2946 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2947 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2948 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
1022cb6c 2949 /* skip the flush */
41c445ff
JB
2950}
2951
5c2cebda
CW
2952/**
2953 * i40e_irq_dynamic_disable - Disable default interrupt generation settings
2954 * @vsi: pointer to a vsi
03147773 2955 * @vector: disable a particular Hw Interrupt vector
5c2cebda
CW
2956 **/
2957void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
2958{
2959 struct i40e_pf *pf = vsi->back;
2960 struct i40e_hw *hw = &pf->hw;
2961 u32 val;
2962
2963 val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2964 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2965 i40e_flush(hw);
2966}
2967
41c445ff
JB
2968/**
2969 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2970 * @irq: interrupt number
2971 * @data: pointer to a q_vector
2972 **/
2973static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2974{
2975 struct i40e_q_vector *q_vector = data;
2976
cd0b6fa6 2977 if (!q_vector->tx.ring && !q_vector->rx.ring)
41c445ff
JB
2978 return IRQ_HANDLED;
2979
2980 napi_schedule(&q_vector->napi);
2981
2982 return IRQ_HANDLED;
2983}
2984
41c445ff
JB
2985/**
2986 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2987 * @vsi: the VSI being configured
2988 * @basename: name for the vector
2989 *
2990 * Allocates MSI-X vectors and requests interrupts from the kernel.
2991 **/
2992static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2993{
2994 int q_vectors = vsi->num_q_vectors;
2995 struct i40e_pf *pf = vsi->back;
2996 int base = vsi->base_vector;
2997 int rx_int_idx = 0;
2998 int tx_int_idx = 0;
2999 int vector, err;
3000
3001 for (vector = 0; vector < q_vectors; vector++) {
493fb300 3002 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
41c445ff 3003
cd0b6fa6 3004 if (q_vector->tx.ring && q_vector->rx.ring) {
41c445ff
JB
3005 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3006 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3007 tx_int_idx++;
cd0b6fa6 3008 } else if (q_vector->rx.ring) {
41c445ff
JB
3009 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3010 "%s-%s-%d", basename, "rx", rx_int_idx++);
cd0b6fa6 3011 } else if (q_vector->tx.ring) {
41c445ff
JB
3012 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3013 "%s-%s-%d", basename, "tx", tx_int_idx++);
3014 } else {
3015 /* skip this unused q_vector */
3016 continue;
3017 }
3018 err = request_irq(pf->msix_entries[base + vector].vector,
3019 vsi->irq_handler,
3020 0,
3021 q_vector->name,
3022 q_vector);
3023 if (err) {
3024 dev_info(&pf->pdev->dev,
3025 "%s: request_irq failed, error: %d\n",
3026 __func__, err);
3027 goto free_queue_irqs;
3028 }
3029 /* assign the mask for this irq */
3030 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3031 &q_vector->affinity_mask);
3032 }
3033
63741846 3034 vsi->irqs_ready = true;
41c445ff
JB
3035 return 0;
3036
3037free_queue_irqs:
3038 while (vector) {
3039 vector--;
3040 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3041 NULL);
3042 free_irq(pf->msix_entries[base + vector].vector,
3043 &(vsi->q_vectors[vector]));
3044 }
3045 return err;
3046}
3047
3048/**
3049 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3050 * @vsi: the VSI being un-configured
3051 **/
3052static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3053{
3054 struct i40e_pf *pf = vsi->back;
3055 struct i40e_hw *hw = &pf->hw;
3056 int base = vsi->base_vector;
3057 int i;
3058
3059 for (i = 0; i < vsi->num_queue_pairs; i++) {
9f65e15b
AD
3060 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3061 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
41c445ff
JB
3062 }
3063
3064 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3065 for (i = vsi->base_vector;
3066 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3067 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3068
3069 i40e_flush(hw);
3070 for (i = 0; i < vsi->num_q_vectors; i++)
3071 synchronize_irq(pf->msix_entries[i + base].vector);
3072 } else {
3073 /* Legacy and MSI mode - this stops all interrupt handling */
3074 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3075 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3076 i40e_flush(hw);
3077 synchronize_irq(pf->pdev->irq);
3078 }
3079}
3080
3081/**
3082 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3083 * @vsi: the VSI being configured
3084 **/
3085static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3086{
3087 struct i40e_pf *pf = vsi->back;
3088 int i;
3089
3090 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3091 for (i = vsi->base_vector;
3092 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3093 i40e_irq_dynamic_enable(vsi, i);
3094 } else {
3095 i40e_irq_dynamic_enable_icr0(pf);
3096 }
3097
1022cb6c 3098 i40e_flush(&pf->hw);
41c445ff
JB
3099 return 0;
3100}
3101
3102/**
3103 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3104 * @pf: board private structure
3105 **/
3106static void i40e_stop_misc_vector(struct i40e_pf *pf)
3107{
3108 /* Disable ICR 0 */
3109 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3110 i40e_flush(&pf->hw);
3111}
3112
3113/**
3114 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3115 * @irq: interrupt number
3116 * @data: pointer to a q_vector
3117 *
3118 * This is the handler used for all MSI/Legacy interrupts, and deals
3119 * with both queue and non-queue interrupts. This is also used in
3120 * MSIX mode to handle the non-queue interrupts.
3121 **/
3122static irqreturn_t i40e_intr(int irq, void *data)
3123{
3124 struct i40e_pf *pf = (struct i40e_pf *)data;
3125 struct i40e_hw *hw = &pf->hw;
5e823066 3126 irqreturn_t ret = IRQ_NONE;
41c445ff
JB
3127 u32 icr0, icr0_remaining;
3128 u32 val, ena_mask;
3129
3130 icr0 = rd32(hw, I40E_PFINT_ICR0);
5e823066 3131 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
41c445ff 3132
116a57d4
SN
3133 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3134 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
5e823066 3135 goto enable_intr;
41c445ff 3136
cd92e72f
SN
3137 /* if interrupt but no bits showing, must be SWINT */
3138 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3139 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3140 pf->sw_int_count++;
3141
41c445ff
JB
3142 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3143 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3144
3145 /* temporarily disable queue cause for NAPI processing */
3146 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
3147 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3148 wr32(hw, I40E_QINT_RQCTL(0), qval);
3149
3150 qval = rd32(hw, I40E_QINT_TQCTL(0));
3151 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3152 wr32(hw, I40E_QINT_TQCTL(0), qval);
41c445ff
JB
3153
3154 if (!test_bit(__I40E_DOWN, &pf->state))
493fb300 3155 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
41c445ff
JB
3156 }
3157
3158 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3159 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3160 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
3161 }
3162
3163 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3164 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3165 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3166 }
3167
3168 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3169 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3170 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3171 }
3172
3173 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3174 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3175 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3176 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3177 val = rd32(hw, I40E_GLGEN_RSTAT);
3178 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3179 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
4eb3f768 3180 if (val == I40E_RESET_CORER) {
41c445ff 3181 pf->corer_count++;
4eb3f768 3182 } else if (val == I40E_RESET_GLOBR) {
41c445ff 3183 pf->globr_count++;
4eb3f768 3184 } else if (val == I40E_RESET_EMPR) {
41c445ff 3185 pf->empr_count++;
4eb3f768
SN
3186 set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
3187 }
41c445ff
JB
3188 }
3189
9c010ee0
ASJ
3190 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3191 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3192 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3193 }
3194
beb0dff1
JK
3195 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3196 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3197
3198 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
cafa1fca 3199 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
beb0dff1 3200 i40e_ptp_tx_hwtstamp(pf);
beb0dff1 3201 }
beb0dff1
JK
3202 }
3203
41c445ff
JB
3204 /* If a critical error is pending we have no choice but to reset the
3205 * device.
3206 * Report and mask out any remaining unexpected interrupts.
3207 */
3208 icr0_remaining = icr0 & ena_mask;
3209 if (icr0_remaining) {
3210 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3211 icr0_remaining);
9c010ee0 3212 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
41c445ff 3213 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
c0c28975 3214 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
9c010ee0
ASJ
3215 dev_info(&pf->pdev->dev, "device will be reset\n");
3216 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3217 i40e_service_event_schedule(pf);
41c445ff
JB
3218 }
3219 ena_mask &= ~icr0_remaining;
3220 }
5e823066 3221 ret = IRQ_HANDLED;
41c445ff 3222
5e823066 3223enable_intr:
41c445ff
JB
3224 /* re-enable interrupt causes */
3225 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
41c445ff
JB
3226 if (!test_bit(__I40E_DOWN, &pf->state)) {
3227 i40e_service_event_schedule(pf);
3228 i40e_irq_dynamic_enable_icr0(pf);
3229 }
3230
5e823066 3231 return ret;
41c445ff
JB
3232}
3233
cbf61325
ASJ
3234/**
3235 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3236 * @tx_ring: tx ring to clean
3237 * @budget: how many cleans we're allowed
3238 *
3239 * Returns true if there's any budget left (e.g. the clean is finished)
3240 **/
3241static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3242{
3243 struct i40e_vsi *vsi = tx_ring->vsi;
3244 u16 i = tx_ring->next_to_clean;
3245 struct i40e_tx_buffer *tx_buf;
3246 struct i40e_tx_desc *tx_desc;
3247
3248 tx_buf = &tx_ring->tx_bi[i];
3249 tx_desc = I40E_TX_DESC(tx_ring, i);
3250 i -= tx_ring->count;
3251
3252 do {
3253 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3254
3255 /* if next_to_watch is not set then there is no work pending */
3256 if (!eop_desc)
3257 break;
3258
3259 /* prevent any other reads prior to eop_desc */
3260 read_barrier_depends();
3261
3262 /* if the descriptor isn't done, no work yet to do */
3263 if (!(eop_desc->cmd_type_offset_bsz &
3264 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3265 break;
3266
3267 /* clear next_to_watch to prevent false hangs */
3268 tx_buf->next_to_watch = NULL;
3269
49d7d933
ASJ
3270 tx_desc->buffer_addr = 0;
3271 tx_desc->cmd_type_offset_bsz = 0;
3272 /* move past filter desc */
3273 tx_buf++;
3274 tx_desc++;
3275 i++;
3276 if (unlikely(!i)) {
3277 i -= tx_ring->count;
3278 tx_buf = tx_ring->tx_bi;
3279 tx_desc = I40E_TX_DESC(tx_ring, 0);
3280 }
cbf61325
ASJ
3281 /* unmap skb header data */
3282 dma_unmap_single(tx_ring->dev,
3283 dma_unmap_addr(tx_buf, dma),
3284 dma_unmap_len(tx_buf, len),
3285 DMA_TO_DEVICE);
49d7d933
ASJ
3286 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3287 kfree(tx_buf->raw_buf);
cbf61325 3288
49d7d933
ASJ
3289 tx_buf->raw_buf = NULL;
3290 tx_buf->tx_flags = 0;
3291 tx_buf->next_to_watch = NULL;
cbf61325 3292 dma_unmap_len_set(tx_buf, len, 0);
49d7d933
ASJ
3293 tx_desc->buffer_addr = 0;
3294 tx_desc->cmd_type_offset_bsz = 0;
cbf61325 3295
49d7d933 3296 /* move us past the eop_desc for start of next FD desc */
cbf61325
ASJ
3297 tx_buf++;
3298 tx_desc++;
3299 i++;
3300 if (unlikely(!i)) {
3301 i -= tx_ring->count;
3302 tx_buf = tx_ring->tx_bi;
3303 tx_desc = I40E_TX_DESC(tx_ring, 0);
3304 }
3305
3306 /* update budget accounting */
3307 budget--;
3308 } while (likely(budget));
3309
3310 i += tx_ring->count;
3311 tx_ring->next_to_clean = i;
3312
3313 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
3314 i40e_irq_dynamic_enable(vsi,
3315 tx_ring->q_vector->v_idx + vsi->base_vector);
3316 }
3317 return budget > 0;
3318}
3319
3320/**
3321 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3322 * @irq: interrupt number
3323 * @data: pointer to a q_vector
3324 **/
3325static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3326{
3327 struct i40e_q_vector *q_vector = data;
3328 struct i40e_vsi *vsi;
3329
3330 if (!q_vector->tx.ring)
3331 return IRQ_HANDLED;
3332
3333 vsi = q_vector->tx.ring->vsi;
3334 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3335
3336 return IRQ_HANDLED;
3337}
3338
41c445ff 3339/**
cd0b6fa6 3340 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
41c445ff
JB
3341 * @vsi: the VSI being configured
3342 * @v_idx: vector index
cd0b6fa6 3343 * @qp_idx: queue pair index
41c445ff 3344 **/
cd0b6fa6 3345static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
41c445ff 3346{
493fb300 3347 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
9f65e15b
AD
3348 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3349 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
41c445ff
JB
3350
3351 tx_ring->q_vector = q_vector;
cd0b6fa6
AD
3352 tx_ring->next = q_vector->tx.ring;
3353 q_vector->tx.ring = tx_ring;
41c445ff 3354 q_vector->tx.count++;
cd0b6fa6
AD
3355
3356 rx_ring->q_vector = q_vector;
3357 rx_ring->next = q_vector->rx.ring;
3358 q_vector->rx.ring = rx_ring;
3359 q_vector->rx.count++;
41c445ff
JB
3360}
3361
3362/**
3363 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3364 * @vsi: the VSI being configured
3365 *
3366 * This function maps descriptor rings to the queue-specific vectors
3367 * we were allotted through the MSI-X enabling code. Ideally, we'd have
3368 * one vector per queue pair, but on a constrained vector budget, we
3369 * group the queue pairs as "efficiently" as possible.
3370 **/
3371static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3372{
3373 int qp_remaining = vsi->num_queue_pairs;
3374 int q_vectors = vsi->num_q_vectors;
cd0b6fa6 3375 int num_ringpairs;
41c445ff
JB
3376 int v_start = 0;
3377 int qp_idx = 0;
3378
3379 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3380 * group them so there are multiple queues per vector.
70114ec4
ASJ
3381 * It is also important to go through all the vectors available to be
3382 * sure that if we don't use all the vectors, that the remaining vectors
3383 * are cleared. This is especially important when decreasing the
3384 * number of queues in use.
41c445ff 3385 */
70114ec4 3386 for (; v_start < q_vectors; v_start++) {
cd0b6fa6
AD
3387 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3388
3389 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3390
3391 q_vector->num_ringpairs = num_ringpairs;
3392
3393 q_vector->rx.count = 0;
3394 q_vector->tx.count = 0;
3395 q_vector->rx.ring = NULL;
3396 q_vector->tx.ring = NULL;
3397
3398 while (num_ringpairs--) {
3399 map_vector_to_qp(vsi, v_start, qp_idx);
3400 qp_idx++;
3401 qp_remaining--;
41c445ff
JB
3402 }
3403 }
3404}
3405
3406/**
3407 * i40e_vsi_request_irq - Request IRQ from the OS
3408 * @vsi: the VSI being configured
3409 * @basename: name for the vector
3410 **/
3411static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3412{
3413 struct i40e_pf *pf = vsi->back;
3414 int err;
3415
3416 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3417 err = i40e_vsi_request_irq_msix(vsi, basename);
3418 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3419 err = request_irq(pf->pdev->irq, i40e_intr, 0,
b294ac70 3420 pf->int_name, pf);
41c445ff
JB
3421 else
3422 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
b294ac70 3423 pf->int_name, pf);
41c445ff
JB
3424
3425 if (err)
3426 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3427
3428 return err;
3429}
3430
3431#ifdef CONFIG_NET_POLL_CONTROLLER
3432/**
3433 * i40e_netpoll - A Polling 'interrupt'handler
3434 * @netdev: network interface device structure
3435 *
3436 * This is used by netconsole to send skbs without having to re-enable
3437 * interrupts. It's not called while the normal interrupt routine is executing.
3438 **/
38e00438
VD
3439#ifdef I40E_FCOE
3440void i40e_netpoll(struct net_device *netdev)
3441#else
41c445ff 3442static void i40e_netpoll(struct net_device *netdev)
38e00438 3443#endif
41c445ff
JB
3444{
3445 struct i40e_netdev_priv *np = netdev_priv(netdev);
3446 struct i40e_vsi *vsi = np->vsi;
3447 struct i40e_pf *pf = vsi->back;
3448 int i;
3449
3450 /* if interface is down do nothing */
3451 if (test_bit(__I40E_DOWN, &vsi->state))
3452 return;
3453
3454 pf->flags |= I40E_FLAG_IN_NETPOLL;
3455 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3456 for (i = 0; i < vsi->num_q_vectors; i++)
493fb300 3457 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
41c445ff
JB
3458 } else {
3459 i40e_intr(pf->pdev->irq, netdev);
3460 }
3461 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
3462}
3463#endif
3464
23527308
NP
3465/**
3466 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3467 * @pf: the PF being configured
3468 * @pf_q: the PF queue
3469 * @enable: enable or disable state of the queue
3470 *
3471 * This routine will wait for the given Tx queue of the PF to reach the
3472 * enabled or disabled state.
3473 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3474 * multiple retries; else will return 0 in case of success.
3475 **/
3476static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3477{
3478 int i;
3479 u32 tx_reg;
3480
3481 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3482 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3483 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3484 break;
3485
f98a2006 3486 usleep_range(10, 20);
23527308
NP
3487 }
3488 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3489 return -ETIMEDOUT;
3490
3491 return 0;
3492}
3493
41c445ff
JB
3494/**
3495 * i40e_vsi_control_tx - Start or stop a VSI's rings
3496 * @vsi: the VSI being configured
3497 * @enable: start or stop the rings
3498 **/
3499static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3500{
3501 struct i40e_pf *pf = vsi->back;
3502 struct i40e_hw *hw = &pf->hw;
23527308 3503 int i, j, pf_q, ret = 0;
41c445ff
JB
3504 u32 tx_reg;
3505
3506 pf_q = vsi->base_queue;
3507 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
351499ab
MJ
3508
3509 /* warn the TX unit of coming changes */
3510 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3511 if (!enable)
f98a2006 3512 usleep_range(10, 20);
351499ab 3513
6c5ef620 3514 for (j = 0; j < 50; j++) {
41c445ff 3515 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
6c5ef620
MW
3516 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3517 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3518 break;
3519 usleep_range(1000, 2000);
3520 }
fda972f6 3521 /* Skip if the queue is already in the requested state */
7c122007 3522 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
fda972f6 3523 continue;
41c445ff
JB
3524
3525 /* turn on/off the queue */
c5c9eb9e
SN
3526 if (enable) {
3527 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
6c5ef620 3528 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
c5c9eb9e 3529 } else {
41c445ff 3530 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
c5c9eb9e 3531 }
41c445ff
JB
3532
3533 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
69129dc3
NP
3534 /* No waiting for the Tx queue to disable */
3535 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3536 continue;
41c445ff
JB
3537
3538 /* wait for the change to finish */
23527308
NP
3539 ret = i40e_pf_txq_wait(pf, pf_q, enable);
3540 if (ret) {
3541 dev_info(&pf->pdev->dev,
3542 "%s: VSI seid %d Tx ring %d %sable timeout\n",
3543 __func__, vsi->seid, pf_q,
3544 (enable ? "en" : "dis"));
3545 break;
41c445ff
JB
3546 }
3547 }
3548
7134f9ce
JB
3549 if (hw->revision_id == 0)
3550 mdelay(50);
23527308
NP
3551 return ret;
3552}
3553
3554/**
3555 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3556 * @pf: the PF being configured
3557 * @pf_q: the PF queue
3558 * @enable: enable or disable state of the queue
3559 *
3560 * This routine will wait for the given Rx queue of the PF to reach the
3561 * enabled or disabled state.
3562 * Returns -ETIMEDOUT in case of failing to reach the requested state after
3563 * multiple retries; else will return 0 in case of success.
3564 **/
3565static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3566{
3567 int i;
3568 u32 rx_reg;
3569
3570 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3571 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3572 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3573 break;
3574
f98a2006 3575 usleep_range(10, 20);
23527308
NP
3576 }
3577 if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3578 return -ETIMEDOUT;
7134f9ce 3579
41c445ff
JB
3580 return 0;
3581}
3582
3583/**
3584 * i40e_vsi_control_rx - Start or stop a VSI's rings
3585 * @vsi: the VSI being configured
3586 * @enable: start or stop the rings
3587 **/
3588static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3589{
3590 struct i40e_pf *pf = vsi->back;
3591 struct i40e_hw *hw = &pf->hw;
23527308 3592 int i, j, pf_q, ret = 0;
41c445ff
JB
3593 u32 rx_reg;
3594
3595 pf_q = vsi->base_queue;
3596 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
6c5ef620 3597 for (j = 0; j < 50; j++) {
41c445ff 3598 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
6c5ef620
MW
3599 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3600 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3601 break;
3602 usleep_range(1000, 2000);
3603 }
41c445ff 3604
7c122007
CS
3605 /* Skip if the queue is already in the requested state */
3606 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3607 continue;
41c445ff
JB
3608
3609 /* turn on/off the queue */
3610 if (enable)
6c5ef620 3611 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
41c445ff 3612 else
6c5ef620 3613 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
41c445ff
JB
3614 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3615
3616 /* wait for the change to finish */
23527308
NP
3617 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3618 if (ret) {
3619 dev_info(&pf->pdev->dev,
3620 "%s: VSI seid %d Rx ring %d %sable timeout\n",
3621 __func__, vsi->seid, pf_q,
3622 (enable ? "en" : "dis"));
3623 break;
41c445ff
JB
3624 }
3625 }
3626
23527308 3627 return ret;
41c445ff
JB
3628}
3629
3630/**
3631 * i40e_vsi_control_rings - Start or stop a VSI's rings
3632 * @vsi: the VSI being configured
3633 * @enable: start or stop the rings
3634 **/
fc18eaa0 3635int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
41c445ff 3636{
3b867b28 3637 int ret = 0;
41c445ff
JB
3638
3639 /* do rx first for enable and last for disable */
3640 if (request) {
3641 ret = i40e_vsi_control_rx(vsi, request);
3642 if (ret)
3643 return ret;
3644 ret = i40e_vsi_control_tx(vsi, request);
3645 } else {
3b867b28
ASJ
3646 /* Ignore return value, we need to shutdown whatever we can */
3647 i40e_vsi_control_tx(vsi, request);
3648 i40e_vsi_control_rx(vsi, request);
41c445ff
JB
3649 }
3650
3651 return ret;
3652}
3653
3654/**
3655 * i40e_vsi_free_irq - Free the irq association with the OS
3656 * @vsi: the VSI being configured
3657 **/
3658static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3659{
3660 struct i40e_pf *pf = vsi->back;
3661 struct i40e_hw *hw = &pf->hw;
3662 int base = vsi->base_vector;
3663 u32 val, qp;
3664 int i;
3665
3666 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3667 if (!vsi->q_vectors)
3668 return;
3669
63741846
SN
3670 if (!vsi->irqs_ready)
3671 return;
3672
3673 vsi->irqs_ready = false;
41c445ff
JB
3674 for (i = 0; i < vsi->num_q_vectors; i++) {
3675 u16 vector = i + base;
3676
3677 /* free only the irqs that were actually requested */
78681b1f
SN
3678 if (!vsi->q_vectors[i] ||
3679 !vsi->q_vectors[i]->num_ringpairs)
41c445ff
JB
3680 continue;
3681
3682 /* clear the affinity_mask in the IRQ descriptor */
3683 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3684 NULL);
3685 free_irq(pf->msix_entries[vector].vector,
493fb300 3686 vsi->q_vectors[i]);
41c445ff
JB
3687
3688 /* Tear down the interrupt queue link list
3689 *
3690 * We know that they come in pairs and always
3691 * the Rx first, then the Tx. To clear the
3692 * link list, stick the EOL value into the
3693 * next_q field of the registers.
3694 */
3695 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3696 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3697 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3698 val |= I40E_QUEUE_END_OF_LIST
3699 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3700 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3701
3702 while (qp != I40E_QUEUE_END_OF_LIST) {
3703 u32 next;
3704
3705 val = rd32(hw, I40E_QINT_RQCTL(qp));
3706
3707 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3708 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3709 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3710 I40E_QINT_RQCTL_INTEVENT_MASK);
3711
3712 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3713 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3714
3715 wr32(hw, I40E_QINT_RQCTL(qp), val);
3716
3717 val = rd32(hw, I40E_QINT_TQCTL(qp));
3718
3719 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3720 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3721
3722 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3723 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3724 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3725 I40E_QINT_TQCTL_INTEVENT_MASK);
3726
3727 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3728 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3729
3730 wr32(hw, I40E_QINT_TQCTL(qp), val);
3731 qp = next;
3732 }
3733 }
3734 } else {
3735 free_irq(pf->pdev->irq, pf);
3736
3737 val = rd32(hw, I40E_PFINT_LNKLST0);
3738 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3739 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3740 val |= I40E_QUEUE_END_OF_LIST
3741 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3742 wr32(hw, I40E_PFINT_LNKLST0, val);
3743
3744 val = rd32(hw, I40E_QINT_RQCTL(qp));
3745 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3746 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3747 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3748 I40E_QINT_RQCTL_INTEVENT_MASK);
3749
3750 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3751 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3752
3753 wr32(hw, I40E_QINT_RQCTL(qp), val);
3754
3755 val = rd32(hw, I40E_QINT_TQCTL(qp));
3756
3757 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3758 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3759 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3760 I40E_QINT_TQCTL_INTEVENT_MASK);
3761
3762 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3763 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3764
3765 wr32(hw, I40E_QINT_TQCTL(qp), val);
3766 }
3767}
3768
493fb300
AD
3769/**
3770 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3771 * @vsi: the VSI being configured
3772 * @v_idx: Index of vector to be freed
3773 *
3774 * This function frees the memory allocated to the q_vector. In addition if
3775 * NAPI is enabled it will delete any references to the NAPI struct prior
3776 * to freeing the q_vector.
3777 **/
3778static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3779{
3780 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
cd0b6fa6 3781 struct i40e_ring *ring;
493fb300
AD
3782
3783 if (!q_vector)
3784 return;
3785
3786 /* disassociate q_vector from rings */
cd0b6fa6
AD
3787 i40e_for_each_ring(ring, q_vector->tx)
3788 ring->q_vector = NULL;
3789
3790 i40e_for_each_ring(ring, q_vector->rx)
3791 ring->q_vector = NULL;
493fb300
AD
3792
3793 /* only VSI w/ an associated netdev is set up w/ NAPI */
3794 if (vsi->netdev)
3795 netif_napi_del(&q_vector->napi);
3796
3797 vsi->q_vectors[v_idx] = NULL;
3798
3799 kfree_rcu(q_vector, rcu);
3800}
3801
41c445ff
JB
3802/**
3803 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3804 * @vsi: the VSI being un-configured
3805 *
3806 * This frees the memory allocated to the q_vectors and
3807 * deletes references to the NAPI struct.
3808 **/
3809static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3810{
3811 int v_idx;
3812
493fb300
AD
3813 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3814 i40e_free_q_vector(vsi, v_idx);
41c445ff
JB
3815}
3816
3817/**
3818 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3819 * @pf: board private structure
3820 **/
3821static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3822{
3823 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3824 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3825 pci_disable_msix(pf->pdev);
3826 kfree(pf->msix_entries);
3827 pf->msix_entries = NULL;
3828 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3829 pci_disable_msi(pf->pdev);
3830 }
3831 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3832}
3833
3834/**
3835 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3836 * @pf: board private structure
3837 *
3838 * We go through and clear interrupt specific resources and reset the structure
3839 * to pre-load conditions
3840 **/
3841static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3842{
3843 int i;
3844
e147758d
SN
3845 i40e_stop_misc_vector(pf);
3846 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3847 synchronize_irq(pf->msix_entries[0].vector);
3848 free_irq(pf->msix_entries[0].vector, pf);
3849 }
3850
41c445ff 3851 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
505682cd 3852 for (i = 0; i < pf->num_alloc_vsi; i++)
41c445ff
JB
3853 if (pf->vsi[i])
3854 i40e_vsi_free_q_vectors(pf->vsi[i]);
3855 i40e_reset_interrupt_capability(pf);
3856}
3857
3858/**
3859 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3860 * @vsi: the VSI being configured
3861 **/
3862static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3863{
3864 int q_idx;
3865
3866 if (!vsi->netdev)
3867 return;
3868
3869 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
493fb300 3870 napi_enable(&vsi->q_vectors[q_idx]->napi);
41c445ff
JB
3871}
3872
3873/**
3874 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3875 * @vsi: the VSI being configured
3876 **/
3877static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3878{
3879 int q_idx;
3880
3881 if (!vsi->netdev)
3882 return;
3883
3884 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
493fb300 3885 napi_disable(&vsi->q_vectors[q_idx]->napi);
41c445ff
JB
3886}
3887
90ef8d47
SN
3888/**
3889 * i40e_vsi_close - Shut down a VSI
3890 * @vsi: the vsi to be quelled
3891 **/
3892static void i40e_vsi_close(struct i40e_vsi *vsi)
3893{
3894 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
3895 i40e_down(vsi);
3896 i40e_vsi_free_irq(vsi);
3897 i40e_vsi_free_tx_resources(vsi);
3898 i40e_vsi_free_rx_resources(vsi);
3899}
3900
41c445ff
JB
3901/**
3902 * i40e_quiesce_vsi - Pause a given VSI
3903 * @vsi: the VSI being paused
3904 **/
3905static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3906{
3907 if (test_bit(__I40E_DOWN, &vsi->state))
3908 return;
3909
d341b7a5
NP
3910 /* No need to disable FCoE VSI when Tx suspended */
3911 if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
3912 vsi->type == I40E_VSI_FCOE) {
3913 dev_dbg(&vsi->back->pdev->dev,
3914 "%s: VSI seid %d skipping FCoE VSI disable\n",
3915 __func__, vsi->seid);
3916 return;
3917 }
3918
41c445ff
JB
3919 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3920 if (vsi->netdev && netif_running(vsi->netdev)) {
3921 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3922 } else {
90ef8d47 3923 i40e_vsi_close(vsi);
41c445ff
JB
3924 }
3925}
3926
3927/**
3928 * i40e_unquiesce_vsi - Resume a given VSI
3929 * @vsi: the VSI being resumed
3930 **/
3931static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3932{
3933 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3934 return;
3935
3936 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3937 if (vsi->netdev && netif_running(vsi->netdev))
3938 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3939 else
8276f757 3940 i40e_vsi_open(vsi); /* this clears the DOWN bit */
41c445ff
JB
3941}
3942
3943/**
3944 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3945 * @pf: the PF
3946 **/
3947static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3948{
3949 int v;
3950
505682cd 3951 for (v = 0; v < pf->num_alloc_vsi; v++) {
41c445ff
JB
3952 if (pf->vsi[v])
3953 i40e_quiesce_vsi(pf->vsi[v]);
3954 }
3955}
3956
3957/**
3958 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3959 * @pf: the PF
3960 **/
3961static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3962{
3963 int v;
3964
505682cd 3965 for (v = 0; v < pf->num_alloc_vsi; v++) {
41c445ff
JB
3966 if (pf->vsi[v])
3967 i40e_unquiesce_vsi(pf->vsi[v]);
3968 }
3969}
3970
69129dc3
NP
3971#ifdef CONFIG_I40E_DCB
3972/**
3973 * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
3974 * @vsi: the VSI being configured
3975 *
3976 * This function waits for the given VSI's Tx queues to be disabled.
3977 **/
3978static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
3979{
3980 struct i40e_pf *pf = vsi->back;
3981 int i, pf_q, ret;
3982
3983 pf_q = vsi->base_queue;
3984 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3985 /* Check and wait for the disable status of the queue */
3986 ret = i40e_pf_txq_wait(pf, pf_q, false);
3987 if (ret) {
3988 dev_info(&pf->pdev->dev,
3989 "%s: VSI seid %d Tx ring %d disable timeout\n",
3990 __func__, vsi->seid, pf_q);
3991 return ret;
3992 }
3993 }
3994
3995 return 0;
3996}
3997
3998/**
3999 * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
4000 * @pf: the PF
4001 *
4002 * This function waits for the Tx queues to be in disabled state for all the
4003 * VSIs that are managed by this PF.
4004 **/
4005static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
4006{
4007 int v, ret = 0;
4008
4009 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
d341b7a5
NP
4010 /* No need to wait for FCoE VSI queues */
4011 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
69129dc3
NP
4012 ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
4013 if (ret)
4014 break;
4015 }
4016 }
4017
4018 return ret;
4019}
4020
4021#endif
63d7e5a4
NP
4022/**
4023 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4024 * @pf: pointer to pf
4025 *
4026 * Get TC map for ISCSI PF type that will include iSCSI TC
4027 * and LAN TC.
4028 **/
4029static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4030{
4031 struct i40e_dcb_app_priority_table app;
4032 struct i40e_hw *hw = &pf->hw;
4033 u8 enabled_tc = 1; /* TC0 is always enabled */
4034 u8 tc, i;
4035 /* Get the iSCSI APP TLV */
4036 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4037
4038 for (i = 0; i < dcbcfg->numapps; i++) {
4039 app = dcbcfg->app[i];
4040 if (app.selector == I40E_APP_SEL_TCPIP &&
4041 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4042 tc = dcbcfg->etscfg.prioritytable[app.priority];
4043 enabled_tc |= (1 << tc);
4044 break;
4045 }
4046 }
4047
4048 return enabled_tc;
4049}
4050
41c445ff
JB
4051/**
4052 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4053 * @dcbcfg: the corresponding DCBx configuration structure
4054 *
4055 * Return the number of TCs from given DCBx configuration
4056 **/
4057static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4058{
078b5876
JB
4059 u8 num_tc = 0;
4060 int i;
41c445ff
JB
4061
4062 /* Scan the ETS Config Priority Table to find
4063 * traffic class enabled for a given priority
4064 * and use the traffic class index to get the
4065 * number of traffic classes enabled
4066 */
4067 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4068 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4069 num_tc = dcbcfg->etscfg.prioritytable[i];
4070 }
4071
4072 /* Traffic class index starts from zero so
4073 * increment to return the actual count
4074 */
078b5876 4075 return num_tc + 1;
41c445ff
JB
4076}
4077
4078/**
4079 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4080 * @dcbcfg: the corresponding DCBx configuration structure
4081 *
4082 * Query the current DCB configuration and return the number of
4083 * traffic classes enabled from the given DCBX config
4084 **/
4085static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4086{
4087 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4088 u8 enabled_tc = 1;
4089 u8 i;
4090
4091 for (i = 0; i < num_tc; i++)
4092 enabled_tc |= 1 << i;
4093
4094 return enabled_tc;
4095}
4096
4097/**
4098 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4099 * @pf: PF being queried
4100 *
4101 * Return number of traffic classes enabled for the given PF
4102 **/
4103static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4104{
4105 struct i40e_hw *hw = &pf->hw;
4106 u8 i, enabled_tc;
4107 u8 num_tc = 0;
4108 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4109
4110 /* If DCB is not enabled then always in single TC */
4111 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4112 return 1;
4113
63d7e5a4
NP
4114 /* SFP mode will be enabled for all TCs on port */
4115 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4116 return i40e_dcb_get_num_tc(dcbcfg);
4117
41c445ff 4118 /* MFP mode return count of enabled TCs for this PF */
63d7e5a4
NP
4119 if (pf->hw.func_caps.iscsi)
4120 enabled_tc = i40e_get_iscsi_tc_map(pf);
4121 else
41c445ff 4122 enabled_tc = pf->hw.func_caps.enabled_tcmap;
41c445ff 4123
63d7e5a4
NP
4124 /* At least have TC0 */
4125 enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4126 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4127 if (enabled_tc & (1 << i))
4128 num_tc++;
4129 }
4130 return num_tc;
41c445ff
JB
4131}
4132
4133/**
4134 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4135 * @pf: PF being queried
4136 *
4137 * Return a bitmap for first enabled traffic class for this PF.
4138 **/
4139static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4140{
4141 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4142 u8 i = 0;
4143
4144 if (!enabled_tc)
4145 return 0x1; /* TC0 */
4146
4147 /* Find the first enabled TC */
4148 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4149 if (enabled_tc & (1 << i))
4150 break;
4151 }
4152
4153 return 1 << i;
4154}
4155
4156/**
4157 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4158 * @pf: PF being queried
4159 *
4160 * Return a bitmap for enabled traffic classes for this PF.
4161 **/
4162static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4163{
4164 /* If DCB is not enabled for this PF then just return default TC */
4165 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4166 return i40e_pf_get_default_tc(pf);
4167
41c445ff 4168 /* SFP mode we want PF to be enabled for all TCs */
63d7e5a4
NP
4169 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4170 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4171
4172 /* MPF enabled and iSCSI PF type */
4173 if (pf->hw.func_caps.iscsi)
4174 return i40e_get_iscsi_tc_map(pf);
4175 else
4176 return pf->hw.func_caps.enabled_tcmap;
41c445ff
JB
4177}
4178
4179/**
4180 * i40e_vsi_get_bw_info - Query VSI BW Information
4181 * @vsi: the VSI being queried
4182 *
4183 * Returns 0 on success, negative value on failure
4184 **/
4185static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4186{
4187 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4188 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4189 struct i40e_pf *pf = vsi->back;
4190 struct i40e_hw *hw = &pf->hw;
dcae29be 4191 i40e_status aq_ret;
41c445ff 4192 u32 tc_bw_max;
41c445ff
JB
4193 int i;
4194
4195 /* Get the VSI level BW configuration */
dcae29be
JB
4196 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4197 if (aq_ret) {
41c445ff
JB
4198 dev_info(&pf->pdev->dev,
4199 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
dcae29be
JB
4200 aq_ret, pf->hw.aq.asq_last_status);
4201 return -EINVAL;
41c445ff
JB
4202 }
4203
4204 /* Get the VSI level BW configuration per TC */
dcae29be 4205 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
6838b535 4206 NULL);
dcae29be 4207 if (aq_ret) {
41c445ff
JB
4208 dev_info(&pf->pdev->dev,
4209 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
dcae29be
JB
4210 aq_ret, pf->hw.aq.asq_last_status);
4211 return -EINVAL;
41c445ff
JB
4212 }
4213
4214 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4215 dev_info(&pf->pdev->dev,
4216 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4217 bw_config.tc_valid_bits,
4218 bw_ets_config.tc_valid_bits);
4219 /* Still continuing */
4220 }
4221
4222 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4223 vsi->bw_max_quanta = bw_config.max_bw;
4224 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4225 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4226 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4227 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4228 vsi->bw_ets_limit_credits[i] =
4229 le16_to_cpu(bw_ets_config.credits[i]);
4230 /* 3 bits out of 4 for each TC */
4231 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4232 }
078b5876 4233
dcae29be 4234 return 0;
41c445ff
JB
4235}
4236
4237/**
4238 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4239 * @vsi: the VSI being configured
4240 * @enabled_tc: TC bitmap
4241 * @bw_credits: BW shared credits per TC
4242 *
4243 * Returns 0 on success, negative value on failure
4244 **/
dcae29be 4245static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
41c445ff
JB
4246 u8 *bw_share)
4247{
4248 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
dcae29be
JB
4249 i40e_status aq_ret;
4250 int i;
41c445ff
JB
4251
4252 bw_data.tc_valid_bits = enabled_tc;
4253 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4254 bw_data.tc_bw_credits[i] = bw_share[i];
4255
dcae29be
JB
4256 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4257 NULL);
4258 if (aq_ret) {
41c445ff 4259 dev_info(&vsi->back->pdev->dev,
69bfb110
JB
4260 "AQ command Config VSI BW allocation per TC failed = %d\n",
4261 vsi->back->hw.aq.asq_last_status);
dcae29be 4262 return -EINVAL;
41c445ff
JB
4263 }
4264
4265 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4266 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4267
dcae29be 4268 return 0;
41c445ff
JB
4269}
4270
4271/**
4272 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4273 * @vsi: the VSI being configured
4274 * @enabled_tc: TC map to be enabled
4275 *
4276 **/
4277static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4278{
4279 struct net_device *netdev = vsi->netdev;
4280 struct i40e_pf *pf = vsi->back;
4281 struct i40e_hw *hw = &pf->hw;
4282 u8 netdev_tc = 0;
4283 int i;
4284 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4285
4286 if (!netdev)
4287 return;
4288
4289 if (!enabled_tc) {
4290 netdev_reset_tc(netdev);
4291 return;
4292 }
4293
4294 /* Set up actual enabled TCs on the VSI */
4295 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4296 return;
4297
4298 /* set per TC queues for the VSI */
4299 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4300 /* Only set TC queues for enabled tcs
4301 *
4302 * e.g. For a VSI that has TC0 and TC3 enabled the
4303 * enabled_tc bitmap would be 0x00001001; the driver
4304 * will set the numtc for netdev as 2 that will be
4305 * referenced by the netdev layer as TC 0 and 1.
4306 */
4307 if (vsi->tc_config.enabled_tc & (1 << i))
4308 netdev_set_tc_queue(netdev,
4309 vsi->tc_config.tc_info[i].netdev_tc,
4310 vsi->tc_config.tc_info[i].qcount,
4311 vsi->tc_config.tc_info[i].qoffset);
4312 }
4313
4314 /* Assign UP2TC map for the VSI */
4315 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4316 /* Get the actual TC# for the UP */
4317 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4318 /* Get the mapped netdev TC# for the UP */
4319 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
4320 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4321 }
4322}
4323
4324/**
4325 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
4326 * @vsi: the VSI being configured
4327 * @ctxt: the ctxt buffer returned from AQ VSI update param command
4328 **/
4329static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4330 struct i40e_vsi_context *ctxt)
4331{
4332 /* copy just the sections touched not the entire info
4333 * since not all sections are valid as returned by
4334 * update vsi params
4335 */
4336 vsi->info.mapping_flags = ctxt->info.mapping_flags;
4337 memcpy(&vsi->info.queue_mapping,
4338 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4339 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4340 sizeof(vsi->info.tc_mapping));
4341}
4342
4343/**
4344 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4345 * @vsi: VSI to be configured
4346 * @enabled_tc: TC bitmap
4347 *
4348 * This configures a particular VSI for TCs that are mapped to the
4349 * given TC bitmap. It uses default bandwidth share for TCs across
4350 * VSIs to configure TC for a particular VSI.
4351 *
4352 * NOTE:
4353 * It is expected that the VSI queues have been quisced before calling
4354 * this function.
4355 **/
4356static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4357{
4358 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4359 struct i40e_vsi_context ctxt;
4360 int ret = 0;
4361 int i;
4362
4363 /* Check if enabled_tc is same as existing or new TCs */
4364 if (vsi->tc_config.enabled_tc == enabled_tc)
4365 return ret;
4366
4367 /* Enable ETS TCs with equal BW Share for now across all VSIs */
4368 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4369 if (enabled_tc & (1 << i))
4370 bw_share[i] = 1;
4371 }
4372
4373 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4374 if (ret) {
4375 dev_info(&vsi->back->pdev->dev,
4376 "Failed configuring TC map %d for VSI %d\n",
4377 enabled_tc, vsi->seid);
4378 goto out;
4379 }
4380
4381 /* Update Queue Pairs Mapping for currently enabled UPs */
4382 ctxt.seid = vsi->seid;
4383 ctxt.pf_num = vsi->back->hw.pf_id;
4384 ctxt.vf_num = 0;
4385 ctxt.uplink_seid = vsi->uplink_seid;
4386 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4387 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4388
4389 /* Update the VSI after updating the VSI queue-mapping information */
4390 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4391 if (ret) {
4392 dev_info(&vsi->back->pdev->dev,
4393 "update vsi failed, aq_err=%d\n",
4394 vsi->back->hw.aq.asq_last_status);
4395 goto out;
4396 }
4397 /* update the local VSI info with updated queue map */
4398 i40e_vsi_update_queue_map(vsi, &ctxt);
4399 vsi->info.valid_sections = 0;
4400
4401 /* Update current VSI BW information */
4402 ret = i40e_vsi_get_bw_info(vsi);
4403 if (ret) {
4404 dev_info(&vsi->back->pdev->dev,
4405 "Failed updating vsi bw info, aq_err=%d\n",
4406 vsi->back->hw.aq.asq_last_status);
4407 goto out;
4408 }
4409
4410 /* Update the netdev TC setup */
4411 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4412out:
4413 return ret;
4414}
4415
4e3b35b0
NP
4416/**
4417 * i40e_veb_config_tc - Configure TCs for given VEB
4418 * @veb: given VEB
4419 * @enabled_tc: TC bitmap
4420 *
4421 * Configures given TC bitmap for VEB (switching) element
4422 **/
4423int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4424{
4425 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4426 struct i40e_pf *pf = veb->pf;
4427 int ret = 0;
4428 int i;
4429
4430 /* No TCs or already enabled TCs just return */
4431 if (!enabled_tc || veb->enabled_tc == enabled_tc)
4432 return ret;
4433
4434 bw_data.tc_valid_bits = enabled_tc;
4435 /* bw_data.absolute_credits is not set (relative) */
4436
4437 /* Enable ETS TCs with equal BW Share for now */
4438 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4439 if (enabled_tc & (1 << i))
4440 bw_data.tc_bw_share_credits[i] = 1;
4441 }
4442
4443 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4444 &bw_data, NULL);
4445 if (ret) {
4446 dev_info(&pf->pdev->dev,
4447 "veb bw config failed, aq_err=%d\n",
4448 pf->hw.aq.asq_last_status);
4449 goto out;
4450 }
4451
4452 /* Update the BW information */
4453 ret = i40e_veb_get_bw_info(veb);
4454 if (ret) {
4455 dev_info(&pf->pdev->dev,
4456 "Failed getting veb bw config, aq_err=%d\n",
4457 pf->hw.aq.asq_last_status);
4458 }
4459
4460out:
4461 return ret;
4462}
4463
4464#ifdef CONFIG_I40E_DCB
4465/**
4466 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4467 * @pf: PF struct
4468 *
4469 * Reconfigure VEB/VSIs on a given PF; it is assumed that
4470 * the caller would've quiesce all the VSIs before calling
4471 * this function
4472 **/
4473static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4474{
4475 u8 tc_map = 0;
4476 int ret;
4477 u8 v;
4478
4479 /* Enable the TCs available on PF to all VEBs */
4480 tc_map = i40e_pf_get_tc_map(pf);
4481 for (v = 0; v < I40E_MAX_VEB; v++) {
4482 if (!pf->veb[v])
4483 continue;
4484 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4485 if (ret) {
4486 dev_info(&pf->pdev->dev,
4487 "Failed configuring TC for VEB seid=%d\n",
4488 pf->veb[v]->seid);
4489 /* Will try to configure as many components */
4490 }
4491 }
4492
4493 /* Update each VSI */
505682cd 4494 for (v = 0; v < pf->num_alloc_vsi; v++) {
4e3b35b0
NP
4495 if (!pf->vsi[v])
4496 continue;
4497
4498 /* - Enable all TCs for the LAN VSI
38e00438
VD
4499#ifdef I40E_FCOE
4500 * - For FCoE VSI only enable the TC configured
4501 * as per the APP TLV
4502#endif
4e3b35b0
NP
4503 * - For all others keep them at TC0 for now
4504 */
4505 if (v == pf->lan_vsi)
4506 tc_map = i40e_pf_get_tc_map(pf);
4507 else
4508 tc_map = i40e_pf_get_default_tc(pf);
38e00438
VD
4509#ifdef I40E_FCOE
4510 if (pf->vsi[v]->type == I40E_VSI_FCOE)
4511 tc_map = i40e_get_fcoe_tc_map(pf);
4512#endif /* #ifdef I40E_FCOE */
4e3b35b0
NP
4513
4514 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4515 if (ret) {
4516 dev_info(&pf->pdev->dev,
4517 "Failed configuring TC for VSI seid=%d\n",
4518 pf->vsi[v]->seid);
4519 /* Will try to configure as many components */
4520 } else {
0672a091
NP
4521 /* Re-configure VSI vectors based on updated TC map */
4522 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
4e3b35b0
NP
4523 if (pf->vsi[v]->netdev)
4524 i40e_dcbnl_set_all(pf->vsi[v]);
4525 }
4526 }
4527}
4528
2fd75f31
NP
4529/**
4530 * i40e_resume_port_tx - Resume port Tx
4531 * @pf: PF struct
4532 *
4533 * Resume a port's Tx and issue a PF reset in case of failure to
4534 * resume.
4535 **/
4536static int i40e_resume_port_tx(struct i40e_pf *pf)
4537{
4538 struct i40e_hw *hw = &pf->hw;
4539 int ret;
4540
4541 ret = i40e_aq_resume_port_tx(hw, NULL);
4542 if (ret) {
4543 dev_info(&pf->pdev->dev,
4544 "AQ command Resume Port Tx failed = %d\n",
4545 pf->hw.aq.asq_last_status);
4546 /* Schedule PF reset to recover */
4547 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4548 i40e_service_event_schedule(pf);
4549 }
4550
4551 return ret;
4552}
4553
4e3b35b0
NP
4554/**
4555 * i40e_init_pf_dcb - Initialize DCB configuration
4556 * @pf: PF being configured
4557 *
4558 * Query the current DCB configuration and cache it
4559 * in the hardware structure
4560 **/
4561static int i40e_init_pf_dcb(struct i40e_pf *pf)
4562{
4563 struct i40e_hw *hw = &pf->hw;
4564 int err = 0;
4565
4e3b35b0
NP
4566 /* Get the initial DCB configuration */
4567 err = i40e_init_dcb(hw);
4568 if (!err) {
4569 /* Device/Function is not DCBX capable */
4570 if ((!hw->func_caps.dcb) ||
4571 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
4572 dev_info(&pf->pdev->dev,
4573 "DCBX offload is not supported or is disabled for this PF.\n");
4574
4575 if (pf->flags & I40E_FLAG_MFP_ENABLED)
4576 goto out;
4577
4578 } else {
4579 /* When status is not DISABLED then DCBX in FW */
4580 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
4581 DCB_CAP_DCBX_VER_IEEE;
4d9b6043
NP
4582
4583 pf->flags |= I40E_FLAG_DCB_CAPABLE;
4584 /* Enable DCB tagging only when more than one TC */
4585 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
4586 pf->flags |= I40E_FLAG_DCB_ENABLED;
9fa61dd2
NP
4587 dev_dbg(&pf->pdev->dev,
4588 "DCBX offload is supported for this PF.\n");
4e3b35b0 4589 }
014269ff 4590 } else {
aebfc816
SN
4591 dev_info(&pf->pdev->dev,
4592 "AQ Querying DCB configuration failed: aq_err %d\n",
014269ff 4593 pf->hw.aq.asq_last_status);
4e3b35b0
NP
4594 }
4595
4596out:
4597 return err;
4598}
4599#endif /* CONFIG_I40E_DCB */
cf05ed08
JB
4600#define SPEED_SIZE 14
4601#define FC_SIZE 8
4602/**
4603 * i40e_print_link_message - print link up or down
4604 * @vsi: the VSI for which link needs a message
4605 */
4606static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
4607{
4608 char speed[SPEED_SIZE] = "Unknown";
4609 char fc[FC_SIZE] = "RX/TX";
4610
4611 if (!isup) {
4612 netdev_info(vsi->netdev, "NIC Link is Down\n");
4613 return;
4614 }
4615
148c2d80
GR
4616 /* Warn user if link speed on NPAR enabled partition is not at
4617 * least 10GB
4618 */
4619 if (vsi->back->hw.func_caps.npar_enable &&
4620 (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
4621 vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
4622 netdev_warn(vsi->netdev,
4623 "The partition detected link speed that is less than 10Gbps\n");
4624
cf05ed08
JB
4625 switch (vsi->back->hw.phy.link_info.link_speed) {
4626 case I40E_LINK_SPEED_40GB:
35a7d804 4627 strlcpy(speed, "40 Gbps", SPEED_SIZE);
cf05ed08
JB
4628 break;
4629 case I40E_LINK_SPEED_10GB:
35a7d804 4630 strlcpy(speed, "10 Gbps", SPEED_SIZE);
cf05ed08
JB
4631 break;
4632 case I40E_LINK_SPEED_1GB:
35a7d804 4633 strlcpy(speed, "1000 Mbps", SPEED_SIZE);
cf05ed08 4634 break;
5960d33f
MW
4635 case I40E_LINK_SPEED_100MB:
4636 strncpy(speed, "100 Mbps", SPEED_SIZE);
4637 break;
cf05ed08
JB
4638 default:
4639 break;
4640 }
4641
4642 switch (vsi->back->hw.fc.current_mode) {
4643 case I40E_FC_FULL:
35a7d804 4644 strlcpy(fc, "RX/TX", FC_SIZE);
cf05ed08
JB
4645 break;
4646 case I40E_FC_TX_PAUSE:
35a7d804 4647 strlcpy(fc, "TX", FC_SIZE);
cf05ed08
JB
4648 break;
4649 case I40E_FC_RX_PAUSE:
35a7d804 4650 strlcpy(fc, "RX", FC_SIZE);
cf05ed08
JB
4651 break;
4652 default:
35a7d804 4653 strlcpy(fc, "None", FC_SIZE);
cf05ed08
JB
4654 break;
4655 }
4656
4657 netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
4658 speed, fc);
4659}
4e3b35b0 4660
41c445ff
JB
4661/**
4662 * i40e_up_complete - Finish the last steps of bringing up a connection
4663 * @vsi: the VSI being configured
4664 **/
4665static int i40e_up_complete(struct i40e_vsi *vsi)
4666{
4667 struct i40e_pf *pf = vsi->back;
4668 int err;
4669
4670 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4671 i40e_vsi_configure_msix(vsi);
4672 else
4673 i40e_configure_msi_and_legacy(vsi);
4674
4675 /* start rings */
4676 err = i40e_vsi_control_rings(vsi, true);
4677 if (err)
4678 return err;
4679
4680 clear_bit(__I40E_DOWN, &vsi->state);
4681 i40e_napi_enable_all(vsi);
4682 i40e_vsi_enable_irq(vsi);
4683
4684 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
4685 (vsi->netdev)) {
cf05ed08 4686 i40e_print_link_message(vsi, true);
41c445ff
JB
4687 netif_tx_start_all_queues(vsi->netdev);
4688 netif_carrier_on(vsi->netdev);
6d779b41 4689 } else if (vsi->netdev) {
cf05ed08 4690 i40e_print_link_message(vsi, false);
7b592f61
CW
4691 /* need to check for qualified module here*/
4692 if ((pf->hw.phy.link_info.link_info &
4693 I40E_AQ_MEDIA_AVAILABLE) &&
4694 (!(pf->hw.phy.link_info.an_info &
4695 I40E_AQ_QUALIFIED_MODULE)))
4696 netdev_err(vsi->netdev,
4697 "the driver failed to link because an unqualified module was detected.");
41c445ff 4698 }
ca64fa4e
ASJ
4699
4700 /* replay FDIR SB filters */
1e1be8f6
ASJ
4701 if (vsi->type == I40E_VSI_FDIR) {
4702 /* reset fd counters */
4703 pf->fd_add_err = pf->fd_atr_cnt = 0;
4704 if (pf->fd_tcp_rule > 0) {
4705 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
4706 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
4707 pf->fd_tcp_rule = 0;
4708 }
ca64fa4e 4709 i40e_fdir_filter_restore(vsi);
1e1be8f6 4710 }
41c445ff
JB
4711 i40e_service_event_schedule(pf);
4712
4713 return 0;
4714}
4715
4716/**
4717 * i40e_vsi_reinit_locked - Reset the VSI
4718 * @vsi: the VSI being configured
4719 *
4720 * Rebuild the ring structs after some configuration
4721 * has changed, e.g. MTU size.
4722 **/
4723static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
4724{
4725 struct i40e_pf *pf = vsi->back;
4726
4727 WARN_ON(in_interrupt());
4728 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
4729 usleep_range(1000, 2000);
4730 i40e_down(vsi);
4731
4732 /* Give a VF some time to respond to the reset. The
4733 * two second wait is based upon the watchdog cycle in
4734 * the VF driver.
4735 */
4736 if (vsi->type == I40E_VSI_SRIOV)
4737 msleep(2000);
4738 i40e_up(vsi);
4739 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
4740}
4741
4742/**
4743 * i40e_up - Bring the connection back up after being down
4744 * @vsi: the VSI being configured
4745 **/
4746int i40e_up(struct i40e_vsi *vsi)
4747{
4748 int err;
4749
4750 err = i40e_vsi_configure(vsi);
4751 if (!err)
4752 err = i40e_up_complete(vsi);
4753
4754 return err;
4755}
4756
4757/**
4758 * i40e_down - Shutdown the connection processing
4759 * @vsi: the VSI being stopped
4760 **/
4761void i40e_down(struct i40e_vsi *vsi)
4762{
4763 int i;
4764
4765 /* It is assumed that the caller of this function
4766 * sets the vsi->state __I40E_DOWN bit.
4767 */
4768 if (vsi->netdev) {
4769 netif_carrier_off(vsi->netdev);
4770 netif_tx_disable(vsi->netdev);
4771 }
4772 i40e_vsi_disable_irq(vsi);
4773 i40e_vsi_control_rings(vsi, false);
4774 i40e_napi_disable_all(vsi);
4775
4776 for (i = 0; i < vsi->num_queue_pairs; i++) {
9f65e15b
AD
4777 i40e_clean_tx_ring(vsi->tx_rings[i]);
4778 i40e_clean_rx_ring(vsi->rx_rings[i]);
41c445ff
JB
4779 }
4780}
4781
4782/**
4783 * i40e_setup_tc - configure multiple traffic classes
4784 * @netdev: net device to configure
4785 * @tc: number of traffic classes to enable
4786 **/
38e00438
VD
4787#ifdef I40E_FCOE
4788int i40e_setup_tc(struct net_device *netdev, u8 tc)
4789#else
41c445ff 4790static int i40e_setup_tc(struct net_device *netdev, u8 tc)
38e00438 4791#endif
41c445ff
JB
4792{
4793 struct i40e_netdev_priv *np = netdev_priv(netdev);
4794 struct i40e_vsi *vsi = np->vsi;
4795 struct i40e_pf *pf = vsi->back;
4796 u8 enabled_tc = 0;
4797 int ret = -EINVAL;
4798 int i;
4799
4800 /* Check if DCB enabled to continue */
4801 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
4802 netdev_info(netdev, "DCB is not enabled for adapter\n");
4803 goto exit;
4804 }
4805
4806 /* Check if MFP enabled */
4807 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4808 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
4809 goto exit;
4810 }
4811
4812 /* Check whether tc count is within enabled limit */
4813 if (tc > i40e_pf_get_num_tc(pf)) {
4814 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
4815 goto exit;
4816 }
4817
4818 /* Generate TC map for number of tc requested */
4819 for (i = 0; i < tc; i++)
4820 enabled_tc |= (1 << i);
4821
4822 /* Requesting same TC configuration as already enabled */
4823 if (enabled_tc == vsi->tc_config.enabled_tc)
4824 return 0;
4825
4826 /* Quiesce VSI queues */
4827 i40e_quiesce_vsi(vsi);
4828
4829 /* Configure VSI for enabled TCs */
4830 ret = i40e_vsi_config_tc(vsi, enabled_tc);
4831 if (ret) {
4832 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
4833 vsi->seid);
4834 goto exit;
4835 }
4836
4837 /* Unquiesce VSI */
4838 i40e_unquiesce_vsi(vsi);
4839
4840exit:
4841 return ret;
4842}
4843
4844/**
4845 * i40e_open - Called when a network interface is made active
4846 * @netdev: network interface device structure
4847 *
4848 * The open entry point is called when a network interface is made
4849 * active by the system (IFF_UP). At this point all resources needed
4850 * for transmit and receive operations are allocated, the interrupt
4851 * handler is registered with the OS, the netdev watchdog subtask is
4852 * enabled, and the stack is notified that the interface is ready.
4853 *
4854 * Returns 0 on success, negative value on failure
4855 **/
38e00438
VD
4856#ifdef I40E_FCOE
4857int i40e_open(struct net_device *netdev)
4858#else
41c445ff 4859static int i40e_open(struct net_device *netdev)
38e00438 4860#endif
41c445ff
JB
4861{
4862 struct i40e_netdev_priv *np = netdev_priv(netdev);
4863 struct i40e_vsi *vsi = np->vsi;
4864 struct i40e_pf *pf = vsi->back;
41c445ff
JB
4865 int err;
4866
4eb3f768
SN
4867 /* disallow open during test or if eeprom is broken */
4868 if (test_bit(__I40E_TESTING, &pf->state) ||
4869 test_bit(__I40E_BAD_EEPROM, &pf->state))
41c445ff
JB
4870 return -EBUSY;
4871
4872 netif_carrier_off(netdev);
4873
6c167f58
EK
4874 err = i40e_vsi_open(vsi);
4875 if (err)
4876 return err;
4877
059dab69
JB
4878 /* configure global TSO hardware offload settings */
4879 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
4880 TCP_FLAG_FIN) >> 16);
4881 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
4882 TCP_FLAG_FIN |
4883 TCP_FLAG_CWR) >> 16);
4884 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
4885
6c167f58
EK
4886#ifdef CONFIG_I40E_VXLAN
4887 vxlan_get_rx_port(netdev);
4888#endif
4889
4890 return 0;
4891}
4892
4893/**
4894 * i40e_vsi_open -
4895 * @vsi: the VSI to open
4896 *
4897 * Finish initialization of the VSI.
4898 *
4899 * Returns 0 on success, negative value on failure
4900 **/
4901int i40e_vsi_open(struct i40e_vsi *vsi)
4902{
4903 struct i40e_pf *pf = vsi->back;
b294ac70 4904 char int_name[I40E_INT_NAME_STR_LEN];
6c167f58
EK
4905 int err;
4906
41c445ff
JB
4907 /* allocate descriptors */
4908 err = i40e_vsi_setup_tx_resources(vsi);
4909 if (err)
4910 goto err_setup_tx;
4911 err = i40e_vsi_setup_rx_resources(vsi);
4912 if (err)
4913 goto err_setup_rx;
4914
4915 err = i40e_vsi_configure(vsi);
4916 if (err)
4917 goto err_setup_rx;
4918
c22e3c6c
SN
4919 if (vsi->netdev) {
4920 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
4921 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
4922 err = i40e_vsi_request_irq(vsi, int_name);
4923 if (err)
4924 goto err_setup_rx;
41c445ff 4925
c22e3c6c
SN
4926 /* Notify the stack of the actual queue counts. */
4927 err = netif_set_real_num_tx_queues(vsi->netdev,
4928 vsi->num_queue_pairs);
4929 if (err)
4930 goto err_set_queues;
25946ddb 4931
c22e3c6c
SN
4932 err = netif_set_real_num_rx_queues(vsi->netdev,
4933 vsi->num_queue_pairs);
4934 if (err)
4935 goto err_set_queues;
8a9eb7d3
SN
4936
4937 } else if (vsi->type == I40E_VSI_FDIR) {
e240f674 4938 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
b2008cbf
CW
4939 dev_driver_string(&pf->pdev->dev),
4940 dev_name(&pf->pdev->dev));
8a9eb7d3 4941 err = i40e_vsi_request_irq(vsi, int_name);
b2008cbf 4942
c22e3c6c 4943 } else {
ce9ccb17 4944 err = -EINVAL;
6c167f58
EK
4945 goto err_setup_rx;
4946 }
25946ddb 4947
41c445ff
JB
4948 err = i40e_up_complete(vsi);
4949 if (err)
4950 goto err_up_complete;
4951
41c445ff
JB
4952 return 0;
4953
4954err_up_complete:
4955 i40e_down(vsi);
25946ddb 4956err_set_queues:
41c445ff
JB
4957 i40e_vsi_free_irq(vsi);
4958err_setup_rx:
4959 i40e_vsi_free_rx_resources(vsi);
4960err_setup_tx:
4961 i40e_vsi_free_tx_resources(vsi);
4962 if (vsi == pf->vsi[pf->lan_vsi])
4963 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
4964
4965 return err;
4966}
4967
17a73f6b
JG
4968/**
4969 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
4970 * @pf: Pointer to pf
4971 *
4972 * This function destroys the hlist where all the Flow Director
4973 * filters were saved.
4974 **/
4975static void i40e_fdir_filter_exit(struct i40e_pf *pf)
4976{
4977 struct i40e_fdir_filter *filter;
4978 struct hlist_node *node2;
4979
4980 hlist_for_each_entry_safe(filter, node2,
4981 &pf->fdir_filter_list, fdir_node) {
4982 hlist_del(&filter->fdir_node);
4983 kfree(filter);
4984 }
4985 pf->fdir_pf_active_filters = 0;
4986}
4987
41c445ff
JB
4988/**
4989 * i40e_close - Disables a network interface
4990 * @netdev: network interface device structure
4991 *
4992 * The close entry point is called when an interface is de-activated
4993 * by the OS. The hardware is still under the driver's control, but
4994 * this netdev interface is disabled.
4995 *
4996 * Returns 0, this is not allowed to fail
4997 **/
38e00438
VD
4998#ifdef I40E_FCOE
4999int i40e_close(struct net_device *netdev)
5000#else
41c445ff 5001static int i40e_close(struct net_device *netdev)
38e00438 5002#endif
41c445ff
JB
5003{
5004 struct i40e_netdev_priv *np = netdev_priv(netdev);
5005 struct i40e_vsi *vsi = np->vsi;
5006
90ef8d47 5007 i40e_vsi_close(vsi);
41c445ff
JB
5008
5009 return 0;
5010}
5011
5012/**
5013 * i40e_do_reset - Start a PF or Core Reset sequence
5014 * @pf: board private structure
5015 * @reset_flags: which reset is requested
5016 *
5017 * The essential difference in resets is that the PF Reset
5018 * doesn't clear the packet buffers, doesn't reset the PE
5019 * firmware, and doesn't bother the other PFs on the chip.
5020 **/
5021void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
5022{
5023 u32 val;
5024
5025 WARN_ON(in_interrupt());
5026
263fc48f
MW
5027 if (i40e_check_asq_alive(&pf->hw))
5028 i40e_vc_notify_reset(pf);
5029
41c445ff
JB
5030 /* do the biggest reset indicated */
5031 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
5032
5033 /* Request a Global Reset
5034 *
5035 * This will start the chip's countdown to the actual full
5036 * chip reset event, and a warning interrupt to be sent
5037 * to all PFs, including the requestor. Our handler
5038 * for the warning interrupt will deal with the shutdown
5039 * and recovery of the switch setup.
5040 */
69bfb110 5041 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
41c445ff
JB
5042 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5043 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
5044 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5045
5046 } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
5047
5048 /* Request a Core Reset
5049 *
5050 * Same as Global Reset, except does *not* include the MAC/PHY
5051 */
69bfb110 5052 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
41c445ff
JB
5053 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5054 val |= I40E_GLGEN_RTRIG_CORER_MASK;
5055 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5056 i40e_flush(&pf->hw);
5057
7823fe34
SN
5058 } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
5059
5060 /* Request a Firmware Reset
5061 *
5062 * Same as Global reset, plus restarting the
5063 * embedded firmware engine.
5064 */
5065 /* enable EMP Reset */
5066 val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
5067 val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
5068 wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
5069
5070 /* force the reset */
5071 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
5072 val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
5073 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
5074 i40e_flush(&pf->hw);
5075
41c445ff
JB
5076 } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
5077
5078 /* Request a PF Reset
5079 *
5080 * Resets only the PF-specific registers
5081 *
5082 * This goes directly to the tear-down and rebuild of
5083 * the switch, since we need to do all the recovery as
5084 * for the Core Reset.
5085 */
69bfb110 5086 dev_dbg(&pf->pdev->dev, "PFR requested\n");
41c445ff
JB
5087 i40e_handle_reset_warning(pf);
5088
5089 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
5090 int v;
5091
5092 /* Find the VSI(s) that requested a re-init */
5093 dev_info(&pf->pdev->dev,
5094 "VSI reinit requested\n");
505682cd 5095 for (v = 0; v < pf->num_alloc_vsi; v++) {
41c445ff
JB
5096 struct i40e_vsi *vsi = pf->vsi[v];
5097 if (vsi != NULL &&
5098 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
5099 i40e_vsi_reinit_locked(pf->vsi[v]);
5100 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
5101 }
5102 }
5103
b5d06f05
NP
5104 /* no further action needed, so return now */
5105 return;
5106 } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
5107 int v;
5108
5109 /* Find the VSI(s) that needs to be brought down */
5110 dev_info(&pf->pdev->dev, "VSI down requested\n");
5111 for (v = 0; v < pf->num_alloc_vsi; v++) {
5112 struct i40e_vsi *vsi = pf->vsi[v];
5113 if (vsi != NULL &&
5114 test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
5115 set_bit(__I40E_DOWN, &vsi->state);
5116 i40e_down(vsi);
5117 clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
5118 }
5119 }
5120
41c445ff
JB
5121 /* no further action needed, so return now */
5122 return;
5123 } else {
5124 dev_info(&pf->pdev->dev,
5125 "bad reset request 0x%08x\n", reset_flags);
5126 return;
5127 }
5128}
5129
4e3b35b0
NP
5130#ifdef CONFIG_I40E_DCB
5131/**
5132 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5133 * @pf: board private structure
5134 * @old_cfg: current DCB config
5135 * @new_cfg: new DCB config
5136 **/
5137bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5138 struct i40e_dcbx_config *old_cfg,
5139 struct i40e_dcbx_config *new_cfg)
5140{
5141 bool need_reconfig = false;
5142
5143 /* Check if ETS configuration has changed */
5144 if (memcmp(&new_cfg->etscfg,
5145 &old_cfg->etscfg,
5146 sizeof(new_cfg->etscfg))) {
5147 /* If Priority Table has changed reconfig is needed */
5148 if (memcmp(&new_cfg->etscfg.prioritytable,
5149 &old_cfg->etscfg.prioritytable,
5150 sizeof(new_cfg->etscfg.prioritytable))) {
5151 need_reconfig = true;
69bfb110 5152 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
4e3b35b0
NP
5153 }
5154
5155 if (memcmp(&new_cfg->etscfg.tcbwtable,
5156 &old_cfg->etscfg.tcbwtable,
5157 sizeof(new_cfg->etscfg.tcbwtable)))
69bfb110 5158 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
4e3b35b0
NP
5159
5160 if (memcmp(&new_cfg->etscfg.tsatable,
5161 &old_cfg->etscfg.tsatable,
5162 sizeof(new_cfg->etscfg.tsatable)))
69bfb110 5163 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
4e3b35b0
NP
5164 }
5165
5166 /* Check if PFC configuration has changed */
5167 if (memcmp(&new_cfg->pfc,
5168 &old_cfg->pfc,
5169 sizeof(new_cfg->pfc))) {
5170 need_reconfig = true;
69bfb110 5171 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
4e3b35b0
NP
5172 }
5173
5174 /* Check if APP Table has changed */
5175 if (memcmp(&new_cfg->app,
5176 &old_cfg->app,
3d9667a9 5177 sizeof(new_cfg->app))) {
4e3b35b0 5178 need_reconfig = true;
69bfb110 5179 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
3d9667a9 5180 }
4e3b35b0 5181
9fa61dd2
NP
5182 dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
5183 need_reconfig);
4e3b35b0
NP
5184 return need_reconfig;
5185}
5186
5187/**
5188 * i40e_handle_lldp_event - Handle LLDP Change MIB event
5189 * @pf: board private structure
5190 * @e: event info posted on ARQ
5191 **/
5192static int i40e_handle_lldp_event(struct i40e_pf *pf,
5193 struct i40e_arq_event_info *e)
5194{
5195 struct i40e_aqc_lldp_get_mib *mib =
5196 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
5197 struct i40e_hw *hw = &pf->hw;
5198 struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
5199 struct i40e_dcbx_config tmp_dcbx_cfg;
5200 bool need_reconfig = false;
5201 int ret = 0;
5202 u8 type;
5203
4d9b6043
NP
5204 /* Not DCB capable or capability disabled */
5205 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
5206 return ret;
5207
4e3b35b0
NP
5208 /* Ignore if event is not for Nearest Bridge */
5209 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
5210 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
9fa61dd2
NP
5211 dev_dbg(&pf->pdev->dev,
5212 "%s: LLDP event mib bridge type 0x%x\n", __func__, type);
4e3b35b0
NP
5213 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
5214 return ret;
5215
5216 /* Check MIB Type and return if event for Remote MIB update */
5217 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
9fa61dd2
NP
5218 dev_dbg(&pf->pdev->dev,
5219 "%s: LLDP event mib type %s\n", __func__,
5220 type ? "remote" : "local");
4e3b35b0
NP
5221 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
5222 /* Update the remote cached instance and return */
5223 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
5224 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
5225 &hw->remote_dcbx_config);
5226 goto exit;
5227 }
5228
4e3b35b0 5229 memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
9fa61dd2
NP
5230 /* Store the old configuration */
5231 tmp_dcbx_cfg = *dcbx_cfg;
5232
5233 /* Get updated DCBX data from firmware */
5234 ret = i40e_get_dcb_config(&pf->hw);
4e3b35b0 5235 if (ret) {
9fa61dd2 5236 dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
4e3b35b0
NP
5237 goto exit;
5238 }
5239
5240 /* No change detected in DCBX configs */
5241 if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
69bfb110 5242 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
4e3b35b0
NP
5243 goto exit;
5244 }
5245
9fa61dd2 5246 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg);
4e3b35b0 5247
9fa61dd2 5248 i40e_dcbnl_flush_apps(pf, dcbx_cfg);
4e3b35b0
NP
5249
5250 if (!need_reconfig)
5251 goto exit;
5252
4d9b6043
NP
5253 /* Enable DCB tagging only when more than one TC */
5254 if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
5255 pf->flags |= I40E_FLAG_DCB_ENABLED;
5256 else
5257 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
5258
69129dc3 5259 set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
4e3b35b0
NP
5260 /* Reconfiguration needed quiesce all VSIs */
5261 i40e_pf_quiesce_all_vsi(pf);
5262
5263 /* Changes in configuration update VEB/VSI */
5264 i40e_dcb_reconfigure(pf);
5265
2fd75f31
NP
5266 ret = i40e_resume_port_tx(pf);
5267
69129dc3 5268 clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
2fd75f31 5269 /* In case of error no point in resuming VSIs */
69129dc3
NP
5270 if (ret)
5271 goto exit;
5272
5273 /* Wait for the PF's Tx queues to be disabled */
5274 ret = i40e_pf_wait_txq_disabled(pf);
11e47708
PN
5275 if (ret) {
5276 /* Schedule PF reset to recover */
5277 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5278 i40e_service_event_schedule(pf);
5279 } else {
2fd75f31 5280 i40e_pf_unquiesce_all_vsi(pf);
11e47708
PN
5281 }
5282
4e3b35b0
NP
5283exit:
5284 return ret;
5285}
5286#endif /* CONFIG_I40E_DCB */
5287
23326186
ASJ
5288/**
5289 * i40e_do_reset_safe - Protected reset path for userland calls.
5290 * @pf: board private structure
5291 * @reset_flags: which reset is requested
5292 *
5293 **/
5294void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5295{
5296 rtnl_lock();
5297 i40e_do_reset(pf, reset_flags);
5298 rtnl_unlock();
5299}
5300
41c445ff
JB
5301/**
5302 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5303 * @pf: board private structure
5304 * @e: event info posted on ARQ
5305 *
5306 * Handler for LAN Queue Overflow Event generated by the firmware for PF
5307 * and VF queues
5308 **/
5309static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5310 struct i40e_arq_event_info *e)
5311{
5312 struct i40e_aqc_lan_overflow *data =
5313 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5314 u32 queue = le32_to_cpu(data->prtdcb_rupto);
5315 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5316 struct i40e_hw *hw = &pf->hw;
5317 struct i40e_vf *vf;
5318 u16 vf_id;
5319
69bfb110
JB
5320 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5321 queue, qtx_ctl);
41c445ff
JB
5322
5323 /* Queue belongs to VF, find the VF and issue VF reset */
5324 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5325 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5326 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5327 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5328 vf_id -= hw->func_caps.vf_base_id;
5329 vf = &pf->vf[vf_id];
5330 i40e_vc_notify_vf_reset(vf);
5331 /* Allow VF to process pending reset notification */
5332 msleep(20);
5333 i40e_reset_vf(vf, false);
5334 }
5335}
5336
5337/**
5338 * i40e_service_event_complete - Finish up the service event
5339 * @pf: board private structure
5340 **/
5341static void i40e_service_event_complete(struct i40e_pf *pf)
5342{
5343 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
5344
5345 /* flush memory to make sure state is correct before next watchog */
4e857c58 5346 smp_mb__before_atomic();
41c445ff
JB
5347 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
5348}
5349
55a5e60b 5350/**
12957388
ASJ
5351 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5352 * @pf: board private structure
5353 **/
5354int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5355{
5356 int val, fcnt_prog;
5357
5358 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5359 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5360 return fcnt_prog;
5361}
5362
5363/**
5364 * i40e_get_current_fd_count - Get the count of total FD filters programmed
55a5e60b
ASJ
5365 * @pf: board private structure
5366 **/
5367int i40e_get_current_fd_count(struct i40e_pf *pf)
5368{
5369 int val, fcnt_prog;
5370 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5371 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5372 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5373 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5374 return fcnt_prog;
5375}
1e1be8f6 5376
55a5e60b
ASJ
5377/**
5378 * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
5379 * @pf: board private structure
5380 **/
5381void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5382{
5383 u32 fcnt_prog, fcnt_avail;
5384
1e1be8f6
ASJ
5385 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5386 return;
5387
55a5e60b
ASJ
5388 /* Check if, FD SB or ATR was auto disabled and if there is enough room
5389 * to re-enable
5390 */
12957388
ASJ
5391 fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
5392 fcnt_avail = pf->fdir_pf_filter_count;
1e1be8f6
ASJ
5393 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5394 (pf->fd_add_err == 0) ||
5395 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
55a5e60b
ASJ
5396 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5397 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5398 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5399 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
5400 }
5401 }
5402 /* Wait for some more space to be available to turn on ATR */
5403 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
5404 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5405 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
5406 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5407 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
5408 }
5409 }
5410}
5411
1e1be8f6
ASJ
5412#define I40E_MIN_FD_FLUSH_INTERVAL 10
5413/**
5414 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
5415 * @pf: board private structure
5416 **/
5417static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5418{
5419 int flush_wait_retry = 50;
5420 int reg;
5421
1790ed0c
AA
5422 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5423 return;
5424
1e1be8f6
ASJ
5425 if (time_after(jiffies, pf->fd_flush_timestamp +
5426 (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
5427 set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
5428 pf->fd_flush_timestamp = jiffies;
5429 pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED;
5430 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5431 /* flush all filters */
5432 wr32(&pf->hw, I40E_PFQF_CTL_1,
5433 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
5434 i40e_flush(&pf->hw);
60793f4a 5435 pf->fd_flush_cnt++;
1e1be8f6
ASJ
5436 pf->fd_add_err = 0;
5437 do {
5438 /* Check FD flush status every 5-6msec */
5439 usleep_range(5000, 6000);
5440 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
5441 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
5442 break;
5443 } while (flush_wait_retry--);
5444 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
5445 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
5446 } else {
5447 /* replay sideband filters */
5448 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
5449
5450 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
5451 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5452 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5453 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
5454 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
5455 }
5456 }
5457}
5458
5459/**
5460 * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
5461 * @pf: board private structure
5462 **/
5463int i40e_get_current_atr_cnt(struct i40e_pf *pf)
5464{
5465 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
5466}
5467
5468/* We can see up to 256 filter programming desc in transit if the filters are
5469 * being applied really fast; before we see the first
5470 * filter miss error on Rx queue 0. Accumulating enough error messages before
5471 * reacting will make sure we don't cause flush too often.
5472 */
5473#define I40E_MAX_FD_PROGRAM_ERROR 256
5474
41c445ff
JB
5475/**
5476 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5477 * @pf: board private structure
5478 **/
5479static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5480{
41c445ff 5481
41c445ff
JB
5482 /* if interface is down do nothing */
5483 if (test_bit(__I40E_DOWN, &pf->state))
5484 return;
1e1be8f6 5485
1790ed0c
AA
5486 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5487 return;
5488
1e1be8f6
ASJ
5489 if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
5490 (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
5491 (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
5492 i40e_fdir_flush_and_replay(pf);
5493
55a5e60b
ASJ
5494 i40e_fdir_check_and_reenable(pf);
5495
41c445ff
JB
5496}
5497
5498/**
5499 * i40e_vsi_link_event - notify VSI of a link event
5500 * @vsi: vsi to be notified
5501 * @link_up: link up or down
5502 **/
5503static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
5504{
32b5b811 5505 if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
41c445ff
JB
5506 return;
5507
5508 switch (vsi->type) {
5509 case I40E_VSI_MAIN:
38e00438
VD
5510#ifdef I40E_FCOE
5511 case I40E_VSI_FCOE:
5512#endif
41c445ff
JB
5513 if (!vsi->netdev || !vsi->netdev_registered)
5514 break;
5515
5516 if (link_up) {
5517 netif_carrier_on(vsi->netdev);
5518 netif_tx_wake_all_queues(vsi->netdev);
5519 } else {
5520 netif_carrier_off(vsi->netdev);
5521 netif_tx_stop_all_queues(vsi->netdev);
5522 }
5523 break;
5524
5525 case I40E_VSI_SRIOV:
41c445ff
JB
5526 case I40E_VSI_VMDQ2:
5527 case I40E_VSI_CTRL:
5528 case I40E_VSI_MIRROR:
5529 default:
5530 /* there is no notification for other VSIs */
5531 break;
5532 }
5533}
5534
5535/**
5536 * i40e_veb_link_event - notify elements on the veb of a link event
5537 * @veb: veb to be notified
5538 * @link_up: link up or down
5539 **/
5540static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
5541{
5542 struct i40e_pf *pf;
5543 int i;
5544
5545 if (!veb || !veb->pf)
5546 return;
5547 pf = veb->pf;
5548
5549 /* depth first... */
5550 for (i = 0; i < I40E_MAX_VEB; i++)
5551 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
5552 i40e_veb_link_event(pf->veb[i], link_up);
5553
5554 /* ... now the local VSIs */
505682cd 5555 for (i = 0; i < pf->num_alloc_vsi; i++)
41c445ff
JB
5556 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
5557 i40e_vsi_link_event(pf->vsi[i], link_up);
5558}
5559
5560/**
5561 * i40e_link_event - Update netif_carrier status
5562 * @pf: board private structure
5563 **/
5564static void i40e_link_event(struct i40e_pf *pf)
5565{
5566 bool new_link, old_link;
320684cd 5567 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
fef59ddf 5568 u8 new_link_speed, old_link_speed;
41c445ff 5569
1e701e09
JB
5570 /* set this to force the get_link_status call to refresh state */
5571 pf->hw.phy.get_link_info = true;
5572
41c445ff 5573 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
1e701e09 5574 new_link = i40e_get_link_status(&pf->hw);
fef59ddf
CS
5575 old_link_speed = pf->hw.phy.link_info_old.link_speed;
5576 new_link_speed = pf->hw.phy.link_info.link_speed;
41c445ff 5577
1e701e09 5578 if (new_link == old_link &&
fef59ddf 5579 new_link_speed == old_link_speed &&
320684cd
MW
5580 (test_bit(__I40E_DOWN, &vsi->state) ||
5581 new_link == netif_carrier_ok(vsi->netdev)))
41c445ff 5582 return;
320684cd
MW
5583
5584 if (!test_bit(__I40E_DOWN, &vsi->state))
5585 i40e_print_link_message(vsi, new_link);
41c445ff
JB
5586
5587 /* Notify the base of the switch tree connected to
5588 * the link. Floating VEBs are not notified.
5589 */
5590 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
5591 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
5592 else
320684cd 5593 i40e_vsi_link_event(vsi, new_link);
41c445ff
JB
5594
5595 if (pf->vf)
5596 i40e_vc_notify_link_state(pf);
beb0dff1
JK
5597
5598 if (pf->flags & I40E_FLAG_PTP)
5599 i40e_ptp_set_increment(pf);
41c445ff
JB
5600}
5601
5602/**
5603 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
5604 * @pf: board private structure
5605 *
5606 * Set the per-queue flags to request a check for stuck queues in the irq
5607 * clean functions, then force interrupts to be sure the irq clean is called.
5608 **/
5609static void i40e_check_hang_subtask(struct i40e_pf *pf)
5610{
5611 int i, v;
5612
5613 /* If we're down or resetting, just bail */
b67a0335
AA
5614 if (test_bit(__I40E_DOWN, &pf->state) ||
5615 test_bit(__I40E_CONFIG_BUSY, &pf->state))
41c445ff
JB
5616 return;
5617
5618 /* for each VSI/netdev
5619 * for each Tx queue
5620 * set the check flag
5621 * for each q_vector
5622 * force an interrupt
5623 */
505682cd 5624 for (v = 0; v < pf->num_alloc_vsi; v++) {
41c445ff
JB
5625 struct i40e_vsi *vsi = pf->vsi[v];
5626 int armed = 0;
5627
5628 if (!pf->vsi[v] ||
5629 test_bit(__I40E_DOWN, &vsi->state) ||
5630 (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
5631 continue;
5632
5633 for (i = 0; i < vsi->num_queue_pairs; i++) {
9f65e15b 5634 set_check_for_tx_hang(vsi->tx_rings[i]);
41c445ff 5635 if (test_bit(__I40E_HANG_CHECK_ARMED,
9f65e15b 5636 &vsi->tx_rings[i]->state))
41c445ff
JB
5637 armed++;
5638 }
5639
5640 if (armed) {
5641 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
5642 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
5643 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
5d1ff106
SN
5644 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
5645 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
5646 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
5647 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
41c445ff
JB
5648 } else {
5649 u16 vec = vsi->base_vector - 1;
5650 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
5d1ff106
SN
5651 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
5652 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
5653 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
5654 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
41c445ff
JB
5655 for (i = 0; i < vsi->num_q_vectors; i++, vec++)
5656 wr32(&vsi->back->hw,
5657 I40E_PFINT_DYN_CTLN(vec), val);
5658 }
5659 i40e_flush(&vsi->back->hw);
5660 }
5661 }
5662}
5663
5664/**
21536717 5665 * i40e_watchdog_subtask - periodic checks not using event driven response
41c445ff
JB
5666 * @pf: board private structure
5667 **/
5668static void i40e_watchdog_subtask(struct i40e_pf *pf)
5669{
5670 int i;
5671
5672 /* if interface is down do nothing */
5673 if (test_bit(__I40E_DOWN, &pf->state) ||
5674 test_bit(__I40E_CONFIG_BUSY, &pf->state))
5675 return;
5676
21536717
SN
5677 /* make sure we don't do these things too often */
5678 if (time_before(jiffies, (pf->service_timer_previous +
5679 pf->service_timer_period)))
5680 return;
5681 pf->service_timer_previous = jiffies;
5682
5683 i40e_check_hang_subtask(pf);
5684 i40e_link_event(pf);
5685
41c445ff
JB
5686 /* Update the stats for active netdevs so the network stack
5687 * can look at updated numbers whenever it cares to
5688 */
505682cd 5689 for (i = 0; i < pf->num_alloc_vsi; i++)
41c445ff
JB
5690 if (pf->vsi[i] && pf->vsi[i]->netdev)
5691 i40e_update_stats(pf->vsi[i]);
5692
5693 /* Update the stats for the active switching components */
5694 for (i = 0; i < I40E_MAX_VEB; i++)
5695 if (pf->veb[i])
5696 i40e_update_veb_stats(pf->veb[i]);
beb0dff1
JK
5697
5698 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
41c445ff
JB
5699}
5700
5701/**
5702 * i40e_reset_subtask - Set up for resetting the device and driver
5703 * @pf: board private structure
5704 **/
5705static void i40e_reset_subtask(struct i40e_pf *pf)
5706{
5707 u32 reset_flags = 0;
5708
23326186 5709 rtnl_lock();
41c445ff
JB
5710 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
5711 reset_flags |= (1 << __I40E_REINIT_REQUESTED);
5712 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
5713 }
5714 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
5715 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
5716 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5717 }
5718 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
5719 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
5720 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
5721 }
5722 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
5723 reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
5724 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
5725 }
b5d06f05
NP
5726 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
5727 reset_flags |= (1 << __I40E_DOWN_REQUESTED);
5728 clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
5729 }
41c445ff
JB
5730
5731 /* If there's a recovery already waiting, it takes
5732 * precedence before starting a new reset sequence.
5733 */
5734 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
5735 i40e_handle_reset_warning(pf);
23326186 5736 goto unlock;
41c445ff
JB
5737 }
5738
5739 /* If we're already down or resetting, just bail */
5740 if (reset_flags &&
5741 !test_bit(__I40E_DOWN, &pf->state) &&
5742 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
5743 i40e_do_reset(pf, reset_flags);
23326186
ASJ
5744
5745unlock:
5746 rtnl_unlock();
41c445ff
JB
5747}
5748
5749/**
5750 * i40e_handle_link_event - Handle link event
5751 * @pf: board private structure
5752 * @e: event info posted on ARQ
5753 **/
5754static void i40e_handle_link_event(struct i40e_pf *pf,
5755 struct i40e_arq_event_info *e)
5756{
5757 struct i40e_hw *hw = &pf->hw;
5758 struct i40e_aqc_get_link_status *status =
5759 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
5760 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
5761
5762 /* save off old link status information */
5763 memcpy(&pf->hw.phy.link_info_old, hw_link_info,
5764 sizeof(pf->hw.phy.link_info_old));
5765
1e701e09
JB
5766 /* Do a new status request to re-enable LSE reporting
5767 * and load new status information into the hw struct
5768 * This completely ignores any state information
5769 * in the ARQ event info, instead choosing to always
5770 * issue the AQ update link status command.
5771 */
5772 i40e_link_event(pf);
5773
7b592f61
CW
5774 /* check for unqualified module, if link is down */
5775 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
5776 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
5777 (!(status->link_info & I40E_AQ_LINK_UP)))
5778 dev_err(&pf->pdev->dev,
5779 "The driver failed to link because an unqualified module was detected.\n");
41c445ff
JB
5780}
5781
5782/**
5783 * i40e_clean_adminq_subtask - Clean the AdminQ rings
5784 * @pf: board private structure
5785 **/
5786static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
5787{
5788 struct i40e_arq_event_info event;
5789 struct i40e_hw *hw = &pf->hw;
5790 u16 pending, i = 0;
5791 i40e_status ret;
5792 u16 opcode;
86df242b 5793 u32 oldval;
41c445ff
JB
5794 u32 val;
5795
a316f651
ASJ
5796 /* Do not run clean AQ when PF reset fails */
5797 if (test_bit(__I40E_RESET_FAILED, &pf->state))
5798 return;
5799
86df242b
SN
5800 /* check for error indications */
5801 val = rd32(&pf->hw, pf->hw.aq.arq.len);
5802 oldval = val;
5803 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
5804 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
5805 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
5806 }
5807 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
5808 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
5809 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
5810 }
5811 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
5812 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
5813 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
5814 }
5815 if (oldval != val)
5816 wr32(&pf->hw, pf->hw.aq.arq.len, val);
5817
5818 val = rd32(&pf->hw, pf->hw.aq.asq.len);
5819 oldval = val;
5820 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
5821 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
5822 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
5823 }
5824 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
5825 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
5826 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
5827 }
5828 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
5829 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
5830 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
5831 }
5832 if (oldval != val)
5833 wr32(&pf->hw, pf->hw.aq.asq.len, val);
5834
1001dc37
MW
5835 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
5836 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
41c445ff
JB
5837 if (!event.msg_buf)
5838 return;
5839
5840 do {
5841 ret = i40e_clean_arq_element(hw, &event, &pending);
56497978 5842 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
41c445ff 5843 break;
56497978 5844 else if (ret) {
41c445ff
JB
5845 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
5846 break;
5847 }
5848
5849 opcode = le16_to_cpu(event.desc.opcode);
5850 switch (opcode) {
5851
5852 case i40e_aqc_opc_get_link_status:
5853 i40e_handle_link_event(pf, &event);
5854 break;
5855 case i40e_aqc_opc_send_msg_to_pf:
5856 ret = i40e_vc_process_vf_msg(pf,
5857 le16_to_cpu(event.desc.retval),
5858 le32_to_cpu(event.desc.cookie_high),
5859 le32_to_cpu(event.desc.cookie_low),
5860 event.msg_buf,
1001dc37 5861 event.msg_len);
41c445ff
JB
5862 break;
5863 case i40e_aqc_opc_lldp_update_mib:
69bfb110 5864 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
4e3b35b0
NP
5865#ifdef CONFIG_I40E_DCB
5866 rtnl_lock();
5867 ret = i40e_handle_lldp_event(pf, &event);
5868 rtnl_unlock();
5869#endif /* CONFIG_I40E_DCB */
41c445ff
JB
5870 break;
5871 case i40e_aqc_opc_event_lan_overflow:
69bfb110 5872 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
41c445ff
JB
5873 i40e_handle_lan_overflow_event(pf, &event);
5874 break;
0467bc91
SN
5875 case i40e_aqc_opc_send_msg_to_peer:
5876 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
5877 break;
41c445ff
JB
5878 default:
5879 dev_info(&pf->pdev->dev,
0467bc91
SN
5880 "ARQ Error: Unknown event 0x%04x received\n",
5881 opcode);
41c445ff
JB
5882 break;
5883 }
5884 } while (pending && (i++ < pf->adminq_work_limit));
5885
5886 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
5887 /* re-enable Admin queue interrupt cause */
5888 val = rd32(hw, I40E_PFINT_ICR0_ENA);
5889 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
5890 wr32(hw, I40E_PFINT_ICR0_ENA, val);
5891 i40e_flush(hw);
5892
5893 kfree(event.msg_buf);
5894}
5895
4eb3f768
SN
5896/**
5897 * i40e_verify_eeprom - make sure eeprom is good to use
5898 * @pf: board private structure
5899 **/
5900static void i40e_verify_eeprom(struct i40e_pf *pf)
5901{
5902 int err;
5903
5904 err = i40e_diag_eeprom_test(&pf->hw);
5905 if (err) {
5906 /* retry in case of garbage read */
5907 err = i40e_diag_eeprom_test(&pf->hw);
5908 if (err) {
5909 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
5910 err);
5911 set_bit(__I40E_BAD_EEPROM, &pf->state);
5912 }
5913 }
5914
5915 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
5916 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
5917 clear_bit(__I40E_BAD_EEPROM, &pf->state);
5918 }
5919}
5920
41c445ff
JB
5921/**
5922 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
5923 * @veb: pointer to the VEB instance
5924 *
5925 * This is a recursive function that first builds the attached VSIs then
5926 * recurses in to build the next layer of VEB. We track the connections
5927 * through our own index numbers because the seid's from the HW could
5928 * change across the reset.
5929 **/
5930static int i40e_reconstitute_veb(struct i40e_veb *veb)
5931{
5932 struct i40e_vsi *ctl_vsi = NULL;
5933 struct i40e_pf *pf = veb->pf;
5934 int v, veb_idx;
5935 int ret;
5936
5937 /* build VSI that owns this VEB, temporarily attached to base VEB */
505682cd 5938 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
41c445ff
JB
5939 if (pf->vsi[v] &&
5940 pf->vsi[v]->veb_idx == veb->idx &&
5941 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
5942 ctl_vsi = pf->vsi[v];
5943 break;
5944 }
5945 }
5946 if (!ctl_vsi) {
5947 dev_info(&pf->pdev->dev,
5948 "missing owner VSI for veb_idx %d\n", veb->idx);
5949 ret = -ENOENT;
5950 goto end_reconstitute;
5951 }
5952 if (ctl_vsi != pf->vsi[pf->lan_vsi])
5953 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
5954 ret = i40e_add_vsi(ctl_vsi);
5955 if (ret) {
5956 dev_info(&pf->pdev->dev,
5957 "rebuild of owner VSI failed: %d\n", ret);
5958 goto end_reconstitute;
5959 }
5960 i40e_vsi_reset_stats(ctl_vsi);
5961
5962 /* create the VEB in the switch and move the VSI onto the VEB */
5963 ret = i40e_add_veb(veb, ctl_vsi);
5964 if (ret)
5965 goto end_reconstitute;
5966
b64ba084
ASJ
5967 /* Enable LB mode for the main VSI now that it is on a VEB */
5968 i40e_enable_pf_switch_lb(pf);
5969
41c445ff 5970 /* create the remaining VSIs attached to this VEB */
505682cd 5971 for (v = 0; v < pf->num_alloc_vsi; v++) {
41c445ff
JB
5972 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
5973 continue;
5974
5975 if (pf->vsi[v]->veb_idx == veb->idx) {
5976 struct i40e_vsi *vsi = pf->vsi[v];
5977 vsi->uplink_seid = veb->seid;
5978 ret = i40e_add_vsi(vsi);
5979 if (ret) {
5980 dev_info(&pf->pdev->dev,
5981 "rebuild of vsi_idx %d failed: %d\n",
5982 v, ret);
5983 goto end_reconstitute;
5984 }
5985 i40e_vsi_reset_stats(vsi);
5986 }
5987 }
5988
5989 /* create any VEBs attached to this VEB - RECURSION */
5990 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
5991 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
5992 pf->veb[veb_idx]->uplink_seid = veb->seid;
5993 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
5994 if (ret)
5995 break;
5996 }
5997 }
5998
5999end_reconstitute:
6000 return ret;
6001}
6002
6003/**
6004 * i40e_get_capabilities - get info about the HW
6005 * @pf: the PF struct
6006 **/
6007static int i40e_get_capabilities(struct i40e_pf *pf)
6008{
6009 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6010 u16 data_size;
6011 int buf_len;
6012 int err;
6013
6014 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
6015 do {
6016 cap_buf = kzalloc(buf_len, GFP_KERNEL);
6017 if (!cap_buf)
6018 return -ENOMEM;
6019
6020 /* this loads the data into the hw struct for us */
6021 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6022 &data_size,
6023 i40e_aqc_opc_list_func_capabilities,
6024 NULL);
6025 /* data loaded, buffer no longer needed */
6026 kfree(cap_buf);
6027
6028 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6029 /* retry with a larger buffer */
6030 buf_len = data_size;
6031 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6032 dev_info(&pf->pdev->dev,
6033 "capability discovery failed: aq=%d\n",
6034 pf->hw.aq.asq_last_status);
6035 return -ENODEV;
6036 }
6037 } while (err);
6038
ac71b7ba
ASJ
6039 if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
6040 (pf->hw.aq.fw_maj_ver < 2)) {
6041 pf->hw.func_caps.num_msix_vectors++;
6042 pf->hw.func_caps.num_msix_vectors_vf++;
6043 }
6044
41c445ff
JB
6045 if (pf->hw.debug_mask & I40E_DEBUG_USER)
6046 dev_info(&pf->pdev->dev,
6047 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6048 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6049 pf->hw.func_caps.num_msix_vectors,
6050 pf->hw.func_caps.num_msix_vectors_vf,
6051 pf->hw.func_caps.fd_filters_guaranteed,
6052 pf->hw.func_caps.fd_filters_best_effort,
6053 pf->hw.func_caps.num_tx_qp,
6054 pf->hw.func_caps.num_vsis);
6055
7134f9ce
JB
6056#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6057 + pf->hw.func_caps.num_vfs)
6058 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6059 dev_info(&pf->pdev->dev,
6060 "got num_vsis %d, setting num_vsis to %d\n",
6061 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6062 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6063 }
6064
41c445ff
JB
6065 return 0;
6066}
6067
cbf61325
ASJ
6068static int i40e_vsi_clear(struct i40e_vsi *vsi);
6069
41c445ff 6070/**
cbf61325 6071 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
41c445ff
JB
6072 * @pf: board private structure
6073 **/
cbf61325 6074static void i40e_fdir_sb_setup(struct i40e_pf *pf)
41c445ff
JB
6075{
6076 struct i40e_vsi *vsi;
8a9eb7d3 6077 int i;
41c445ff 6078
407e063c
JB
6079 /* quick workaround for an NVM issue that leaves a critical register
6080 * uninitialized
6081 */
6082 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6083 static const u32 hkey[] = {
6084 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6085 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6086 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6087 0x95b3a76d};
6088
6089 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6090 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6091 }
6092
cbf61325 6093 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
41c445ff
JB
6094 return;
6095
cbf61325 6096 /* find existing VSI and see if it needs configuring */
41c445ff 6097 vsi = NULL;
505682cd 6098 for (i = 0; i < pf->num_alloc_vsi; i++) {
cbf61325 6099 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
41c445ff 6100 vsi = pf->vsi[i];
cbf61325
ASJ
6101 break;
6102 }
6103 }
6104
6105 /* create a new VSI if none exists */
41c445ff 6106 if (!vsi) {
cbf61325
ASJ
6107 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6108 pf->vsi[pf->lan_vsi]->seid, 0);
41c445ff
JB
6109 if (!vsi) {
6110 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
8a9eb7d3
SN
6111 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6112 return;
41c445ff 6113 }
cbf61325 6114 }
41c445ff 6115
8a9eb7d3 6116 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
41c445ff
JB
6117}
6118
6119/**
6120 * i40e_fdir_teardown - release the Flow Director resources
6121 * @pf: board private structure
6122 **/
6123static void i40e_fdir_teardown(struct i40e_pf *pf)
6124{
6125 int i;
6126
17a73f6b 6127 i40e_fdir_filter_exit(pf);
505682cd 6128 for (i = 0; i < pf->num_alloc_vsi; i++) {
41c445ff
JB
6129 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6130 i40e_vsi_release(pf->vsi[i]);
6131 break;
6132 }
6133 }
6134}
6135
6136/**
f650a38b 6137 * i40e_prep_for_reset - prep for the core to reset
41c445ff
JB
6138 * @pf: board private structure
6139 *
f650a38b
ASJ
6140 * Close up the VFs and other things in prep for pf Reset.
6141 **/
23cfbe07 6142static void i40e_prep_for_reset(struct i40e_pf *pf)
41c445ff 6143{
41c445ff 6144 struct i40e_hw *hw = &pf->hw;
60442dea 6145 i40e_status ret = 0;
41c445ff
JB
6146 u32 v;
6147
6148 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6149 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
23cfbe07 6150 return;
41c445ff 6151
69bfb110 6152 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
41c445ff 6153
41c445ff
JB
6154 /* quiesce the VSIs and their queues that are not already DOWN */
6155 i40e_pf_quiesce_all_vsi(pf);
6156
505682cd 6157 for (v = 0; v < pf->num_alloc_vsi; v++) {
41c445ff
JB
6158 if (pf->vsi[v])
6159 pf->vsi[v]->seid = 0;
6160 }
6161
6162 i40e_shutdown_adminq(&pf->hw);
6163
f650a38b 6164 /* call shutdown HMC */
60442dea
SN
6165 if (hw->hmc.hmc_obj) {
6166 ret = i40e_shutdown_lan_hmc(hw);
23cfbe07 6167 if (ret)
60442dea
SN
6168 dev_warn(&pf->pdev->dev,
6169 "shutdown_lan_hmc failed: %d\n", ret);
f650a38b 6170 }
f650a38b
ASJ
6171}
6172
44033fac
JB
6173/**
6174 * i40e_send_version - update firmware with driver version
6175 * @pf: PF struct
6176 */
6177static void i40e_send_version(struct i40e_pf *pf)
6178{
6179 struct i40e_driver_version dv;
6180
6181 dv.major_version = DRV_VERSION_MAJOR;
6182 dv.minor_version = DRV_VERSION_MINOR;
6183 dv.build_version = DRV_VERSION_BUILD;
6184 dv.subbuild_version = 0;
35a7d804 6185 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
44033fac
JB
6186 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6187}
6188
f650a38b 6189/**
4dda12e6 6190 * i40e_reset_and_rebuild - reset and rebuild using a saved config
f650a38b 6191 * @pf: board private structure
bc7d338f 6192 * @reinit: if the Main VSI needs to re-initialized.
f650a38b 6193 **/
bc7d338f 6194static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
f650a38b 6195{
f650a38b 6196 struct i40e_hw *hw = &pf->hw;
cafa2ee6 6197 u8 set_fc_aq_fail = 0;
f650a38b
ASJ
6198 i40e_status ret;
6199 u32 v;
6200
41c445ff
JB
6201 /* Now we wait for GRST to settle out.
6202 * We don't have to delete the VEBs or VSIs from the hw switch
6203 * because the reset will make them disappear.
6204 */
6205 ret = i40e_pf_reset(hw);
b5565400 6206 if (ret) {
41c445ff 6207 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
a316f651
ASJ
6208 set_bit(__I40E_RESET_FAILED, &pf->state);
6209 goto clear_recovery;
b5565400 6210 }
41c445ff
JB
6211 pf->pfr_count++;
6212
6213 if (test_bit(__I40E_DOWN, &pf->state))
a316f651 6214 goto clear_recovery;
69bfb110 6215 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
41c445ff
JB
6216
6217 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6218 ret = i40e_init_adminq(&pf->hw);
6219 if (ret) {
6220 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
a316f651 6221 goto clear_recovery;
41c445ff
JB
6222 }
6223
4eb3f768
SN
6224 /* re-verify the eeprom if we just had an EMP reset */
6225 if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
6226 clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
6227 i40e_verify_eeprom(pf);
6228 }
6229
e78ac4bf 6230 i40e_clear_pxe_mode(hw);
41c445ff
JB
6231 ret = i40e_get_capabilities(pf);
6232 if (ret) {
6233 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
6234 ret);
6235 goto end_core_reset;
6236 }
6237
41c445ff
JB
6238 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6239 hw->func_caps.num_rx_qp,
6240 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6241 if (ret) {
6242 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6243 goto end_core_reset;
6244 }
6245 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6246 if (ret) {
6247 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6248 goto end_core_reset;
6249 }
6250
4e3b35b0
NP
6251#ifdef CONFIG_I40E_DCB
6252 ret = i40e_init_pf_dcb(pf);
6253 if (ret) {
aebfc816
SN
6254 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6255 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6256 /* Continue without DCB enabled */
4e3b35b0
NP
6257 }
6258#endif /* CONFIG_I40E_DCB */
38e00438
VD
6259#ifdef I40E_FCOE
6260 ret = i40e_init_pf_fcoe(pf);
6261 if (ret)
6262 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
4e3b35b0 6263
38e00438 6264#endif
41c445ff 6265 /* do basic switch setup */
bc7d338f 6266 ret = i40e_setup_pf_switch(pf, reinit);
41c445ff
JB
6267 if (ret)
6268 goto end_core_reset;
6269
7e2453fe
JB
6270 /* driver is only interested in link up/down and module qualification
6271 * reports from firmware
6272 */
6273 ret = i40e_aq_set_phy_int_mask(&pf->hw,
6274 I40E_AQ_EVENT_LINK_UPDOWN |
6275 I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
6276 if (ret)
6277 dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
6278
cafa2ee6
ASJ
6279 /* make sure our flow control settings are restored */
6280 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6281 if (ret)
6282 dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
6283
41c445ff
JB
6284 /* Rebuild the VSIs and VEBs that existed before reset.
6285 * They are still in our local switch element arrays, so only
6286 * need to rebuild the switch model in the HW.
6287 *
6288 * If there were VEBs but the reconstitution failed, we'll try
6289 * try to recover minimal use by getting the basic PF VSI working.
6290 */
6291 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
69bfb110 6292 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
41c445ff
JB
6293 /* find the one VEB connected to the MAC, and find orphans */
6294 for (v = 0; v < I40E_MAX_VEB; v++) {
6295 if (!pf->veb[v])
6296 continue;
6297
6298 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6299 pf->veb[v]->uplink_seid == 0) {
6300 ret = i40e_reconstitute_veb(pf->veb[v]);
6301
6302 if (!ret)
6303 continue;
6304
6305 /* If Main VEB failed, we're in deep doodoo,
6306 * so give up rebuilding the switch and set up
6307 * for minimal rebuild of PF VSI.
6308 * If orphan failed, we'll report the error
6309 * but try to keep going.
6310 */
6311 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
6312 dev_info(&pf->pdev->dev,
6313 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
6314 ret);
6315 pf->vsi[pf->lan_vsi]->uplink_seid
6316 = pf->mac_seid;
6317 break;
6318 } else if (pf->veb[v]->uplink_seid == 0) {
6319 dev_info(&pf->pdev->dev,
6320 "rebuild of orphan VEB failed: %d\n",
6321 ret);
6322 }
6323 }
6324 }
6325 }
6326
6327 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
cde4cbc7 6328 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
41c445ff
JB
6329 /* no VEB, so rebuild only the Main VSI */
6330 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
6331 if (ret) {
6332 dev_info(&pf->pdev->dev,
6333 "rebuild of Main VSI failed: %d\n", ret);
6334 goto end_core_reset;
6335 }
6336 }
6337
cafa2ee6
ASJ
6338 msleep(75);
6339 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
6340 if (ret) {
6341 dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
6342 pf->hw.aq.asq_last_status);
6343 }
6344
41c445ff
JB
6345 /* reinit the misc interrupt */
6346 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6347 ret = i40e_setup_misc_vector(pf);
6348
6349 /* restart the VSIs that were rebuilt and running before the reset */
6350 i40e_pf_unquiesce_all_vsi(pf);
6351
69f64b2b
MW
6352 if (pf->num_alloc_vfs) {
6353 for (v = 0; v < pf->num_alloc_vfs; v++)
6354 i40e_reset_vf(&pf->vf[v], true);
6355 }
6356
41c445ff 6357 /* tell the firmware that we're starting */
44033fac 6358 i40e_send_version(pf);
41c445ff
JB
6359
6360end_core_reset:
a316f651
ASJ
6361 clear_bit(__I40E_RESET_FAILED, &pf->state);
6362clear_recovery:
41c445ff
JB
6363 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
6364}
6365
f650a38b
ASJ
6366/**
6367 * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
6368 * @pf: board private structure
6369 *
6370 * Close up the VFs and other things in prep for a Core Reset,
6371 * then get ready to rebuild the world.
6372 **/
6373static void i40e_handle_reset_warning(struct i40e_pf *pf)
6374{
23cfbe07
SN
6375 i40e_prep_for_reset(pf);
6376 i40e_reset_and_rebuild(pf, false);
f650a38b
ASJ
6377}
6378
41c445ff
JB
6379/**
6380 * i40e_handle_mdd_event
6381 * @pf: pointer to the pf structure
6382 *
6383 * Called from the MDD irq handler to identify possibly malicious vfs
6384 **/
6385static void i40e_handle_mdd_event(struct i40e_pf *pf)
6386{
6387 struct i40e_hw *hw = &pf->hw;
6388 bool mdd_detected = false;
df430b12 6389 bool pf_mdd_detected = false;
41c445ff
JB
6390 struct i40e_vf *vf;
6391 u32 reg;
6392 int i;
6393
6394 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
6395 return;
6396
6397 /* find what triggered the MDD event */
6398 reg = rd32(hw, I40E_GL_MDET_TX);
6399 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
4c33f83a
ASJ
6400 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6401 I40E_GL_MDET_TX_PF_NUM_SHIFT;
2089ad03 6402 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
4c33f83a 6403 I40E_GL_MDET_TX_VF_NUM_SHIFT;
013f6579 6404 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
4c33f83a 6405 I40E_GL_MDET_TX_EVENT_SHIFT;
2089ad03
MW
6406 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6407 I40E_GL_MDET_TX_QUEUE_SHIFT) -
6408 pf->hw.func_caps.base_queue;
faf32978
JB
6409 if (netif_msg_tx_err(pf))
6410 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
6411 event, queue, pf_num, vf_num);
41c445ff
JB
6412 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
6413 mdd_detected = true;
6414 }
6415 reg = rd32(hw, I40E_GL_MDET_RX);
6416 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
4c33f83a
ASJ
6417 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6418 I40E_GL_MDET_RX_FUNCTION_SHIFT;
013f6579 6419 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
4c33f83a 6420 I40E_GL_MDET_RX_EVENT_SHIFT;
2089ad03
MW
6421 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6422 I40E_GL_MDET_RX_QUEUE_SHIFT) -
6423 pf->hw.func_caps.base_queue;
faf32978
JB
6424 if (netif_msg_rx_err(pf))
6425 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
6426 event, queue, func);
41c445ff
JB
6427 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
6428 mdd_detected = true;
6429 }
6430
df430b12
NP
6431 if (mdd_detected) {
6432 reg = rd32(hw, I40E_PF_MDET_TX);
6433 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6434 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
faf32978 6435 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
df430b12
NP
6436 pf_mdd_detected = true;
6437 }
6438 reg = rd32(hw, I40E_PF_MDET_RX);
6439 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6440 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
faf32978 6441 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
df430b12
NP
6442 pf_mdd_detected = true;
6443 }
6444 /* Queue belongs to the PF, initiate a reset */
6445 if (pf_mdd_detected) {
6446 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
6447 i40e_service_event_schedule(pf);
6448 }
6449 }
6450
41c445ff
JB
6451 /* see if one of the VFs needs its hand slapped */
6452 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
6453 vf = &(pf->vf[i]);
6454 reg = rd32(hw, I40E_VP_MDET_TX(i));
6455 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6456 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
6457 vf->num_mdd_events++;
faf32978
JB
6458 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
6459 i);
41c445ff
JB
6460 }
6461
6462 reg = rd32(hw, I40E_VP_MDET_RX(i));
6463 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6464 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
6465 vf->num_mdd_events++;
faf32978
JB
6466 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
6467 i);
41c445ff
JB
6468 }
6469
6470 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
6471 dev_info(&pf->pdev->dev,
6472 "Too many MDD events on VF %d, disabled\n", i);
6473 dev_info(&pf->pdev->dev,
6474 "Use PF Control I/F to re-enable the VF\n");
6475 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
6476 }
6477 }
6478
6479 /* re-enable mdd interrupt cause */
6480 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
6481 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
6482 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
6483 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
6484 i40e_flush(hw);
6485}
6486
a1c9a9d9
JK
6487#ifdef CONFIG_I40E_VXLAN
6488/**
6489 * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
6490 * @pf: board private structure
6491 **/
6492static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
6493{
a1c9a9d9
JK
6494 struct i40e_hw *hw = &pf->hw;
6495 i40e_status ret;
6496 u8 filter_index;
6497 __be16 port;
6498 int i;
6499
6500 if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
6501 return;
6502
6503 pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
6504
6505 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
6506 if (pf->pending_vxlan_bitmap & (1 << i)) {
6507 pf->pending_vxlan_bitmap &= ~(1 << i);
6508 port = pf->vxlan_ports[i];
6509 ret = port ?
6510 i40e_aq_add_udp_tunnel(hw, ntohs(port),
a1c9a9d9
JK
6511 I40E_AQC_TUNNEL_TYPE_VXLAN,
6512 &filter_index, NULL)
6513 : i40e_aq_del_udp_tunnel(hw, i, NULL);
6514
6515 if (ret) {
6516 dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
6517 port ? "adding" : "deleting",
6518 ntohs(port), port ? i : i);
6519
6520 pf->vxlan_ports[i] = 0;
6521 } else {
6522 dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
6523 port ? "Added" : "Deleted",
6524 ntohs(port), port ? i : filter_index);
6525 }
6526 }
6527 }
6528}
6529
6530#endif
41c445ff
JB
6531/**
6532 * i40e_service_task - Run the driver's async subtasks
6533 * @work: pointer to work_struct containing our data
6534 **/
6535static void i40e_service_task(struct work_struct *work)
6536{
6537 struct i40e_pf *pf = container_of(work,
6538 struct i40e_pf,
6539 service_task);
6540 unsigned long start_time = jiffies;
6541
e57a2fea
SN
6542 /* don't bother with service tasks if a reset is in progress */
6543 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
6544 i40e_service_event_complete(pf);
6545 return;
6546 }
6547
41c445ff
JB
6548 i40e_reset_subtask(pf);
6549 i40e_handle_mdd_event(pf);
6550 i40e_vc_process_vflr_event(pf);
6551 i40e_watchdog_subtask(pf);
6552 i40e_fdir_reinit_subtask(pf);
41c445ff 6553 i40e_sync_filters_subtask(pf);
a1c9a9d9
JK
6554#ifdef CONFIG_I40E_VXLAN
6555 i40e_sync_vxlan_filters_subtask(pf);
6556#endif
41c445ff
JB
6557 i40e_clean_adminq_subtask(pf);
6558
6559 i40e_service_event_complete(pf);
6560
6561 /* If the tasks have taken longer than one timer cycle or there
6562 * is more work to be done, reschedule the service task now
6563 * rather than wait for the timer to tick again.
6564 */
6565 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
6566 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
6567 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
6568 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
6569 i40e_service_event_schedule(pf);
6570}
6571
6572/**
6573 * i40e_service_timer - timer callback
6574 * @data: pointer to PF struct
6575 **/
6576static void i40e_service_timer(unsigned long data)
6577{
6578 struct i40e_pf *pf = (struct i40e_pf *)data;
6579
6580 mod_timer(&pf->service_timer,
6581 round_jiffies(jiffies + pf->service_timer_period));
6582 i40e_service_event_schedule(pf);
6583}
6584
6585/**
6586 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
6587 * @vsi: the VSI being configured
6588 **/
6589static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
6590{
6591 struct i40e_pf *pf = vsi->back;
6592
6593 switch (vsi->type) {
6594 case I40E_VSI_MAIN:
6595 vsi->alloc_queue_pairs = pf->num_lan_qps;
6596 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6597 I40E_REQ_DESCRIPTOR_MULTIPLE);
6598 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6599 vsi->num_q_vectors = pf->num_lan_msix;
6600 else
6601 vsi->num_q_vectors = 1;
6602
6603 break;
6604
6605 case I40E_VSI_FDIR:
6606 vsi->alloc_queue_pairs = 1;
6607 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
6608 I40E_REQ_DESCRIPTOR_MULTIPLE);
6609 vsi->num_q_vectors = 1;
6610 break;
6611
6612 case I40E_VSI_VMDQ2:
6613 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
6614 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6615 I40E_REQ_DESCRIPTOR_MULTIPLE);
6616 vsi->num_q_vectors = pf->num_vmdq_msix;
6617 break;
6618
6619 case I40E_VSI_SRIOV:
6620 vsi->alloc_queue_pairs = pf->num_vf_qps;
6621 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6622 I40E_REQ_DESCRIPTOR_MULTIPLE);
6623 break;
6624
38e00438
VD
6625#ifdef I40E_FCOE
6626 case I40E_VSI_FCOE:
6627 vsi->alloc_queue_pairs = pf->num_fcoe_qps;
6628 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6629 I40E_REQ_DESCRIPTOR_MULTIPLE);
6630 vsi->num_q_vectors = pf->num_fcoe_msix;
6631 break;
6632
6633#endif /* I40E_FCOE */
41c445ff
JB
6634 default:
6635 WARN_ON(1);
6636 return -ENODATA;
6637 }
6638
6639 return 0;
6640}
6641
f650a38b
ASJ
6642/**
6643 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
6644 * @type: VSI pointer
bc7d338f 6645 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
f650a38b
ASJ
6646 *
6647 * On error: returns error code (negative)
6648 * On success: returns 0
6649 **/
bc7d338f 6650static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
f650a38b
ASJ
6651{
6652 int size;
6653 int ret = 0;
6654
ac6c5e3d 6655 /* allocate memory for both Tx and Rx ring pointers */
f650a38b
ASJ
6656 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
6657 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
6658 if (!vsi->tx_rings)
6659 return -ENOMEM;
f650a38b
ASJ
6660 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
6661
bc7d338f
ASJ
6662 if (alloc_qvectors) {
6663 /* allocate memory for q_vector pointers */
f57e4fbd 6664 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
bc7d338f
ASJ
6665 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
6666 if (!vsi->q_vectors) {
6667 ret = -ENOMEM;
6668 goto err_vectors;
6669 }
f650a38b
ASJ
6670 }
6671 return ret;
6672
6673err_vectors:
6674 kfree(vsi->tx_rings);
6675 return ret;
6676}
6677
41c445ff
JB
6678/**
6679 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
6680 * @pf: board private structure
6681 * @type: type of VSI
6682 *
6683 * On error: returns error code (negative)
6684 * On success: returns vsi index in PF (positive)
6685 **/
6686static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
6687{
6688 int ret = -ENODEV;
6689 struct i40e_vsi *vsi;
6690 int vsi_idx;
6691 int i;
6692
6693 /* Need to protect the allocation of the VSIs at the PF level */
6694 mutex_lock(&pf->switch_mutex);
6695
6696 /* VSI list may be fragmented if VSI creation/destruction has
6697 * been happening. We can afford to do a quick scan to look
6698 * for any free VSIs in the list.
6699 *
6700 * find next empty vsi slot, looping back around if necessary
6701 */
6702 i = pf->next_vsi;
505682cd 6703 while (i < pf->num_alloc_vsi && pf->vsi[i])
41c445ff 6704 i++;
505682cd 6705 if (i >= pf->num_alloc_vsi) {
41c445ff
JB
6706 i = 0;
6707 while (i < pf->next_vsi && pf->vsi[i])
6708 i++;
6709 }
6710
505682cd 6711 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
41c445ff
JB
6712 vsi_idx = i; /* Found one! */
6713 } else {
6714 ret = -ENODEV;
493fb300 6715 goto unlock_pf; /* out of VSI slots! */
41c445ff
JB
6716 }
6717 pf->next_vsi = ++i;
6718
6719 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
6720 if (!vsi) {
6721 ret = -ENOMEM;
493fb300 6722 goto unlock_pf;
41c445ff
JB
6723 }
6724 vsi->type = type;
6725 vsi->back = pf;
6726 set_bit(__I40E_DOWN, &vsi->state);
6727 vsi->flags = 0;
6728 vsi->idx = vsi_idx;
6729 vsi->rx_itr_setting = pf->rx_itr_default;
6730 vsi->tx_itr_setting = pf->tx_itr_default;
6731 vsi->netdev_registered = false;
6732 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
6733 INIT_LIST_HEAD(&vsi->mac_filter_list);
63741846 6734 vsi->irqs_ready = false;
41c445ff 6735
9f65e15b
AD
6736 ret = i40e_set_num_rings_in_vsi(vsi);
6737 if (ret)
6738 goto err_rings;
6739
bc7d338f 6740 ret = i40e_vsi_alloc_arrays(vsi, true);
f650a38b 6741 if (ret)
9f65e15b 6742 goto err_rings;
493fb300 6743
41c445ff
JB
6744 /* Setup default MSIX irq handler for VSI */
6745 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
6746
6747 pf->vsi[vsi_idx] = vsi;
6748 ret = vsi_idx;
493fb300
AD
6749 goto unlock_pf;
6750
9f65e15b 6751err_rings:
493fb300
AD
6752 pf->next_vsi = i - 1;
6753 kfree(vsi);
6754unlock_pf:
41c445ff
JB
6755 mutex_unlock(&pf->switch_mutex);
6756 return ret;
6757}
6758
f650a38b
ASJ
6759/**
6760 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
6761 * @type: VSI pointer
bc7d338f 6762 * @free_qvectors: a bool to specify if q_vectors need to be freed.
f650a38b
ASJ
6763 *
6764 * On error: returns error code (negative)
6765 * On success: returns 0
6766 **/
bc7d338f 6767static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
f650a38b
ASJ
6768{
6769 /* free the ring and vector containers */
bc7d338f
ASJ
6770 if (free_qvectors) {
6771 kfree(vsi->q_vectors);
6772 vsi->q_vectors = NULL;
6773 }
f650a38b
ASJ
6774 kfree(vsi->tx_rings);
6775 vsi->tx_rings = NULL;
6776 vsi->rx_rings = NULL;
6777}
6778
41c445ff
JB
6779/**
6780 * i40e_vsi_clear - Deallocate the VSI provided
6781 * @vsi: the VSI being un-configured
6782 **/
6783static int i40e_vsi_clear(struct i40e_vsi *vsi)
6784{
6785 struct i40e_pf *pf;
6786
6787 if (!vsi)
6788 return 0;
6789
6790 if (!vsi->back)
6791 goto free_vsi;
6792 pf = vsi->back;
6793
6794 mutex_lock(&pf->switch_mutex);
6795 if (!pf->vsi[vsi->idx]) {
6796 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
6797 vsi->idx, vsi->idx, vsi, vsi->type);
6798 goto unlock_vsi;
6799 }
6800
6801 if (pf->vsi[vsi->idx] != vsi) {
6802 dev_err(&pf->pdev->dev,
6803 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
6804 pf->vsi[vsi->idx]->idx,
6805 pf->vsi[vsi->idx],
6806 pf->vsi[vsi->idx]->type,
6807 vsi->idx, vsi, vsi->type);
6808 goto unlock_vsi;
6809 }
6810
6811 /* updates the pf for this cleared vsi */
6812 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
6813 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
6814
bc7d338f 6815 i40e_vsi_free_arrays(vsi, true);
493fb300 6816
41c445ff
JB
6817 pf->vsi[vsi->idx] = NULL;
6818 if (vsi->idx < pf->next_vsi)
6819 pf->next_vsi = vsi->idx;
6820
6821unlock_vsi:
6822 mutex_unlock(&pf->switch_mutex);
6823free_vsi:
6824 kfree(vsi);
6825
6826 return 0;
6827}
6828
9f65e15b
AD
6829/**
6830 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
6831 * @vsi: the VSI being cleaned
6832 **/
be1d5eea 6833static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
9f65e15b
AD
6834{
6835 int i;
6836
8e9dca53 6837 if (vsi->tx_rings && vsi->tx_rings[0]) {
d7397644 6838 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
00403f04
MW
6839 kfree_rcu(vsi->tx_rings[i], rcu);
6840 vsi->tx_rings[i] = NULL;
6841 vsi->rx_rings[i] = NULL;
6842 }
be1d5eea 6843 }
9f65e15b
AD
6844}
6845
41c445ff
JB
6846/**
6847 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
6848 * @vsi: the VSI being configured
6849 **/
6850static int i40e_alloc_rings(struct i40e_vsi *vsi)
6851{
e7046ee1 6852 struct i40e_ring *tx_ring, *rx_ring;
41c445ff 6853 struct i40e_pf *pf = vsi->back;
41c445ff
JB
6854 int i;
6855
41c445ff 6856 /* Set basic values in the rings to be used later during open() */
d7397644 6857 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
ac6c5e3d 6858 /* allocate space for both Tx and Rx in one shot */
9f65e15b
AD
6859 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
6860 if (!tx_ring)
6861 goto err_out;
41c445ff
JB
6862
6863 tx_ring->queue_index = i;
6864 tx_ring->reg_idx = vsi->base_queue + i;
6865 tx_ring->ring_active = false;
6866 tx_ring->vsi = vsi;
6867 tx_ring->netdev = vsi->netdev;
6868 tx_ring->dev = &pf->pdev->dev;
6869 tx_ring->count = vsi->num_desc;
6870 tx_ring->size = 0;
6871 tx_ring->dcb_tc = 0;
9f65e15b 6872 vsi->tx_rings[i] = tx_ring;
41c445ff 6873
9f65e15b 6874 rx_ring = &tx_ring[1];
41c445ff
JB
6875 rx_ring->queue_index = i;
6876 rx_ring->reg_idx = vsi->base_queue + i;
6877 rx_ring->ring_active = false;
6878 rx_ring->vsi = vsi;
6879 rx_ring->netdev = vsi->netdev;
6880 rx_ring->dev = &pf->pdev->dev;
6881 rx_ring->count = vsi->num_desc;
6882 rx_ring->size = 0;
6883 rx_ring->dcb_tc = 0;
6884 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
6885 set_ring_16byte_desc_enabled(rx_ring);
6886 else
6887 clear_ring_16byte_desc_enabled(rx_ring);
9f65e15b 6888 vsi->rx_rings[i] = rx_ring;
41c445ff
JB
6889 }
6890
6891 return 0;
9f65e15b
AD
6892
6893err_out:
6894 i40e_vsi_clear_rings(vsi);
6895 return -ENOMEM;
41c445ff
JB
6896}
6897
6898/**
6899 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
6900 * @pf: board private structure
6901 * @vectors: the number of MSI-X vectors to request
6902 *
6903 * Returns the number of vectors reserved, or error
6904 **/
6905static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
6906{
7b37f376
AG
6907 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
6908 I40E_MIN_MSIX, vectors);
6909 if (vectors < 0) {
41c445ff 6910 dev_info(&pf->pdev->dev,
7b37f376 6911 "MSI-X vector reservation failed: %d\n", vectors);
41c445ff
JB
6912 vectors = 0;
6913 }
6914
6915 return vectors;
6916}
6917
6918/**
6919 * i40e_init_msix - Setup the MSIX capability
6920 * @pf: board private structure
6921 *
6922 * Work with the OS to set up the MSIX vectors needed.
6923 *
6924 * Returns 0 on success, negative on failure
6925 **/
6926static int i40e_init_msix(struct i40e_pf *pf)
6927{
6928 i40e_status err = 0;
6929 struct i40e_hw *hw = &pf->hw;
c135b0de 6930 int other_vecs = 0;
41c445ff
JB
6931 int v_budget, i;
6932 int vec;
6933
6934 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
6935 return -ENODEV;
6936
6937 /* The number of vectors we'll request will be comprised of:
6938 * - Add 1 for "other" cause for Admin Queue events, etc.
6939 * - The number of LAN queue pairs
f8ff1464
ASJ
6940 * - Queues being used for RSS.
6941 * We don't need as many as max_rss_size vectors.
6942 * use rss_size instead in the calculation since that
6943 * is governed by number of cpus in the system.
6944 * - assumes symmetric Tx/Rx pairing
41c445ff 6945 * - The number of VMDq pairs
38e00438
VD
6946#ifdef I40E_FCOE
6947 * - The number of FCOE qps.
6948#endif
41c445ff
JB
6949 * Once we count this up, try the request.
6950 *
6951 * If we can't get what we want, we'll simplify to nearly nothing
6952 * and try again. If that still fails, we punt.
6953 */
f8ff1464 6954 pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
41c445ff 6955 pf->num_vmdq_msix = pf->num_vmdq_qps;
c135b0de
SN
6956 other_vecs = 1;
6957 other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
60ea5f83 6958 if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
c135b0de 6959 other_vecs++;
41c445ff 6960
83840e4b
JL
6961 /* Scale down if necessary, and the rings will share vectors */
6962 pf->num_lan_msix = min_t(int, pf->num_lan_msix,
6963 (hw->func_caps.num_msix_vectors - other_vecs));
6964 v_budget = pf->num_lan_msix + other_vecs;
6965
38e00438
VD
6966#ifdef I40E_FCOE
6967 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
6968 pf->num_fcoe_msix = pf->num_fcoe_qps;
6969 v_budget += pf->num_fcoe_msix;
6970 }
38e00438 6971#endif
41c445ff
JB
6972
6973 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
6974 GFP_KERNEL);
6975 if (!pf->msix_entries)
6976 return -ENOMEM;
6977
6978 for (i = 0; i < v_budget; i++)
6979 pf->msix_entries[i].entry = i;
6980 vec = i40e_reserve_msix_vectors(pf, v_budget);
a34977ba
ASJ
6981
6982 if (vec != v_budget) {
6983 /* If we have limited resources, we will start with no vectors
6984 * for the special features and then allocate vectors to some
6985 * of these features based on the policy and at the end disable
6986 * the features that did not get any vectors.
6987 */
38e00438
VD
6988#ifdef I40E_FCOE
6989 pf->num_fcoe_qps = 0;
6990 pf->num_fcoe_msix = 0;
6991#endif
a34977ba
ASJ
6992 pf->num_vmdq_msix = 0;
6993 }
6994
41c445ff
JB
6995 if (vec < I40E_MIN_MSIX) {
6996 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
6997 kfree(pf->msix_entries);
6998 pf->msix_entries = NULL;
6999 return -ENODEV;
7000
7001 } else if (vec == I40E_MIN_MSIX) {
7002 /* Adjust for minimal MSIX use */
41c445ff
JB
7003 pf->num_vmdq_vsis = 0;
7004 pf->num_vmdq_qps = 0;
41c445ff
JB
7005 pf->num_lan_qps = 1;
7006 pf->num_lan_msix = 1;
7007
7008 } else if (vec != v_budget) {
a34977ba
ASJ
7009 /* reserve the misc vector */
7010 vec--;
7011
41c445ff
JB
7012 /* Scale vector usage down */
7013 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
a34977ba 7014 pf->num_vmdq_vsis = 1;
41c445ff
JB
7015
7016 /* partition out the remaining vectors */
7017 switch (vec) {
7018 case 2:
41c445ff
JB
7019 pf->num_lan_msix = 1;
7020 break;
7021 case 3:
38e00438
VD
7022#ifdef I40E_FCOE
7023 /* give one vector to FCoE */
7024 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7025 pf->num_lan_msix = 1;
7026 pf->num_fcoe_msix = 1;
7027 }
7028#else
41c445ff 7029 pf->num_lan_msix = 2;
38e00438 7030#endif
41c445ff
JB
7031 break;
7032 default:
38e00438
VD
7033#ifdef I40E_FCOE
7034 /* give one vector to FCoE */
7035 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
7036 pf->num_fcoe_msix = 1;
7037 vec--;
7038 }
7039#endif
41c445ff
JB
7040 pf->num_lan_msix = min_t(int, (vec / 2),
7041 pf->num_lan_qps);
7042 pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
7043 I40E_DEFAULT_NUM_VMDQ_VSI);
7044 break;
7045 }
7046 }
7047
a34977ba
ASJ
7048 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
7049 (pf->num_vmdq_msix == 0)) {
7050 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
7051 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
7052 }
38e00438
VD
7053#ifdef I40E_FCOE
7054
7055 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
7056 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
7057 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
7058 }
7059#endif
41c445ff
JB
7060 return err;
7061}
7062
493fb300 7063/**
90e04070 7064 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
493fb300
AD
7065 * @vsi: the VSI being configured
7066 * @v_idx: index of the vector in the vsi struct
7067 *
7068 * We allocate one q_vector. If allocation fails we return -ENOMEM.
7069 **/
90e04070 7070static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
493fb300
AD
7071{
7072 struct i40e_q_vector *q_vector;
7073
7074 /* allocate q_vector */
7075 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7076 if (!q_vector)
7077 return -ENOMEM;
7078
7079 q_vector->vsi = vsi;
7080 q_vector->v_idx = v_idx;
7081 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
7082 if (vsi->netdev)
7083 netif_napi_add(vsi->netdev, &q_vector->napi,
eefeacee 7084 i40e_napi_poll, NAPI_POLL_WEIGHT);
493fb300 7085
cd0b6fa6
AD
7086 q_vector->rx.latency_range = I40E_LOW_LATENCY;
7087 q_vector->tx.latency_range = I40E_LOW_LATENCY;
7088
493fb300
AD
7089 /* tie q_vector and vsi together */
7090 vsi->q_vectors[v_idx] = q_vector;
7091
7092 return 0;
7093}
7094
41c445ff 7095/**
90e04070 7096 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
41c445ff
JB
7097 * @vsi: the VSI being configured
7098 *
7099 * We allocate one q_vector per queue interrupt. If allocation fails we
7100 * return -ENOMEM.
7101 **/
90e04070 7102static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
41c445ff
JB
7103{
7104 struct i40e_pf *pf = vsi->back;
7105 int v_idx, num_q_vectors;
493fb300 7106 int err;
41c445ff
JB
7107
7108 /* if not MSIX, give the one vector only to the LAN VSI */
7109 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7110 num_q_vectors = vsi->num_q_vectors;
7111 else if (vsi == pf->vsi[pf->lan_vsi])
7112 num_q_vectors = 1;
7113 else
7114 return -EINVAL;
7115
41c445ff 7116 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
90e04070 7117 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
493fb300
AD
7118 if (err)
7119 goto err_out;
41c445ff
JB
7120 }
7121
7122 return 0;
493fb300
AD
7123
7124err_out:
7125 while (v_idx--)
7126 i40e_free_q_vector(vsi, v_idx);
7127
7128 return err;
41c445ff
JB
7129}
7130
7131/**
7132 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
7133 * @pf: board private structure to initialize
7134 **/
7135static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
7136{
7137 int err = 0;
7138
7139 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
7140 err = i40e_init_msix(pf);
7141 if (err) {
60ea5f83 7142 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
38e00438
VD
7143#ifdef I40E_FCOE
7144 I40E_FLAG_FCOE_ENABLED |
7145#endif
60ea5f83 7146 I40E_FLAG_RSS_ENABLED |
4d9b6043 7147 I40E_FLAG_DCB_CAPABLE |
60ea5f83
JB
7148 I40E_FLAG_SRIOV_ENABLED |
7149 I40E_FLAG_FD_SB_ENABLED |
7150 I40E_FLAG_FD_ATR_ENABLED |
7151 I40E_FLAG_VMDQ_ENABLED);
41c445ff
JB
7152
7153 /* rework the queue expectations without MSIX */
7154 i40e_determine_queue_usage(pf);
7155 }
7156 }
7157
7158 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
7159 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
77fa28be 7160 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
41c445ff
JB
7161 err = pci_enable_msi(pf->pdev);
7162 if (err) {
958a3e3b 7163 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
41c445ff
JB
7164 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
7165 }
7166 }
7167
958a3e3b 7168 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
77fa28be 7169 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
958a3e3b 7170
41c445ff
JB
7171 /* track first vector for misc interrupts */
7172 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
7173}
7174
7175/**
7176 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
7177 * @pf: board private structure
7178 *
7179 * This sets up the handler for MSIX 0, which is used to manage the
7180 * non-queue interrupts, e.g. AdminQ and errors. This is not used
7181 * when in MSI or Legacy interrupt mode.
7182 **/
7183static int i40e_setup_misc_vector(struct i40e_pf *pf)
7184{
7185 struct i40e_hw *hw = &pf->hw;
7186 int err = 0;
7187
7188 /* Only request the irq if this is the first time through, and
7189 * not when we're rebuilding after a Reset
7190 */
7191 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
7192 err = request_irq(pf->msix_entries[0].vector,
b294ac70 7193 i40e_intr, 0, pf->int_name, pf);
41c445ff
JB
7194 if (err) {
7195 dev_info(&pf->pdev->dev,
77fa28be 7196 "request_irq for %s failed: %d\n",
b294ac70 7197 pf->int_name, err);
41c445ff
JB
7198 return -EFAULT;
7199 }
7200 }
7201
ab437b5a 7202 i40e_enable_misc_int_causes(pf);
41c445ff
JB
7203
7204 /* associate no queues to the misc vector */
7205 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
7206 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
7207
7208 i40e_flush(hw);
7209
7210 i40e_irq_dynamic_enable_icr0(pf);
7211
7212 return err;
7213}
7214
7215/**
7216 * i40e_config_rss - Prepare for RSS if used
7217 * @pf: board private structure
7218 **/
7219static int i40e_config_rss(struct i40e_pf *pf)
7220{
22f258a1 7221 u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
4617e8c0
ASJ
7222 struct i40e_hw *hw = &pf->hw;
7223 u32 lut = 0;
7224 int i, j;
7225 u64 hena;
e157ea30 7226 u32 reg_val;
41c445ff 7227
22f258a1 7228 netdev_rss_key_fill(rss_key, sizeof(rss_key));
41c445ff 7229 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
22f258a1 7230 wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);
41c445ff
JB
7231
7232 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
7233 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
7234 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
12dc4fe3 7235 hena |= I40E_DEFAULT_RSS_HENA;
41c445ff
JB
7236 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
7237 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
7238
e157ea30
CW
7239 /* Check capability and Set table size and register per hw expectation*/
7240 reg_val = rd32(hw, I40E_PFQF_CTL_0);
7241 if (hw->func_caps.rss_table_size == 512) {
7242 reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
7243 pf->rss_table_size = 512;
7244 } else {
7245 pf->rss_table_size = 128;
7246 reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
7247 }
7248 wr32(hw, I40E_PFQF_CTL_0, reg_val);
7249
41c445ff 7250 /* Populate the LUT with max no. of queues in round robin fashion */
e157ea30 7251 for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
41c445ff
JB
7252
7253 /* The assumption is that lan qp count will be the highest
7254 * qp count for any PF VSI that needs RSS.
7255 * If multiple VSIs need RSS support, all the qp counts
7256 * for those VSIs should be a power of 2 for RSS to work.
7257 * If LAN VSI is the only consumer for RSS then this requirement
7258 * is not necessary.
7259 */
7260 if (j == pf->rss_size)
7261 j = 0;
7262 /* lut = 4-byte sliding window of 4 lut entries */
7263 lut = (lut << 8) | (j &
7264 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
7265 /* On i = 3, we have 4 entries in lut; write to the register */
7266 if ((i & 3) == 3)
7267 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
7268 }
7269 i40e_flush(hw);
7270
7271 return 0;
7272}
7273
f8ff1464
ASJ
7274/**
7275 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
7276 * @pf: board private structure
7277 * @queue_count: the requested queue count for rss.
7278 *
7279 * returns 0 if rss is not enabled, if enabled returns the final rss queue
7280 * count which may be different from the requested queue count.
7281 **/
7282int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
7283{
7284 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
7285 return 0;
7286
7287 queue_count = min_t(int, queue_count, pf->rss_size_max);
f8ff1464
ASJ
7288
7289 if (queue_count != pf->rss_size) {
f8ff1464
ASJ
7290 i40e_prep_for_reset(pf);
7291
f8ff1464
ASJ
7292 pf->rss_size = queue_count;
7293
7294 i40e_reset_and_rebuild(pf, true);
7295 i40e_config_rss(pf);
7296 }
7297 dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
7298 return pf->rss_size;
7299}
7300
41c445ff
JB
7301/**
7302 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
7303 * @pf: board private structure to initialize
7304 *
7305 * i40e_sw_init initializes the Adapter private data structure.
7306 * Fields are initialized based on PCI device information and
7307 * OS network device settings (MTU size).
7308 **/
7309static int i40e_sw_init(struct i40e_pf *pf)
7310{
7311 int err = 0;
7312 int size;
7313
7314 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
7315 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
2759997b 7316 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
41c445ff
JB
7317 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
7318 if (I40E_DEBUG_USER & debug)
7319 pf->hw.debug_mask = debug;
7320 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
7321 I40E_DEFAULT_MSG_ENABLE);
7322 }
7323
7324 /* Set default capability flags */
7325 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
7326 I40E_FLAG_MSI_ENABLED |
7327 I40E_FLAG_MSIX_ENABLED |
41c445ff
JB
7328 I40E_FLAG_RX_1BUF_ENABLED;
7329
ca99eb99
MW
7330 /* Set default ITR */
7331 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
7332 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
7333
7134f9ce
JB
7334 /* Depending on PF configurations, it is possible that the RSS
7335 * maximum might end up larger than the available queues
7336 */
41c445ff 7337 pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
ec9a7db7 7338 pf->rss_size = 1;
7134f9ce
JB
7339 pf->rss_size_max = min_t(int, pf->rss_size_max,
7340 pf->hw.func_caps.num_tx_qp);
41c445ff
JB
7341 if (pf->hw.func_caps.rss) {
7342 pf->flags |= I40E_FLAG_RSS_ENABLED;
bf051a3b 7343 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
41c445ff
JB
7344 }
7345
2050bc65
CS
7346 /* MFP mode enabled */
7347 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
7348 pf->flags |= I40E_FLAG_MFP_ENABLED;
7349 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
7350 }
7351
cbf61325
ASJ
7352 /* FW/NVM is not yet fixed in this regard */
7353 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
7354 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
7355 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
7356 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
433c47de
ASJ
7357 /* Setup a counter for fd_atr per pf */
7358 pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
cbf61325 7359 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
60ea5f83 7360 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
433c47de
ASJ
7361 /* Setup a counter for fd_sb per pf */
7362 pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
cbf61325
ASJ
7363 } else {
7364 dev_info(&pf->pdev->dev,
0b67584f 7365 "Flow Director Sideband mode Disabled in MFP mode\n");
41c445ff 7366 }
cbf61325
ASJ
7367 pf->fdir_pf_filter_count =
7368 pf->hw.func_caps.fd_filters_guaranteed;
7369 pf->hw.fdir_shared_filter_count =
7370 pf->hw.func_caps.fd_filters_best_effort;
41c445ff
JB
7371 }
7372
7373 if (pf->hw.func_caps.vmdq) {
7374 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
7375 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
7376 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
7377 }
7378
38e00438
VD
7379#ifdef I40E_FCOE
7380 err = i40e_init_pf_fcoe(pf);
7381 if (err)
7382 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
7383
7384#endif /* I40E_FCOE */
41c445ff 7385#ifdef CONFIG_PCI_IOV
ba252f13 7386 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
41c445ff
JB
7387 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
7388 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
7389 pf->num_req_vfs = min_t(int,
7390 pf->hw.func_caps.num_vfs,
7391 I40E_MAX_VF_COUNT);
7392 }
7393#endif /* CONFIG_PCI_IOV */
7394 pf->eeprom_version = 0xDEAD;
7395 pf->lan_veb = I40E_NO_VEB;
7396 pf->lan_vsi = I40E_NO_VSI;
7397
7398 /* set up queue assignment tracking */
7399 size = sizeof(struct i40e_lump_tracking)
7400 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
7401 pf->qp_pile = kzalloc(size, GFP_KERNEL);
7402 if (!pf->qp_pile) {
7403 err = -ENOMEM;
7404 goto sw_init_done;
7405 }
7406 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
7407 pf->qp_pile->search_hint = 0;
7408
7409 /* set up vector assignment tracking */
7410 size = sizeof(struct i40e_lump_tracking)
7411 + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
7412 pf->irq_pile = kzalloc(size, GFP_KERNEL);
7413 if (!pf->irq_pile) {
7414 kfree(pf->qp_pile);
7415 err = -ENOMEM;
7416 goto sw_init_done;
7417 }
7418 pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
7419 pf->irq_pile->search_hint = 0;
7420
327fe04b
ASJ
7421 pf->tx_timeout_recovery_level = 1;
7422
41c445ff
JB
7423 mutex_init(&pf->switch_mutex);
7424
7425sw_init_done:
7426 return err;
7427}
7428
7c3c288b
ASJ
7429/**
7430 * i40e_set_ntuple - set the ntuple feature flag and take action
7431 * @pf: board private structure to initialize
7432 * @features: the feature set that the stack is suggesting
7433 *
7434 * returns a bool to indicate if reset needs to happen
7435 **/
7436bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
7437{
7438 bool need_reset = false;
7439
7440 /* Check if Flow Director n-tuple support was enabled or disabled. If
7441 * the state changed, we need to reset.
7442 */
7443 if (features & NETIF_F_NTUPLE) {
7444 /* Enable filters and mark for reset */
7445 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
7446 need_reset = true;
7447 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7448 } else {
7449 /* turn off filters, mark for reset and clear SW filter list */
7450 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
7451 need_reset = true;
7452 i40e_fdir_filter_exit(pf);
7453 }
7454 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8a4f34fb 7455 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
1e1be8f6
ASJ
7456 /* reset fd counters */
7457 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
7458 pf->fdir_pf_active_filters = 0;
7459 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
7460 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
8a4f34fb
ASJ
7461 /* if ATR was auto disabled it can be re-enabled. */
7462 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
7463 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
7464 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
7c3c288b
ASJ
7465 }
7466 return need_reset;
7467}
7468
41c445ff
JB
7469/**
7470 * i40e_set_features - set the netdev feature flags
7471 * @netdev: ptr to the netdev being adjusted
7472 * @features: the feature set that the stack is suggesting
7473 **/
7474static int i40e_set_features(struct net_device *netdev,
7475 netdev_features_t features)
7476{
7477 struct i40e_netdev_priv *np = netdev_priv(netdev);
7478 struct i40e_vsi *vsi = np->vsi;
7c3c288b
ASJ
7479 struct i40e_pf *pf = vsi->back;
7480 bool need_reset;
41c445ff
JB
7481
7482 if (features & NETIF_F_HW_VLAN_CTAG_RX)
7483 i40e_vlan_stripping_enable(vsi);
7484 else
7485 i40e_vlan_stripping_disable(vsi);
7486
7c3c288b
ASJ
7487 need_reset = i40e_set_ntuple(pf, features);
7488
7489 if (need_reset)
7490 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
7491
41c445ff
JB
7492 return 0;
7493}
7494
a1c9a9d9
JK
7495#ifdef CONFIG_I40E_VXLAN
7496/**
7497 * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port
7498 * @pf: board private structure
7499 * @port: The UDP port to look up
7500 *
7501 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
7502 **/
7503static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
7504{
7505 u8 i;
7506
7507 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
7508 if (pf->vxlan_ports[i] == port)
7509 return i;
7510 }
7511
7512 return i;
7513}
7514
7515/**
7516 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
7517 * @netdev: This physical port's netdev
7518 * @sa_family: Socket Family that VXLAN is notifying us about
7519 * @port: New UDP port number that VXLAN started listening to
7520 **/
7521static void i40e_add_vxlan_port(struct net_device *netdev,
7522 sa_family_t sa_family, __be16 port)
7523{
7524 struct i40e_netdev_priv *np = netdev_priv(netdev);
7525 struct i40e_vsi *vsi = np->vsi;
7526 struct i40e_pf *pf = vsi->back;
7527 u8 next_idx;
7528 u8 idx;
7529
7530 if (sa_family == AF_INET6)
7531 return;
7532
7533 idx = i40e_get_vxlan_port_idx(pf, port);
7534
7535 /* Check if port already exists */
7536 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7537 netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
7538 return;
7539 }
7540
7541 /* Now check if there is space to add the new port */
7542 next_idx = i40e_get_vxlan_port_idx(pf, 0);
7543
7544 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7545 netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
7546 ntohs(port));
7547 return;
7548 }
7549
7550 /* New port: add it and mark its index in the bitmap */
7551 pf->vxlan_ports[next_idx] = port;
7552 pf->pending_vxlan_bitmap |= (1 << next_idx);
7553
7554 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
7555}
7556
7557/**
7558 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
7559 * @netdev: This physical port's netdev
7560 * @sa_family: Socket Family that VXLAN is notifying us about
7561 * @port: UDP port number that VXLAN stopped listening to
7562 **/
7563static void i40e_del_vxlan_port(struct net_device *netdev,
7564 sa_family_t sa_family, __be16 port)
7565{
7566 struct i40e_netdev_priv *np = netdev_priv(netdev);
7567 struct i40e_vsi *vsi = np->vsi;
7568 struct i40e_pf *pf = vsi->back;
7569 u8 idx;
7570
7571 if (sa_family == AF_INET6)
7572 return;
7573
7574 idx = i40e_get_vxlan_port_idx(pf, port);
7575
7576 /* Check if port already exists */
7577 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
7578 /* if port exists, set it to 0 (mark for deletion)
7579 * and make it pending
7580 */
7581 pf->vxlan_ports[idx] = 0;
7582
7583 pf->pending_vxlan_bitmap |= (1 << idx);
7584
7585 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
7586 } else {
7587 netdev_warn(netdev, "Port %d was not found, not deleting\n",
7588 ntohs(port));
7589 }
7590}
7591
7592#endif
1f224ad2 7593static int i40e_get_phys_port_id(struct net_device *netdev,
02637fce 7594 struct netdev_phys_item_id *ppid)
1f224ad2
NP
7595{
7596 struct i40e_netdev_priv *np = netdev_priv(netdev);
7597 struct i40e_pf *pf = np->vsi->back;
7598 struct i40e_hw *hw = &pf->hw;
7599
7600 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
7601 return -EOPNOTSUPP;
7602
7603 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
7604 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
7605
7606 return 0;
7607}
7608
2f90ade6
JB
7609/**
7610 * i40e_ndo_fdb_add - add an entry to the hardware database
7611 * @ndm: the input from the stack
7612 * @tb: pointer to array of nladdr (unused)
7613 * @dev: the net device pointer
7614 * @addr: the MAC address entry being added
7615 * @flags: instructions from stack about fdb operation
7616 */
4ba0dea5
GR
7617static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7618 struct net_device *dev,
f6f6424b 7619 const unsigned char *addr, u16 vid,
4ba0dea5 7620 u16 flags)
4ba0dea5
GR
7621{
7622 struct i40e_netdev_priv *np = netdev_priv(dev);
7623 struct i40e_pf *pf = np->vsi->back;
7624 int err = 0;
7625
7626 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
7627 return -EOPNOTSUPP;
7628
65891fea
OG
7629 if (vid) {
7630 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
7631 return -EINVAL;
7632 }
7633
4ba0dea5
GR
7634 /* Hardware does not support aging addresses so if a
7635 * ndm_state is given only allow permanent addresses
7636 */
7637 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
7638 netdev_info(dev, "FDB only supports static addresses\n");
7639 return -EINVAL;
7640 }
7641
7642 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
7643 err = dev_uc_add_excl(dev, addr);
7644 else if (is_multicast_ether_addr(addr))
7645 err = dev_mc_add_excl(dev, addr);
7646 else
7647 err = -EINVAL;
7648
7649 /* Only return duplicate errors if NLM_F_EXCL is set */
7650 if (err == -EEXIST && !(flags & NLM_F_EXCL))
7651 err = 0;
7652
7653 return err;
7654}
7655
41c445ff
JB
7656static const struct net_device_ops i40e_netdev_ops = {
7657 .ndo_open = i40e_open,
7658 .ndo_stop = i40e_close,
7659 .ndo_start_xmit = i40e_lan_xmit_frame,
7660 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
7661 .ndo_set_rx_mode = i40e_set_rx_mode,
7662 .ndo_validate_addr = eth_validate_addr,
7663 .ndo_set_mac_address = i40e_set_mac,
7664 .ndo_change_mtu = i40e_change_mtu,
beb0dff1 7665 .ndo_do_ioctl = i40e_ioctl,
41c445ff
JB
7666 .ndo_tx_timeout = i40e_tx_timeout,
7667 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
7668 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
7669#ifdef CONFIG_NET_POLL_CONTROLLER
7670 .ndo_poll_controller = i40e_netpoll,
7671#endif
7672 .ndo_setup_tc = i40e_setup_tc,
38e00438
VD
7673#ifdef I40E_FCOE
7674 .ndo_fcoe_enable = i40e_fcoe_enable,
7675 .ndo_fcoe_disable = i40e_fcoe_disable,
7676#endif
41c445ff
JB
7677 .ndo_set_features = i40e_set_features,
7678 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
7679 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
ed616689 7680 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
41c445ff 7681 .ndo_get_vf_config = i40e_ndo_get_vf_config,
588aefa0 7682 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
e6d9004d 7683 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
a1c9a9d9
JK
7684#ifdef CONFIG_I40E_VXLAN
7685 .ndo_add_vxlan_port = i40e_add_vxlan_port,
7686 .ndo_del_vxlan_port = i40e_del_vxlan_port,
7687#endif
1f224ad2 7688 .ndo_get_phys_port_id = i40e_get_phys_port_id,
4ba0dea5 7689 .ndo_fdb_add = i40e_ndo_fdb_add,
41c445ff
JB
7690};
7691
7692/**
7693 * i40e_config_netdev - Setup the netdev flags
7694 * @vsi: the VSI being configured
7695 *
7696 * Returns 0 on success, negative value on failure
7697 **/
7698static int i40e_config_netdev(struct i40e_vsi *vsi)
7699{
1a10370a 7700 u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
41c445ff
JB
7701 struct i40e_pf *pf = vsi->back;
7702 struct i40e_hw *hw = &pf->hw;
7703 struct i40e_netdev_priv *np;
7704 struct net_device *netdev;
7705 u8 mac_addr[ETH_ALEN];
7706 int etherdev_size;
7707
7708 etherdev_size = sizeof(struct i40e_netdev_priv);
f8ff1464 7709 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
41c445ff
JB
7710 if (!netdev)
7711 return -ENOMEM;
7712
7713 vsi->netdev = netdev;
7714 np = netdev_priv(netdev);
7715 np->vsi = vsi;
7716
d70e941b 7717 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
41c445ff 7718 NETIF_F_GSO_UDP_TUNNEL |
d70e941b 7719 NETIF_F_TSO;
41c445ff
JB
7720
7721 netdev->features = NETIF_F_SG |
7722 NETIF_F_IP_CSUM |
7723 NETIF_F_SCTP_CSUM |
7724 NETIF_F_HIGHDMA |
7725 NETIF_F_GSO_UDP_TUNNEL |
7726 NETIF_F_HW_VLAN_CTAG_TX |
7727 NETIF_F_HW_VLAN_CTAG_RX |
7728 NETIF_F_HW_VLAN_CTAG_FILTER |
7729 NETIF_F_IPV6_CSUM |
7730 NETIF_F_TSO |
059dab69 7731 NETIF_F_TSO_ECN |
41c445ff
JB
7732 NETIF_F_TSO6 |
7733 NETIF_F_RXCSUM |
7734 NETIF_F_RXHASH |
7735 0;
7736
2e86a0b6
ASJ
7737 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
7738 netdev->features |= NETIF_F_NTUPLE;
7739
41c445ff
JB
7740 /* copy netdev features into list of user selectable features */
7741 netdev->hw_features |= netdev->features;
7742
7743 if (vsi->type == I40E_VSI_MAIN) {
7744 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
9a173901 7745 ether_addr_copy(mac_addr, hw->mac.perm_addr);
30650cc5
SN
7746 /* The following steps are necessary to prevent reception
7747 * of tagged packets - some older NVM configurations load a
7748 * default a MAC-VLAN filter that accepts any tagged packet
7749 * which must be replaced by a normal filter.
8c27d42e 7750 */
30650cc5
SN
7751 if (!i40e_rm_default_mac_filter(vsi, mac_addr))
7752 i40e_add_filter(vsi, mac_addr,
7753 I40E_VLAN_ANY, false, true);
41c445ff
JB
7754 } else {
7755 /* relate the VSI_VMDQ name to the VSI_MAIN name */
7756 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
7757 pf->vsi[pf->lan_vsi]->netdev->name);
7758 random_ether_addr(mac_addr);
7759 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
7760 }
1a10370a 7761 i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
41c445ff 7762
9a173901
GR
7763 ether_addr_copy(netdev->dev_addr, mac_addr);
7764 ether_addr_copy(netdev->perm_addr, mac_addr);
41c445ff
JB
7765 /* vlan gets same features (except vlan offload)
7766 * after any tweaks for specific VSI types
7767 */
7768 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
7769 NETIF_F_HW_VLAN_CTAG_RX |
7770 NETIF_F_HW_VLAN_CTAG_FILTER);
7771 netdev->priv_flags |= IFF_UNICAST_FLT;
7772 netdev->priv_flags |= IFF_SUPP_NOFCS;
7773 /* Setup netdev TC information */
7774 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
7775
7776 netdev->netdev_ops = &i40e_netdev_ops;
7777 netdev->watchdog_timeo = 5 * HZ;
7778 i40e_set_ethtool_ops(netdev);
38e00438
VD
7779#ifdef I40E_FCOE
7780 i40e_fcoe_config_netdev(netdev, vsi);
7781#endif
41c445ff
JB
7782
7783 return 0;
7784}
7785
7786/**
7787 * i40e_vsi_delete - Delete a VSI from the switch
7788 * @vsi: the VSI being removed
7789 *
7790 * Returns 0 on success, negative value on failure
7791 **/
7792static void i40e_vsi_delete(struct i40e_vsi *vsi)
7793{
7794 /* remove default VSI is not allowed */
7795 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
7796 return;
7797
41c445ff 7798 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
41c445ff
JB
7799}
7800
7801/**
7802 * i40e_add_vsi - Add a VSI to the switch
7803 * @vsi: the VSI being configured
7804 *
7805 * This initializes a VSI context depending on the VSI type to be added and
7806 * passes it down to the add_vsi aq command.
7807 **/
7808static int i40e_add_vsi(struct i40e_vsi *vsi)
7809{
7810 int ret = -ENODEV;
7811 struct i40e_mac_filter *f, *ftmp;
7812 struct i40e_pf *pf = vsi->back;
7813 struct i40e_hw *hw = &pf->hw;
7814 struct i40e_vsi_context ctxt;
7815 u8 enabled_tc = 0x1; /* TC0 enabled */
7816 int f_count = 0;
7817
7818 memset(&ctxt, 0, sizeof(ctxt));
7819 switch (vsi->type) {
7820 case I40E_VSI_MAIN:
7821 /* The PF's main VSI is already setup as part of the
7822 * device initialization, so we'll not bother with
7823 * the add_vsi call, but we will retrieve the current
7824 * VSI context.
7825 */
7826 ctxt.seid = pf->main_vsi_seid;
7827 ctxt.pf_num = pf->hw.pf_id;
7828 ctxt.vf_num = 0;
7829 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
7830 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
7831 if (ret) {
7832 dev_info(&pf->pdev->dev,
7833 "couldn't get pf vsi config, err %d, aq_err %d\n",
7834 ret, pf->hw.aq.asq_last_status);
7835 return -ENOENT;
7836 }
7837 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
7838 vsi->info.valid_sections = 0;
7839
7840 vsi->seid = ctxt.seid;
7841 vsi->id = ctxt.vsi_number;
7842
7843 enabled_tc = i40e_pf_get_tc_map(pf);
7844
7845 /* MFP mode setup queue map and update VSI */
63d7e5a4
NP
7846 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
7847 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
41c445ff
JB
7848 memset(&ctxt, 0, sizeof(ctxt));
7849 ctxt.seid = pf->main_vsi_seid;
7850 ctxt.pf_num = pf->hw.pf_id;
7851 ctxt.vf_num = 0;
7852 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
7853 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
7854 if (ret) {
7855 dev_info(&pf->pdev->dev,
7856 "update vsi failed, aq_err=%d\n",
7857 pf->hw.aq.asq_last_status);
7858 ret = -ENOENT;
7859 goto err;
7860 }
7861 /* update the local VSI info queue map */
7862 i40e_vsi_update_queue_map(vsi, &ctxt);
7863 vsi->info.valid_sections = 0;
7864 } else {
7865 /* Default/Main VSI is only enabled for TC0
7866 * reconfigure it to enable all TCs that are
7867 * available on the port in SFP mode.
63d7e5a4
NP
7868 * For MFP case the iSCSI PF would use this
7869 * flow to enable LAN+iSCSI TC.
41c445ff
JB
7870 */
7871 ret = i40e_vsi_config_tc(vsi, enabled_tc);
7872 if (ret) {
7873 dev_info(&pf->pdev->dev,
7874 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
7875 enabled_tc, ret,
7876 pf->hw.aq.asq_last_status);
7877 ret = -ENOENT;
7878 }
7879 }
7880 break;
7881
7882 case I40E_VSI_FDIR:
cbf61325
ASJ
7883 ctxt.pf_num = hw->pf_id;
7884 ctxt.vf_num = 0;
7885 ctxt.uplink_seid = vsi->uplink_seid;
7886 ctxt.connection_type = 0x1; /* regular data port */
7887 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
79c21a82
ASJ
7888 ctxt.info.valid_sections |=
7889 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
7890 ctxt.info.switch_id =
7891 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
41c445ff 7892 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
41c445ff
JB
7893 break;
7894
7895 case I40E_VSI_VMDQ2:
7896 ctxt.pf_num = hw->pf_id;
7897 ctxt.vf_num = 0;
7898 ctxt.uplink_seid = vsi->uplink_seid;
7899 ctxt.connection_type = 0x1; /* regular data port */
7900 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
7901
7902 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
7903
7904 /* This VSI is connected to VEB so the switch_id
7905 * should be set to zero by default.
7906 */
7907 ctxt.info.switch_id = 0;
41c445ff
JB
7908 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
7909
7910 /* Setup the VSI tx/rx queue map for TC0 only for now */
7911 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
7912 break;
7913
7914 case I40E_VSI_SRIOV:
7915 ctxt.pf_num = hw->pf_id;
7916 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
7917 ctxt.uplink_seid = vsi->uplink_seid;
7918 ctxt.connection_type = 0x1; /* regular data port */
7919 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
7920
7921 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
7922
7923 /* This VSI is connected to VEB so the switch_id
7924 * should be set to zero by default.
7925 */
7926 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
7927
7928 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
7929 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
c674d125
MW
7930 if (pf->vf[vsi->vf_id].spoofchk) {
7931 ctxt.info.valid_sections |=
7932 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
7933 ctxt.info.sec_flags |=
7934 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
7935 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
7936 }
41c445ff
JB
7937 /* Setup the VSI tx/rx queue map for TC0 only for now */
7938 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
7939 break;
7940
38e00438
VD
7941#ifdef I40E_FCOE
7942 case I40E_VSI_FCOE:
7943 ret = i40e_fcoe_vsi_init(vsi, &ctxt);
7944 if (ret) {
7945 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
7946 return ret;
7947 }
7948 break;
7949
7950#endif /* I40E_FCOE */
41c445ff
JB
7951 default:
7952 return -ENODEV;
7953 }
7954
7955 if (vsi->type != I40E_VSI_MAIN) {
7956 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
7957 if (ret) {
7958 dev_info(&vsi->back->pdev->dev,
7959 "add vsi failed, aq_err=%d\n",
7960 vsi->back->hw.aq.asq_last_status);
7961 ret = -ENOENT;
7962 goto err;
7963 }
7964 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
7965 vsi->info.valid_sections = 0;
7966 vsi->seid = ctxt.seid;
7967 vsi->id = ctxt.vsi_number;
7968 }
7969
7970 /* If macvlan filters already exist, force them to get loaded */
7971 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
7972 f->changed = true;
7973 f_count++;
6252c7e4
SN
7974
7975 if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
30650cc5
SN
7976 struct i40e_aqc_remove_macvlan_element_data element;
7977
7978 memset(&element, 0, sizeof(element));
7979 ether_addr_copy(element.mac_addr, f->macaddr);
7980 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7981 ret = i40e_aq_remove_macvlan(hw, vsi->seid,
7982 &element, 1, NULL);
7983 if (ret) {
7984 /* some older FW has a different default */
7985 element.flags |=
7986 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7987 i40e_aq_remove_macvlan(hw, vsi->seid,
7988 &element, 1, NULL);
7989 }
7990
7991 i40e_aq_mac_address_write(hw,
6252c7e4
SN
7992 I40E_AQC_WRITE_TYPE_LAA_WOL,
7993 f->macaddr, NULL);
7994 }
41c445ff
JB
7995 }
7996 if (f_count) {
7997 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
7998 pf->flags |= I40E_FLAG_FILTER_SYNC;
7999 }
8000
8001 /* Update VSI BW information */
8002 ret = i40e_vsi_get_bw_info(vsi);
8003 if (ret) {
8004 dev_info(&pf->pdev->dev,
8005 "couldn't get vsi bw info, err %d, aq_err %d\n",
8006 ret, pf->hw.aq.asq_last_status);
8007 /* VSI is already added so not tearing that up */
8008 ret = 0;
8009 }
8010
8011err:
8012 return ret;
8013}
8014
8015/**
8016 * i40e_vsi_release - Delete a VSI and free its resources
8017 * @vsi: the VSI being removed
8018 *
8019 * Returns 0 on success or < 0 on error
8020 **/
8021int i40e_vsi_release(struct i40e_vsi *vsi)
8022{
8023 struct i40e_mac_filter *f, *ftmp;
8024 struct i40e_veb *veb = NULL;
8025 struct i40e_pf *pf;
8026 u16 uplink_seid;
8027 int i, n;
8028
8029 pf = vsi->back;
8030
8031 /* release of a VEB-owner or last VSI is not allowed */
8032 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
8033 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
8034 vsi->seid, vsi->uplink_seid);
8035 return -ENODEV;
8036 }
8037 if (vsi == pf->vsi[pf->lan_vsi] &&
8038 !test_bit(__I40E_DOWN, &pf->state)) {
8039 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
8040 return -ENODEV;
8041 }
8042
8043 uplink_seid = vsi->uplink_seid;
8044 if (vsi->type != I40E_VSI_SRIOV) {
8045 if (vsi->netdev_registered) {
8046 vsi->netdev_registered = false;
8047 if (vsi->netdev) {
8048 /* results in a call to i40e_close() */
8049 unregister_netdev(vsi->netdev);
41c445ff
JB
8050 }
8051 } else {
90ef8d47 8052 i40e_vsi_close(vsi);
41c445ff
JB
8053 }
8054 i40e_vsi_disable_irq(vsi);
8055 }
8056
8057 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
8058 i40e_del_filter(vsi, f->macaddr, f->vlan,
8059 f->is_vf, f->is_netdev);
8060 i40e_sync_vsi_filters(vsi);
8061
8062 i40e_vsi_delete(vsi);
8063 i40e_vsi_free_q_vectors(vsi);
a4866597
SN
8064 if (vsi->netdev) {
8065 free_netdev(vsi->netdev);
8066 vsi->netdev = NULL;
8067 }
41c445ff
JB
8068 i40e_vsi_clear_rings(vsi);
8069 i40e_vsi_clear(vsi);
8070
8071 /* If this was the last thing on the VEB, except for the
8072 * controlling VSI, remove the VEB, which puts the controlling
8073 * VSI onto the next level down in the switch.
8074 *
8075 * Well, okay, there's one more exception here: don't remove
8076 * the orphan VEBs yet. We'll wait for an explicit remove request
8077 * from up the network stack.
8078 */
505682cd 8079 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
41c445ff
JB
8080 if (pf->vsi[i] &&
8081 pf->vsi[i]->uplink_seid == uplink_seid &&
8082 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
8083 n++; /* count the VSIs */
8084 }
8085 }
8086 for (i = 0; i < I40E_MAX_VEB; i++) {
8087 if (!pf->veb[i])
8088 continue;
8089 if (pf->veb[i]->uplink_seid == uplink_seid)
8090 n++; /* count the VEBs */
8091 if (pf->veb[i]->seid == uplink_seid)
8092 veb = pf->veb[i];
8093 }
8094 if (n == 0 && veb && veb->uplink_seid != 0)
8095 i40e_veb_release(veb);
8096
8097 return 0;
8098}
8099
8100/**
8101 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
8102 * @vsi: ptr to the VSI
8103 *
8104 * This should only be called after i40e_vsi_mem_alloc() which allocates the
8105 * corresponding SW VSI structure and initializes num_queue_pairs for the
8106 * newly allocated VSI.
8107 *
8108 * Returns 0 on success or negative on failure
8109 **/
8110static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
8111{
8112 int ret = -ENOENT;
8113 struct i40e_pf *pf = vsi->back;
8114
493fb300 8115 if (vsi->q_vectors[0]) {
41c445ff
JB
8116 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
8117 vsi->seid);
8118 return -EEXIST;
8119 }
8120
8121 if (vsi->base_vector) {
f29eaa3d 8122 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
41c445ff
JB
8123 vsi->seid, vsi->base_vector);
8124 return -EEXIST;
8125 }
8126
90e04070 8127 ret = i40e_vsi_alloc_q_vectors(vsi);
41c445ff
JB
8128 if (ret) {
8129 dev_info(&pf->pdev->dev,
8130 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
8131 vsi->num_q_vectors, vsi->seid, ret);
8132 vsi->num_q_vectors = 0;
8133 goto vector_setup_out;
8134 }
8135
958a3e3b
SN
8136 if (vsi->num_q_vectors)
8137 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
8138 vsi->num_q_vectors, vsi->idx);
41c445ff
JB
8139 if (vsi->base_vector < 0) {
8140 dev_info(&pf->pdev->dev,
049a2be8
SN
8141 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
8142 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
41c445ff
JB
8143 i40e_vsi_free_q_vectors(vsi);
8144 ret = -ENOENT;
8145 goto vector_setup_out;
8146 }
8147
8148vector_setup_out:
8149 return ret;
8150}
8151
bc7d338f
ASJ
8152/**
8153 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
8154 * @vsi: pointer to the vsi.
8155 *
8156 * This re-allocates a vsi's queue resources.
8157 *
8158 * Returns pointer to the successfully allocated and configured VSI sw struct
8159 * on success, otherwise returns NULL on failure.
8160 **/
8161static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
8162{
8163 struct i40e_pf *pf = vsi->back;
8164 u8 enabled_tc;
8165 int ret;
8166
8167 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
8168 i40e_vsi_clear_rings(vsi);
8169
8170 i40e_vsi_free_arrays(vsi, false);
8171 i40e_set_num_rings_in_vsi(vsi);
8172 ret = i40e_vsi_alloc_arrays(vsi, false);
8173 if (ret)
8174 goto err_vsi;
8175
8176 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
8177 if (ret < 0) {
049a2be8
SN
8178 dev_info(&pf->pdev->dev,
8179 "failed to get tracking for %d queues for VSI %d err=%d\n",
8180 vsi->alloc_queue_pairs, vsi->seid, ret);
bc7d338f
ASJ
8181 goto err_vsi;
8182 }
8183 vsi->base_queue = ret;
8184
8185 /* Update the FW view of the VSI. Force a reset of TC and queue
8186 * layout configurations.
8187 */
8188 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
8189 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
8190 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
8191 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
8192
8193 /* assign it some queues */
8194 ret = i40e_alloc_rings(vsi);
8195 if (ret)
8196 goto err_rings;
8197
8198 /* map all of the rings to the q_vectors */
8199 i40e_vsi_map_rings_to_vectors(vsi);
8200 return vsi;
8201
8202err_rings:
8203 i40e_vsi_free_q_vectors(vsi);
8204 if (vsi->netdev_registered) {
8205 vsi->netdev_registered = false;
8206 unregister_netdev(vsi->netdev);
8207 free_netdev(vsi->netdev);
8208 vsi->netdev = NULL;
8209 }
8210 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
8211err_vsi:
8212 i40e_vsi_clear(vsi);
8213 return NULL;
8214}
8215
41c445ff
JB
8216/**
8217 * i40e_vsi_setup - Set up a VSI by a given type
8218 * @pf: board private structure
8219 * @type: VSI type
8220 * @uplink_seid: the switch element to link to
8221 * @param1: usage depends upon VSI type. For VF types, indicates VF id
8222 *
8223 * This allocates the sw VSI structure and its queue resources, then add a VSI
8224 * to the identified VEB.
8225 *
8226 * Returns pointer to the successfully allocated and configure VSI sw struct on
8227 * success, otherwise returns NULL on failure.
8228 **/
8229struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
8230 u16 uplink_seid, u32 param1)
8231{
8232 struct i40e_vsi *vsi = NULL;
8233 struct i40e_veb *veb = NULL;
8234 int ret, i;
8235 int v_idx;
8236
8237 /* The requested uplink_seid must be either
8238 * - the PF's port seid
8239 * no VEB is needed because this is the PF
8240 * or this is a Flow Director special case VSI
8241 * - seid of an existing VEB
8242 * - seid of a VSI that owns an existing VEB
8243 * - seid of a VSI that doesn't own a VEB
8244 * a new VEB is created and the VSI becomes the owner
8245 * - seid of the PF VSI, which is what creates the first VEB
8246 * this is a special case of the previous
8247 *
8248 * Find which uplink_seid we were given and create a new VEB if needed
8249 */
8250 for (i = 0; i < I40E_MAX_VEB; i++) {
8251 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
8252 veb = pf->veb[i];
8253 break;
8254 }
8255 }
8256
8257 if (!veb && uplink_seid != pf->mac_seid) {
8258
505682cd 8259 for (i = 0; i < pf->num_alloc_vsi; i++) {
41c445ff
JB
8260 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
8261 vsi = pf->vsi[i];
8262 break;
8263 }
8264 }
8265 if (!vsi) {
8266 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
8267 uplink_seid);
8268 return NULL;
8269 }
8270
8271 if (vsi->uplink_seid == pf->mac_seid)
8272 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
8273 vsi->tc_config.enabled_tc);
8274 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
8275 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8276 vsi->tc_config.enabled_tc);
79c21a82
ASJ
8277 if (veb) {
8278 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
8279 dev_info(&vsi->back->pdev->dev,
8280 "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
8281 __func__);
8282 return NULL;
8283 }
8284 i40e_enable_pf_switch_lb(pf);
8285 }
41c445ff
JB
8286 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8287 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8288 veb = pf->veb[i];
8289 }
8290 if (!veb) {
8291 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
8292 return NULL;
8293 }
8294
8295 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
8296 uplink_seid = veb->seid;
8297 }
8298
8299 /* get vsi sw struct */
8300 v_idx = i40e_vsi_mem_alloc(pf, type);
8301 if (v_idx < 0)
8302 goto err_alloc;
8303 vsi = pf->vsi[v_idx];
cbf61325
ASJ
8304 if (!vsi)
8305 goto err_alloc;
41c445ff
JB
8306 vsi->type = type;
8307 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
8308
8309 if (type == I40E_VSI_MAIN)
8310 pf->lan_vsi = v_idx;
8311 else if (type == I40E_VSI_SRIOV)
8312 vsi->vf_id = param1;
8313 /* assign it some queues */
cbf61325
ASJ
8314 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
8315 vsi->idx);
41c445ff 8316 if (ret < 0) {
049a2be8
SN
8317 dev_info(&pf->pdev->dev,
8318 "failed to get tracking for %d queues for VSI %d err=%d\n",
8319 vsi->alloc_queue_pairs, vsi->seid, ret);
41c445ff
JB
8320 goto err_vsi;
8321 }
8322 vsi->base_queue = ret;
8323
8324 /* get a VSI from the hardware */
8325 vsi->uplink_seid = uplink_seid;
8326 ret = i40e_add_vsi(vsi);
8327 if (ret)
8328 goto err_vsi;
8329
8330 switch (vsi->type) {
8331 /* setup the netdev if needed */
8332 case I40E_VSI_MAIN:
8333 case I40E_VSI_VMDQ2:
38e00438 8334 case I40E_VSI_FCOE:
41c445ff
JB
8335 ret = i40e_config_netdev(vsi);
8336 if (ret)
8337 goto err_netdev;
8338 ret = register_netdev(vsi->netdev);
8339 if (ret)
8340 goto err_netdev;
8341 vsi->netdev_registered = true;
8342 netif_carrier_off(vsi->netdev);
4e3b35b0
NP
8343#ifdef CONFIG_I40E_DCB
8344 /* Setup DCB netlink interface */
8345 i40e_dcbnl_setup(vsi);
8346#endif /* CONFIG_I40E_DCB */
41c445ff
JB
8347 /* fall through */
8348
8349 case I40E_VSI_FDIR:
8350 /* set up vectors and rings if needed */
8351 ret = i40e_vsi_setup_vectors(vsi);
8352 if (ret)
8353 goto err_msix;
8354
8355 ret = i40e_alloc_rings(vsi);
8356 if (ret)
8357 goto err_rings;
8358
8359 /* map all of the rings to the q_vectors */
8360 i40e_vsi_map_rings_to_vectors(vsi);
8361
8362 i40e_vsi_reset_stats(vsi);
8363 break;
8364
8365 default:
8366 /* no netdev or rings for the other VSI types */
8367 break;
8368 }
8369
8370 return vsi;
8371
8372err_rings:
8373 i40e_vsi_free_q_vectors(vsi);
8374err_msix:
8375 if (vsi->netdev_registered) {
8376 vsi->netdev_registered = false;
8377 unregister_netdev(vsi->netdev);
8378 free_netdev(vsi->netdev);
8379 vsi->netdev = NULL;
8380 }
8381err_netdev:
8382 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
8383err_vsi:
8384 i40e_vsi_clear(vsi);
8385err_alloc:
8386 return NULL;
8387}
8388
8389/**
8390 * i40e_veb_get_bw_info - Query VEB BW information
8391 * @veb: the veb to query
8392 *
8393 * Query the Tx scheduler BW configuration data for given VEB
8394 **/
8395static int i40e_veb_get_bw_info(struct i40e_veb *veb)
8396{
8397 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
8398 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
8399 struct i40e_pf *pf = veb->pf;
8400 struct i40e_hw *hw = &pf->hw;
8401 u32 tc_bw_max;
8402 int ret = 0;
8403 int i;
8404
8405 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
8406 &bw_data, NULL);
8407 if (ret) {
8408 dev_info(&pf->pdev->dev,
8409 "query veb bw config failed, aq_err=%d\n",
8410 hw->aq.asq_last_status);
8411 goto out;
8412 }
8413
8414 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
8415 &ets_data, NULL);
8416 if (ret) {
8417 dev_info(&pf->pdev->dev,
8418 "query veb bw ets config failed, aq_err=%d\n",
8419 hw->aq.asq_last_status);
8420 goto out;
8421 }
8422
8423 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
8424 veb->bw_max_quanta = ets_data.tc_bw_max;
8425 veb->is_abs_credits = bw_data.absolute_credits_enable;
23cd1f09 8426 veb->enabled_tc = ets_data.tc_valid_bits;
41c445ff
JB
8427 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
8428 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
8429 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
8430 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
8431 veb->bw_tc_limit_credits[i] =
8432 le16_to_cpu(bw_data.tc_bw_limits[i]);
8433 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
8434 }
8435
8436out:
8437 return ret;
8438}
8439
8440/**
8441 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
8442 * @pf: board private structure
8443 *
8444 * On error: returns error code (negative)
8445 * On success: returns vsi index in PF (positive)
8446 **/
8447static int i40e_veb_mem_alloc(struct i40e_pf *pf)
8448{
8449 int ret = -ENOENT;
8450 struct i40e_veb *veb;
8451 int i;
8452
8453 /* Need to protect the allocation of switch elements at the PF level */
8454 mutex_lock(&pf->switch_mutex);
8455
8456 /* VEB list may be fragmented if VEB creation/destruction has
8457 * been happening. We can afford to do a quick scan to look
8458 * for any free slots in the list.
8459 *
8460 * find next empty veb slot, looping back around if necessary
8461 */
8462 i = 0;
8463 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
8464 i++;
8465 if (i >= I40E_MAX_VEB) {
8466 ret = -ENOMEM;
8467 goto err_alloc_veb; /* out of VEB slots! */
8468 }
8469
8470 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
8471 if (!veb) {
8472 ret = -ENOMEM;
8473 goto err_alloc_veb;
8474 }
8475 veb->pf = pf;
8476 veb->idx = i;
8477 veb->enabled_tc = 1;
8478
8479 pf->veb[i] = veb;
8480 ret = i;
8481err_alloc_veb:
8482 mutex_unlock(&pf->switch_mutex);
8483 return ret;
8484}
8485
8486/**
8487 * i40e_switch_branch_release - Delete a branch of the switch tree
8488 * @branch: where to start deleting
8489 *
8490 * This uses recursion to find the tips of the branch to be
8491 * removed, deleting until we get back to and can delete this VEB.
8492 **/
8493static void i40e_switch_branch_release(struct i40e_veb *branch)
8494{
8495 struct i40e_pf *pf = branch->pf;
8496 u16 branch_seid = branch->seid;
8497 u16 veb_idx = branch->idx;
8498 int i;
8499
8500 /* release any VEBs on this VEB - RECURSION */
8501 for (i = 0; i < I40E_MAX_VEB; i++) {
8502 if (!pf->veb[i])
8503 continue;
8504 if (pf->veb[i]->uplink_seid == branch->seid)
8505 i40e_switch_branch_release(pf->veb[i]);
8506 }
8507
8508 /* Release the VSIs on this VEB, but not the owner VSI.
8509 *
8510 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
8511 * the VEB itself, so don't use (*branch) after this loop.
8512 */
505682cd 8513 for (i = 0; i < pf->num_alloc_vsi; i++) {
41c445ff
JB
8514 if (!pf->vsi[i])
8515 continue;
8516 if (pf->vsi[i]->uplink_seid == branch_seid &&
8517 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
8518 i40e_vsi_release(pf->vsi[i]);
8519 }
8520 }
8521
8522 /* There's one corner case where the VEB might not have been
8523 * removed, so double check it here and remove it if needed.
8524 * This case happens if the veb was created from the debugfs
8525 * commands and no VSIs were added to it.
8526 */
8527 if (pf->veb[veb_idx])
8528 i40e_veb_release(pf->veb[veb_idx]);
8529}
8530
8531/**
8532 * i40e_veb_clear - remove veb struct
8533 * @veb: the veb to remove
8534 **/
8535static void i40e_veb_clear(struct i40e_veb *veb)
8536{
8537 if (!veb)
8538 return;
8539
8540 if (veb->pf) {
8541 struct i40e_pf *pf = veb->pf;
8542
8543 mutex_lock(&pf->switch_mutex);
8544 if (pf->veb[veb->idx] == veb)
8545 pf->veb[veb->idx] = NULL;
8546 mutex_unlock(&pf->switch_mutex);
8547 }
8548
8549 kfree(veb);
8550}
8551
8552/**
8553 * i40e_veb_release - Delete a VEB and free its resources
8554 * @veb: the VEB being removed
8555 **/
8556void i40e_veb_release(struct i40e_veb *veb)
8557{
8558 struct i40e_vsi *vsi = NULL;
8559 struct i40e_pf *pf;
8560 int i, n = 0;
8561
8562 pf = veb->pf;
8563
8564 /* find the remaining VSI and check for extras */
505682cd 8565 for (i = 0; i < pf->num_alloc_vsi; i++) {
41c445ff
JB
8566 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
8567 n++;
8568 vsi = pf->vsi[i];
8569 }
8570 }
8571 if (n != 1) {
8572 dev_info(&pf->pdev->dev,
8573 "can't remove VEB %d with %d VSIs left\n",
8574 veb->seid, n);
8575 return;
8576 }
8577
8578 /* move the remaining VSI to uplink veb */
8579 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
8580 if (veb->uplink_seid) {
8581 vsi->uplink_seid = veb->uplink_seid;
8582 if (veb->uplink_seid == pf->mac_seid)
8583 vsi->veb_idx = I40E_NO_VEB;
8584 else
8585 vsi->veb_idx = veb->veb_idx;
8586 } else {
8587 /* floating VEB */
8588 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
8589 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
8590 }
8591
8592 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
8593 i40e_veb_clear(veb);
41c445ff
JB
8594}
8595
8596/**
8597 * i40e_add_veb - create the VEB in the switch
8598 * @veb: the VEB to be instantiated
8599 * @vsi: the controlling VSI
8600 **/
8601static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
8602{
56747264 8603 bool is_default = false;
e1c51b95 8604 bool is_cloud = false;
41c445ff
JB
8605 int ret;
8606
8607 /* get a VEB from the hardware */
8608 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
e1c51b95
KS
8609 veb->enabled_tc, is_default,
8610 is_cloud, &veb->seid, NULL);
41c445ff
JB
8611 if (ret) {
8612 dev_info(&veb->pf->pdev->dev,
8613 "couldn't add VEB, err %d, aq_err %d\n",
8614 ret, veb->pf->hw.aq.asq_last_status);
8615 return -EPERM;
8616 }
8617
8618 /* get statistics counter */
8619 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
8620 &veb->stats_idx, NULL, NULL, NULL);
8621 if (ret) {
8622 dev_info(&veb->pf->pdev->dev,
8623 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
8624 ret, veb->pf->hw.aq.asq_last_status);
8625 return -EPERM;
8626 }
8627 ret = i40e_veb_get_bw_info(veb);
8628 if (ret) {
8629 dev_info(&veb->pf->pdev->dev,
8630 "couldn't get VEB bw info, err %d, aq_err %d\n",
8631 ret, veb->pf->hw.aq.asq_last_status);
8632 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
8633 return -ENOENT;
8634 }
8635
8636 vsi->uplink_seid = veb->seid;
8637 vsi->veb_idx = veb->idx;
8638 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
8639
8640 return 0;
8641}
8642
8643/**
8644 * i40e_veb_setup - Set up a VEB
8645 * @pf: board private structure
8646 * @flags: VEB setup flags
8647 * @uplink_seid: the switch element to link to
8648 * @vsi_seid: the initial VSI seid
8649 * @enabled_tc: Enabled TC bit-map
8650 *
8651 * This allocates the sw VEB structure and links it into the switch
8652 * It is possible and legal for this to be a duplicate of an already
8653 * existing VEB. It is also possible for both uplink and vsi seids
8654 * to be zero, in order to create a floating VEB.
8655 *
8656 * Returns pointer to the successfully allocated VEB sw struct on
8657 * success, otherwise returns NULL on failure.
8658 **/
8659struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
8660 u16 uplink_seid, u16 vsi_seid,
8661 u8 enabled_tc)
8662{
8663 struct i40e_veb *veb, *uplink_veb = NULL;
8664 int vsi_idx, veb_idx;
8665 int ret;
8666
8667 /* if one seid is 0, the other must be 0 to create a floating relay */
8668 if ((uplink_seid == 0 || vsi_seid == 0) &&
8669 (uplink_seid + vsi_seid != 0)) {
8670 dev_info(&pf->pdev->dev,
8671 "one, not both seid's are 0: uplink=%d vsi=%d\n",
8672 uplink_seid, vsi_seid);
8673 return NULL;
8674 }
8675
8676 /* make sure there is such a vsi and uplink */
505682cd 8677 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
41c445ff
JB
8678 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
8679 break;
505682cd 8680 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
41c445ff
JB
8681 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
8682 vsi_seid);
8683 return NULL;
8684 }
8685
8686 if (uplink_seid && uplink_seid != pf->mac_seid) {
8687 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
8688 if (pf->veb[veb_idx] &&
8689 pf->veb[veb_idx]->seid == uplink_seid) {
8690 uplink_veb = pf->veb[veb_idx];
8691 break;
8692 }
8693 }
8694 if (!uplink_veb) {
8695 dev_info(&pf->pdev->dev,
8696 "uplink seid %d not found\n", uplink_seid);
8697 return NULL;
8698 }
8699 }
8700
8701 /* get veb sw struct */
8702 veb_idx = i40e_veb_mem_alloc(pf);
8703 if (veb_idx < 0)
8704 goto err_alloc;
8705 veb = pf->veb[veb_idx];
8706 veb->flags = flags;
8707 veb->uplink_seid = uplink_seid;
8708 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
8709 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
8710
8711 /* create the VEB in the switch */
8712 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
8713 if (ret)
8714 goto err_veb;
1bb8b935
SN
8715 if (vsi_idx == pf->lan_vsi)
8716 pf->lan_veb = veb->idx;
41c445ff
JB
8717
8718 return veb;
8719
8720err_veb:
8721 i40e_veb_clear(veb);
8722err_alloc:
8723 return NULL;
8724}
8725
8726/**
8727 * i40e_setup_pf_switch_element - set pf vars based on switch type
8728 * @pf: board private structure
8729 * @ele: element we are building info from
8730 * @num_reported: total number of elements
8731 * @printconfig: should we print the contents
8732 *
8733 * helper function to assist in extracting a few useful SEID values.
8734 **/
8735static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
8736 struct i40e_aqc_switch_config_element_resp *ele,
8737 u16 num_reported, bool printconfig)
8738{
8739 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
8740 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
8741 u8 element_type = ele->element_type;
8742 u16 seid = le16_to_cpu(ele->seid);
8743
8744 if (printconfig)
8745 dev_info(&pf->pdev->dev,
8746 "type=%d seid=%d uplink=%d downlink=%d\n",
8747 element_type, seid, uplink_seid, downlink_seid);
8748
8749 switch (element_type) {
8750 case I40E_SWITCH_ELEMENT_TYPE_MAC:
8751 pf->mac_seid = seid;
8752 break;
8753 case I40E_SWITCH_ELEMENT_TYPE_VEB:
8754 /* Main VEB? */
8755 if (uplink_seid != pf->mac_seid)
8756 break;
8757 if (pf->lan_veb == I40E_NO_VEB) {
8758 int v;
8759
8760 /* find existing or else empty VEB */
8761 for (v = 0; v < I40E_MAX_VEB; v++) {
8762 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
8763 pf->lan_veb = v;
8764 break;
8765 }
8766 }
8767 if (pf->lan_veb == I40E_NO_VEB) {
8768 v = i40e_veb_mem_alloc(pf);
8769 if (v < 0)
8770 break;
8771 pf->lan_veb = v;
8772 }
8773 }
8774
8775 pf->veb[pf->lan_veb]->seid = seid;
8776 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
8777 pf->veb[pf->lan_veb]->pf = pf;
8778 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
8779 break;
8780 case I40E_SWITCH_ELEMENT_TYPE_VSI:
8781 if (num_reported != 1)
8782 break;
8783 /* This is immediately after a reset so we can assume this is
8784 * the PF's VSI
8785 */
8786 pf->mac_seid = uplink_seid;
8787 pf->pf_seid = downlink_seid;
8788 pf->main_vsi_seid = seid;
8789 if (printconfig)
8790 dev_info(&pf->pdev->dev,
8791 "pf_seid=%d main_vsi_seid=%d\n",
8792 pf->pf_seid, pf->main_vsi_seid);
8793 break;
8794 case I40E_SWITCH_ELEMENT_TYPE_PF:
8795 case I40E_SWITCH_ELEMENT_TYPE_VF:
8796 case I40E_SWITCH_ELEMENT_TYPE_EMP:
8797 case I40E_SWITCH_ELEMENT_TYPE_BMC:
8798 case I40E_SWITCH_ELEMENT_TYPE_PE:
8799 case I40E_SWITCH_ELEMENT_TYPE_PA:
8800 /* ignore these for now */
8801 break;
8802 default:
8803 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
8804 element_type, seid);
8805 break;
8806 }
8807}
8808
8809/**
8810 * i40e_fetch_switch_configuration - Get switch config from firmware
8811 * @pf: board private structure
8812 * @printconfig: should we print the contents
8813 *
8814 * Get the current switch configuration from the device and
8815 * extract a few useful SEID values.
8816 **/
8817int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
8818{
8819 struct i40e_aqc_get_switch_config_resp *sw_config;
8820 u16 next_seid = 0;
8821 int ret = 0;
8822 u8 *aq_buf;
8823 int i;
8824
8825 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
8826 if (!aq_buf)
8827 return -ENOMEM;
8828
8829 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
8830 do {
8831 u16 num_reported, num_total;
8832
8833 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
8834 I40E_AQ_LARGE_BUF,
8835 &next_seid, NULL);
8836 if (ret) {
8837 dev_info(&pf->pdev->dev,
8838 "get switch config failed %d aq_err=%x\n",
8839 ret, pf->hw.aq.asq_last_status);
8840 kfree(aq_buf);
8841 return -ENOENT;
8842 }
8843
8844 num_reported = le16_to_cpu(sw_config->header.num_reported);
8845 num_total = le16_to_cpu(sw_config->header.num_total);
8846
8847 if (printconfig)
8848 dev_info(&pf->pdev->dev,
8849 "header: %d reported %d total\n",
8850 num_reported, num_total);
8851
41c445ff
JB
8852 for (i = 0; i < num_reported; i++) {
8853 struct i40e_aqc_switch_config_element_resp *ele =
8854 &sw_config->element[i];
8855
8856 i40e_setup_pf_switch_element(pf, ele, num_reported,
8857 printconfig);
8858 }
8859 } while (next_seid != 0);
8860
8861 kfree(aq_buf);
8862 return ret;
8863}
8864
8865/**
8866 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
8867 * @pf: board private structure
bc7d338f 8868 * @reinit: if the Main VSI needs to re-initialized.
41c445ff
JB
8869 *
8870 * Returns 0 on success, negative value on failure
8871 **/
bc7d338f 8872static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
41c445ff
JB
8873{
8874 int ret;
8875
8876 /* find out what's out there already */
8877 ret = i40e_fetch_switch_configuration(pf, false);
8878 if (ret) {
8879 dev_info(&pf->pdev->dev,
8880 "couldn't fetch switch config, err %d, aq_err %d\n",
8881 ret, pf->hw.aq.asq_last_status);
8882 return ret;
8883 }
8884 i40e_pf_reset_stats(pf);
8885
41c445ff 8886 /* first time setup */
bc7d338f 8887 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
41c445ff
JB
8888 struct i40e_vsi *vsi = NULL;
8889 u16 uplink_seid;
8890
8891 /* Set up the PF VSI associated with the PF's main VSI
8892 * that is already in the HW switch
8893 */
8894 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
8895 uplink_seid = pf->veb[pf->lan_veb]->seid;
8896 else
8897 uplink_seid = pf->mac_seid;
bc7d338f
ASJ
8898 if (pf->lan_vsi == I40E_NO_VSI)
8899 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
8900 else if (reinit)
8901 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
41c445ff
JB
8902 if (!vsi) {
8903 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
8904 i40e_fdir_teardown(pf);
8905 return -EAGAIN;
8906 }
41c445ff
JB
8907 } else {
8908 /* force a reset of TC and queue layout configurations */
8909 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
8910 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
8911 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
8912 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
8913 }
8914 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
8915
cbf61325
ASJ
8916 i40e_fdir_sb_setup(pf);
8917
41c445ff
JB
8918 /* Setup static PF queue filter control settings */
8919 ret = i40e_setup_pf_filter_control(pf);
8920 if (ret) {
8921 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
8922 ret);
8923 /* Failure here should not stop continuing other steps */
8924 }
8925
8926 /* enable RSS in the HW, even for only one queue, as the stack can use
8927 * the hash
8928 */
8929 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
8930 i40e_config_rss(pf);
8931
8932 /* fill in link information and enable LSE reporting */
a34a6711
MW
8933 i40e_update_link_info(&pf->hw, true);
8934 i40e_link_event(pf);
8935
8936 /* Initialize user-specific link properties */
8937 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
8938 I40E_AQ_AN_COMPLETED) ? true : false);
8939
8940 /* fill in link information and enable LSE reporting */
8109e123 8941 i40e_update_link_info(&pf->hw, true);
41c445ff
JB
8942 i40e_link_event(pf);
8943
d52c20b7 8944 /* Initialize user-specific link properties */
41c445ff
JB
8945 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
8946 I40E_AQ_AN_COMPLETED) ? true : false);
d52c20b7 8947
beb0dff1
JK
8948 i40e_ptp_init(pf);
8949
41c445ff
JB
8950 return ret;
8951}
8952
41c445ff
JB
8953/**
8954 * i40e_determine_queue_usage - Work out queue distribution
8955 * @pf: board private structure
8956 **/
8957static void i40e_determine_queue_usage(struct i40e_pf *pf)
8958{
41c445ff
JB
8959 int queues_left;
8960
8961 pf->num_lan_qps = 0;
38e00438
VD
8962#ifdef I40E_FCOE
8963 pf->num_fcoe_qps = 0;
8964#endif
41c445ff
JB
8965
8966 /* Find the max queues to be put into basic use. We'll always be
8967 * using TC0, whether or not DCB is running, and TC0 will get the
8968 * big RSS set.
8969 */
8970 queues_left = pf->hw.func_caps.num_tx_qp;
8971
cbf61325 8972 if ((queues_left == 1) ||
9aa7e935 8973 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
41c445ff
JB
8974 /* one qp for PF, no queues for anything else */
8975 queues_left = 0;
8976 pf->rss_size = pf->num_lan_qps = 1;
8977
8978 /* make sure all the fancies are disabled */
60ea5f83 8979 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
38e00438
VD
8980#ifdef I40E_FCOE
8981 I40E_FLAG_FCOE_ENABLED |
8982#endif
60ea5f83
JB
8983 I40E_FLAG_FD_SB_ENABLED |
8984 I40E_FLAG_FD_ATR_ENABLED |
4d9b6043 8985 I40E_FLAG_DCB_CAPABLE |
60ea5f83
JB
8986 I40E_FLAG_SRIOV_ENABLED |
8987 I40E_FLAG_VMDQ_ENABLED);
9aa7e935
FZ
8988 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
8989 I40E_FLAG_FD_SB_ENABLED |
bbe7d0e0 8990 I40E_FLAG_FD_ATR_ENABLED |
4d9b6043 8991 I40E_FLAG_DCB_CAPABLE))) {
9aa7e935
FZ
8992 /* one qp for PF */
8993 pf->rss_size = pf->num_lan_qps = 1;
8994 queues_left -= pf->num_lan_qps;
8995
8996 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
38e00438
VD
8997#ifdef I40E_FCOE
8998 I40E_FLAG_FCOE_ENABLED |
8999#endif
9aa7e935
FZ
9000 I40E_FLAG_FD_SB_ENABLED |
9001 I40E_FLAG_FD_ATR_ENABLED |
9002 I40E_FLAG_DCB_ENABLED |
9003 I40E_FLAG_VMDQ_ENABLED);
41c445ff 9004 } else {
cbf61325 9005 /* Not enough queues for all TCs */
4d9b6043 9006 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
cbf61325 9007 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
4d9b6043 9008 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
cbf61325
ASJ
9009 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
9010 }
9011 pf->num_lan_qps = pf->rss_size_max;
9012 queues_left -= pf->num_lan_qps;
9013 }
9014
38e00438
VD
9015#ifdef I40E_FCOE
9016 if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
9017 if (I40E_DEFAULT_FCOE <= queues_left) {
9018 pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
9019 } else if (I40E_MINIMUM_FCOE <= queues_left) {
9020 pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
9021 } else {
9022 pf->num_fcoe_qps = 0;
9023 pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
9024 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
9025 }
9026
9027 queues_left -= pf->num_fcoe_qps;
9028 }
9029
9030#endif
cbf61325
ASJ
9031 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9032 if (queues_left > 1) {
9033 queues_left -= 1; /* save 1 queue for FD */
9034 } else {
9035 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9036 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
9037 }
41c445ff
JB
9038 }
9039
9040 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9041 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
cbf61325
ASJ
9042 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
9043 (queues_left / pf->num_vf_qps));
41c445ff
JB
9044 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
9045 }
9046
9047 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
9048 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
9049 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
9050 (queues_left / pf->num_vmdq_qps));
9051 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
9052 }
9053
f8ff1464 9054 pf->queues_left = queues_left;
38e00438
VD
9055#ifdef I40E_FCOE
9056 dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
9057#endif
41c445ff
JB
9058}
9059
9060/**
9061 * i40e_setup_pf_filter_control - Setup PF static filter control
9062 * @pf: PF to be setup
9063 *
9064 * i40e_setup_pf_filter_control sets up a pf's initial filter control
9065 * settings. If PE/FCoE are enabled then it will also set the per PF
9066 * based filter sizes required for them. It also enables Flow director,
9067 * ethertype and macvlan type filter settings for the pf.
9068 *
9069 * Returns 0 on success, negative on failure
9070 **/
9071static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
9072{
9073 struct i40e_filter_control_settings *settings = &pf->filter_settings;
9074
9075 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
9076
9077 /* Flow Director is enabled */
60ea5f83 9078 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
41c445ff
JB
9079 settings->enable_fdir = true;
9080
9081 /* Ethtype and MACVLAN filters enabled for PF */
9082 settings->enable_ethtype = true;
9083 settings->enable_macvlan = true;
9084
9085 if (i40e_set_filter_control(&pf->hw, settings))
9086 return -ENOENT;
9087
9088 return 0;
9089}
9090
0c22b3dd
JB
9091#define INFO_STRING_LEN 255
9092static void i40e_print_features(struct i40e_pf *pf)
9093{
9094 struct i40e_hw *hw = &pf->hw;
9095 char *buf, *string;
9096
9097 string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
9098 if (!string) {
9099 dev_err(&pf->pdev->dev, "Features string allocation failed\n");
9100 return;
9101 }
9102
9103 buf = string;
9104
9105 buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
9106#ifdef CONFIG_PCI_IOV
9107 buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
9108#endif
9109 buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
9110 pf->vsi[pf->lan_vsi]->num_queue_pairs);
9111
9112 if (pf->flags & I40E_FLAG_RSS_ENABLED)
9113 buf += sprintf(buf, "RSS ");
0c22b3dd 9114 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
c6423ff1
AA
9115 buf += sprintf(buf, "FD_ATR ");
9116 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9117 buf += sprintf(buf, "FD_SB ");
0c22b3dd 9118 buf += sprintf(buf, "NTUPLE ");
c6423ff1 9119 }
4d9b6043 9120 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
0c22b3dd
JB
9121 buf += sprintf(buf, "DCB ");
9122 if (pf->flags & I40E_FLAG_PTP)
9123 buf += sprintf(buf, "PTP ");
38e00438
VD
9124#ifdef I40E_FCOE
9125 if (pf->flags & I40E_FLAG_FCOE_ENABLED)
9126 buf += sprintf(buf, "FCOE ");
9127#endif
0c22b3dd
JB
9128
9129 BUG_ON(buf > (string + INFO_STRING_LEN));
9130 dev_info(&pf->pdev->dev, "%s\n", string);
9131 kfree(string);
9132}
9133
41c445ff
JB
9134/**
9135 * i40e_probe - Device initialization routine
9136 * @pdev: PCI device information struct
9137 * @ent: entry in i40e_pci_tbl
9138 *
9139 * i40e_probe initializes a pf identified by a pci_dev structure.
9140 * The OS initialization, configuring of the pf private structure,
9141 * and a hardware reset occur.
9142 *
9143 * Returns 0 on success, negative on failure
9144 **/
9145static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9146{
41c445ff
JB
9147 struct i40e_pf *pf;
9148 struct i40e_hw *hw;
93cd765b 9149 static u16 pfs_found;
d4dfb81a 9150 u16 link_status;
41c445ff
JB
9151 int err = 0;
9152 u32 len;
8a9eb7d3 9153 u32 i;
41c445ff
JB
9154
9155 err = pci_enable_device_mem(pdev);
9156 if (err)
9157 return err;
9158
9159 /* set up for high or low dma */
6494294f 9160 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6494294f 9161 if (err) {
e3e3bfdd
JS
9162 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9163 if (err) {
9164 dev_err(&pdev->dev,
9165 "DMA configuration failed: 0x%x\n", err);
9166 goto err_dma;
9167 }
41c445ff
JB
9168 }
9169
9170 /* set up pci connections */
9171 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
9172 IORESOURCE_MEM), i40e_driver_name);
9173 if (err) {
9174 dev_info(&pdev->dev,
9175 "pci_request_selected_regions failed %d\n", err);
9176 goto err_pci_reg;
9177 }
9178
9179 pci_enable_pcie_error_reporting(pdev);
9180 pci_set_master(pdev);
9181
9182 /* Now that we have a PCI connection, we need to do the
9183 * low level device setup. This is primarily setting up
9184 * the Admin Queue structures and then querying for the
9185 * device's current profile information.
9186 */
9187 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
9188 if (!pf) {
9189 err = -ENOMEM;
9190 goto err_pf_alloc;
9191 }
9192 pf->next_vsi = 0;
9193 pf->pdev = pdev;
9194 set_bit(__I40E_DOWN, &pf->state);
9195
9196 hw = &pf->hw;
9197 hw->back = pf;
9198 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
9199 pci_resource_len(pdev, 0));
9200 if (!hw->hw_addr) {
9201 err = -EIO;
9202 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
9203 (unsigned int)pci_resource_start(pdev, 0),
9204 (unsigned int)pci_resource_len(pdev, 0), err);
9205 goto err_ioremap;
9206 }
9207 hw->vendor_id = pdev->vendor;
9208 hw->device_id = pdev->device;
9209 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
9210 hw->subsystem_vendor_id = pdev->subsystem_vendor;
9211 hw->subsystem_device_id = pdev->subsystem_device;
9212 hw->bus.device = PCI_SLOT(pdev->devfn);
9213 hw->bus.func = PCI_FUNC(pdev->devfn);
93cd765b 9214 pf->instance = pfs_found;
41c445ff 9215
5b5faa43
SN
9216 if (debug != -1) {
9217 pf->msg_enable = pf->hw.debug_mask;
9218 pf->msg_enable = debug;
9219 }
9220
7134f9ce
JB
9221 /* do a special CORER for clearing PXE mode once at init */
9222 if (hw->revision_id == 0 &&
9223 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
9224 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
9225 i40e_flush(hw);
9226 msleep(200);
9227 pf->corer_count++;
9228
9229 i40e_clear_pxe_mode(hw);
9230 }
9231
41c445ff 9232 /* Reset here to make sure all is clean and to define PF 'n' */
838d41d9 9233 i40e_clear_hw(hw);
41c445ff
JB
9234 err = i40e_pf_reset(hw);
9235 if (err) {
9236 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
9237 goto err_pf_reset;
9238 }
9239 pf->pfr_count++;
9240
9241 hw->aq.num_arq_entries = I40E_AQ_LEN;
9242 hw->aq.num_asq_entries = I40E_AQ_LEN;
9243 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9244 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
9245 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
b2008cbf 9246
b294ac70 9247 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
b2008cbf
CW
9248 "%s-%s:misc",
9249 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
41c445ff
JB
9250
9251 err = i40e_init_shared_code(hw);
9252 if (err) {
9253 dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
9254 goto err_pf_reset;
9255 }
9256
d52c20b7
JB
9257 /* set up a default setting for link flow control */
9258 pf->hw.fc.requested_mode = I40E_FC_NONE;
9259
41c445ff
JB
9260 err = i40e_init_adminq(hw);
9261 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
9262 if (err) {
9263 dev_info(&pdev->dev,
7aa67613 9264 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
41c445ff
JB
9265 goto err_pf_reset;
9266 }
9267
7aa67613
CS
9268 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
9269 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
278b6f62 9270 dev_info(&pdev->dev,
7aa67613
CS
9271 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
9272 else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
9273 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
278b6f62 9274 dev_info(&pdev->dev,
7aa67613 9275 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
278b6f62
SN
9276
9277
4eb3f768
SN
9278 i40e_verify_eeprom(pf);
9279
2c5fe33b
JB
9280 /* Rev 0 hardware was never productized */
9281 if (hw->revision_id < 1)
9282 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
9283
6ff4ef86 9284 i40e_clear_pxe_mode(hw);
41c445ff
JB
9285 err = i40e_get_capabilities(pf);
9286 if (err)
9287 goto err_adminq_setup;
9288
9289 err = i40e_sw_init(pf);
9290 if (err) {
9291 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
9292 goto err_sw_init;
9293 }
9294
9295 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9296 hw->func_caps.num_rx_qp,
9297 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
9298 if (err) {
9299 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
9300 goto err_init_lan_hmc;
9301 }
9302
9303 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9304 if (err) {
9305 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
9306 err = -ENOENT;
9307 goto err_configure_lan_hmc;
9308 }
9309
b686ece5
NP
9310 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
9311 * Ignore error return codes because if it was already disabled via
9312 * hardware settings this will fail
9313 */
9314 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
9315 (pf->hw.aq.fw_maj_ver < 4)) {
9316 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
9317 i40e_aq_stop_lldp(hw, true, NULL);
9318 }
9319
41c445ff 9320 i40e_get_mac_addr(hw, hw->mac.addr);
f62b5060 9321 if (!is_valid_ether_addr(hw->mac.addr)) {
41c445ff
JB
9322 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
9323 err = -EIO;
9324 goto err_mac_addr;
9325 }
9326 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
9a173901 9327 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
1f224ad2
NP
9328 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
9329 if (is_valid_ether_addr(hw->mac.port_addr))
9330 pf->flags |= I40E_FLAG_PORT_ID_VALID;
38e00438
VD
9331#ifdef I40E_FCOE
9332 err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
9333 if (err)
9334 dev_info(&pdev->dev,
9335 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
9336 if (!is_valid_ether_addr(hw->mac.san_addr)) {
9337 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
9338 hw->mac.san_addr);
9339 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
9340 }
9341 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
9342#endif /* I40E_FCOE */
41c445ff
JB
9343
9344 pci_set_drvdata(pdev, pf);
9345 pci_save_state(pdev);
4e3b35b0
NP
9346#ifdef CONFIG_I40E_DCB
9347 err = i40e_init_pf_dcb(pf);
9348 if (err) {
aebfc816 9349 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
4d9b6043 9350 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
014269ff 9351 /* Continue without DCB enabled */
4e3b35b0
NP
9352 }
9353#endif /* CONFIG_I40E_DCB */
41c445ff
JB
9354
9355 /* set up periodic task facility */
9356 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
9357 pf->service_timer_period = HZ;
9358
9359 INIT_WORK(&pf->service_task, i40e_service_task);
9360 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
9361 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
9362 pf->link_check_timeout = jiffies;
9363
8e2773ae
SN
9364 /* WoL defaults to disabled */
9365 pf->wol_en = false;
9366 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
9367
41c445ff
JB
9368 /* set up the main switch operations */
9369 i40e_determine_queue_usage(pf);
9370 i40e_init_interrupt_scheme(pf);
9371
505682cd
MW
9372 /* The number of VSIs reported by the FW is the minimum guaranteed
9373 * to us; HW supports far more and we share the remaining pool with
9374 * the other PFs. We allocate space for more than the guarantee with
9375 * the understanding that we might not get them all later.
41c445ff 9376 */
505682cd
MW
9377 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
9378 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
9379 else
9380 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
9381
9382 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
9383 len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
41c445ff 9384 pf->vsi = kzalloc(len, GFP_KERNEL);
ed87ac09
WY
9385 if (!pf->vsi) {
9386 err = -ENOMEM;
41c445ff 9387 goto err_switch_setup;
ed87ac09 9388 }
41c445ff 9389
bc7d338f 9390 err = i40e_setup_pf_switch(pf, false);
41c445ff
JB
9391 if (err) {
9392 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
9393 goto err_vsis;
9394 }
8a9eb7d3 9395 /* if FDIR VSI was set up, start it now */
505682cd 9396 for (i = 0; i < pf->num_alloc_vsi; i++) {
8a9eb7d3
SN
9397 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
9398 i40e_vsi_open(pf->vsi[i]);
9399 break;
9400 }
9401 }
41c445ff 9402
7e2453fe
JB
9403 /* driver is only interested in link up/down and module qualification
9404 * reports from firmware
9405 */
9406 err = i40e_aq_set_phy_int_mask(&pf->hw,
9407 I40E_AQ_EVENT_LINK_UPDOWN |
9408 I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
9409 if (err)
9410 dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
9411
cafa2ee6
ASJ
9412 msleep(75);
9413 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
9414 if (err) {
9415 dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
9416 pf->hw.aq.asq_last_status);
9417 }
9418
41c445ff
JB
9419 /* The main driver is (mostly) up and happy. We need to set this state
9420 * before setting up the misc vector or we get a race and the vector
9421 * ends up disabled forever.
9422 */
9423 clear_bit(__I40E_DOWN, &pf->state);
9424
9425 /* In case of MSIX we are going to setup the misc vector right here
9426 * to handle admin queue events etc. In case of legacy and MSI
9427 * the misc functionality and queue processing is combined in
9428 * the same vector and that gets setup at open.
9429 */
9430 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
9431 err = i40e_setup_misc_vector(pf);
9432 if (err) {
9433 dev_info(&pdev->dev,
9434 "setup of misc vector failed: %d\n", err);
9435 goto err_vsis;
9436 }
9437 }
9438
df805f62 9439#ifdef CONFIG_PCI_IOV
41c445ff
JB
9440 /* prep for VF support */
9441 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
4eb3f768
SN
9442 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
9443 !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
41c445ff
JB
9444 u32 val;
9445
9446 /* disable link interrupts for VFs */
9447 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
9448 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
9449 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
9450 i40e_flush(hw);
4aeec010
MW
9451
9452 if (pci_num_vf(pdev)) {
9453 dev_info(&pdev->dev,
9454 "Active VFs found, allocating resources.\n");
9455 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
9456 if (err)
9457 dev_info(&pdev->dev,
9458 "Error %d allocating resources for existing VFs\n",
9459 err);
9460 }
41c445ff 9461 }
df805f62 9462#endif /* CONFIG_PCI_IOV */
41c445ff 9463
93cd765b
ASJ
9464 pfs_found++;
9465
41c445ff
JB
9466 i40e_dbg_pf_init(pf);
9467
9468 /* tell the firmware that we're starting */
44033fac 9469 i40e_send_version(pf);
41c445ff
JB
9470
9471 /* since everything's happy, start the service_task timer */
9472 mod_timer(&pf->service_timer,
9473 round_jiffies(jiffies + pf->service_timer_period));
9474
38e00438
VD
9475#ifdef I40E_FCOE
9476 /* create FCoE interface */
9477 i40e_fcoe_vsi_setup(pf);
9478
9479#endif
d4dfb81a
CS
9480 /* Get the negotiated link width and speed from PCI config space */
9481 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
9482
9483 i40e_set_pci_config_data(hw, link_status);
9484
69bfb110 9485 dev_info(&pdev->dev, "PCI-Express: %s %s\n",
d4dfb81a
CS
9486 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
9487 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
9488 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
9489 "Unknown"),
9490 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
9491 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
9492 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
9493 hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
9494 "Unknown"));
9495
9496 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
9497 hw->bus.speed < i40e_bus_speed_8000) {
9498 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
9499 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
9500 }
9501
0c22b3dd
JB
9502 /* print a string summarizing features */
9503 i40e_print_features(pf);
9504
41c445ff
JB
9505 return 0;
9506
9507 /* Unwind what we've done if something failed in the setup */
9508err_vsis:
9509 set_bit(__I40E_DOWN, &pf->state);
41c445ff
JB
9510 i40e_clear_interrupt_scheme(pf);
9511 kfree(pf->vsi);
04b03013
SN
9512err_switch_setup:
9513 i40e_reset_interrupt_capability(pf);
41c445ff
JB
9514 del_timer_sync(&pf->service_timer);
9515err_mac_addr:
9516err_configure_lan_hmc:
9517 (void)i40e_shutdown_lan_hmc(hw);
9518err_init_lan_hmc:
9519 kfree(pf->qp_pile);
9520 kfree(pf->irq_pile);
9521err_sw_init:
9522err_adminq_setup:
9523 (void)i40e_shutdown_adminq(hw);
9524err_pf_reset:
9525 iounmap(hw->hw_addr);
9526err_ioremap:
9527 kfree(pf);
9528err_pf_alloc:
9529 pci_disable_pcie_error_reporting(pdev);
9530 pci_release_selected_regions(pdev,
9531 pci_select_bars(pdev, IORESOURCE_MEM));
9532err_pci_reg:
9533err_dma:
9534 pci_disable_device(pdev);
9535 return err;
9536}
9537
9538/**
9539 * i40e_remove - Device removal routine
9540 * @pdev: PCI device information struct
9541 *
9542 * i40e_remove is called by the PCI subsystem to alert the driver
9543 * that is should release a PCI device. This could be caused by a
9544 * Hot-Plug event, or because the driver is going to be removed from
9545 * memory.
9546 **/
9547static void i40e_remove(struct pci_dev *pdev)
9548{
9549 struct i40e_pf *pf = pci_get_drvdata(pdev);
9550 i40e_status ret_code;
41c445ff
JB
9551 int i;
9552
9553 i40e_dbg_pf_exit(pf);
9554
beb0dff1
JK
9555 i40e_ptp_stop(pf);
9556
41c445ff
JB
9557 /* no more scheduling of any task */
9558 set_bit(__I40E_DOWN, &pf->state);
9559 del_timer_sync(&pf->service_timer);
9560 cancel_work_sync(&pf->service_task);
9561
eb2d80bc
MW
9562 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
9563 i40e_free_vfs(pf);
9564 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
9565 }
9566
41c445ff
JB
9567 i40e_fdir_teardown(pf);
9568
9569 /* If there is a switch structure or any orphans, remove them.
9570 * This will leave only the PF's VSI remaining.
9571 */
9572 for (i = 0; i < I40E_MAX_VEB; i++) {
9573 if (!pf->veb[i])
9574 continue;
9575
9576 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
9577 pf->veb[i]->uplink_seid == 0)
9578 i40e_switch_branch_release(pf->veb[i]);
9579 }
9580
9581 /* Now we can shutdown the PF's VSI, just before we kill
9582 * adminq and hmc.
9583 */
9584 if (pf->vsi[pf->lan_vsi])
9585 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
9586
41c445ff 9587 /* shutdown and destroy the HMC */
60442dea
SN
9588 if (pf->hw.hmc.hmc_obj) {
9589 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
9590 if (ret_code)
9591 dev_warn(&pdev->dev,
9592 "Failed to destroy the HMC resources: %d\n",
9593 ret_code);
9594 }
41c445ff
JB
9595
9596 /* shutdown the adminq */
41c445ff
JB
9597 ret_code = i40e_shutdown_adminq(&pf->hw);
9598 if (ret_code)
9599 dev_warn(&pdev->dev,
9600 "Failed to destroy the Admin Queue resources: %d\n",
9601 ret_code);
9602
9603 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
9604 i40e_clear_interrupt_scheme(pf);
505682cd 9605 for (i = 0; i < pf->num_alloc_vsi; i++) {
41c445ff
JB
9606 if (pf->vsi[i]) {
9607 i40e_vsi_clear_rings(pf->vsi[i]);
9608 i40e_vsi_clear(pf->vsi[i]);
9609 pf->vsi[i] = NULL;
9610 }
9611 }
9612
9613 for (i = 0; i < I40E_MAX_VEB; i++) {
9614 kfree(pf->veb[i]);
9615 pf->veb[i] = NULL;
9616 }
9617
9618 kfree(pf->qp_pile);
9619 kfree(pf->irq_pile);
41c445ff
JB
9620 kfree(pf->vsi);
9621
41c445ff
JB
9622 iounmap(pf->hw.hw_addr);
9623 kfree(pf);
9624 pci_release_selected_regions(pdev,
9625 pci_select_bars(pdev, IORESOURCE_MEM));
9626
9627 pci_disable_pcie_error_reporting(pdev);
9628 pci_disable_device(pdev);
9629}
9630
9631/**
9632 * i40e_pci_error_detected - warning that something funky happened in PCI land
9633 * @pdev: PCI device information struct
9634 *
9635 * Called to warn that something happened and the error handling steps
9636 * are in progress. Allows the driver to quiesce things, be ready for
9637 * remediation.
9638 **/
9639static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
9640 enum pci_channel_state error)
9641{
9642 struct i40e_pf *pf = pci_get_drvdata(pdev);
9643
9644 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
9645
9646 /* shutdown all operations */
9007bccd
SN
9647 if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
9648 rtnl_lock();
9649 i40e_prep_for_reset(pf);
9650 rtnl_unlock();
9651 }
41c445ff
JB
9652
9653 /* Request a slot reset */
9654 return PCI_ERS_RESULT_NEED_RESET;
9655}
9656
9657/**
9658 * i40e_pci_error_slot_reset - a PCI slot reset just happened
9659 * @pdev: PCI device information struct
9660 *
9661 * Called to find if the driver can work with the device now that
9662 * the pci slot has been reset. If a basic connection seems good
9663 * (registers are readable and have sane content) then return a
9664 * happy little PCI_ERS_RESULT_xxx.
9665 **/
9666static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
9667{
9668 struct i40e_pf *pf = pci_get_drvdata(pdev);
9669 pci_ers_result_t result;
9670 int err;
9671 u32 reg;
9672
9673 dev_info(&pdev->dev, "%s\n", __func__);
9674 if (pci_enable_device_mem(pdev)) {
9675 dev_info(&pdev->dev,
9676 "Cannot re-enable PCI device after reset.\n");
9677 result = PCI_ERS_RESULT_DISCONNECT;
9678 } else {
9679 pci_set_master(pdev);
9680 pci_restore_state(pdev);
9681 pci_save_state(pdev);
9682 pci_wake_from_d3(pdev, false);
9683
9684 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9685 if (reg == 0)
9686 result = PCI_ERS_RESULT_RECOVERED;
9687 else
9688 result = PCI_ERS_RESULT_DISCONNECT;
9689 }
9690
9691 err = pci_cleanup_aer_uncorrect_error_status(pdev);
9692 if (err) {
9693 dev_info(&pdev->dev,
9694 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
9695 err);
9696 /* non-fatal, continue */
9697 }
9698
9699 return result;
9700}
9701
9702/**
9703 * i40e_pci_error_resume - restart operations after PCI error recovery
9704 * @pdev: PCI device information struct
9705 *
9706 * Called to allow the driver to bring things back up after PCI error
9707 * and/or reset recovery has finished.
9708 **/
9709static void i40e_pci_error_resume(struct pci_dev *pdev)
9710{
9711 struct i40e_pf *pf = pci_get_drvdata(pdev);
9712
9713 dev_info(&pdev->dev, "%s\n", __func__);
9007bccd
SN
9714 if (test_bit(__I40E_SUSPENDED, &pf->state))
9715 return;
9716
9717 rtnl_lock();
41c445ff 9718 i40e_handle_reset_warning(pf);
9007bccd
SN
9719 rtnl_lock();
9720}
9721
9722/**
9723 * i40e_shutdown - PCI callback for shutting down
9724 * @pdev: PCI device information struct
9725 **/
9726static void i40e_shutdown(struct pci_dev *pdev)
9727{
9728 struct i40e_pf *pf = pci_get_drvdata(pdev);
8e2773ae 9729 struct i40e_hw *hw = &pf->hw;
9007bccd
SN
9730
9731 set_bit(__I40E_SUSPENDED, &pf->state);
9732 set_bit(__I40E_DOWN, &pf->state);
9733 rtnl_lock();
9734 i40e_prep_for_reset(pf);
9735 rtnl_unlock();
9736
8e2773ae
SN
9737 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
9738 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
9739
e147758d
SN
9740 i40e_clear_interrupt_scheme(pf);
9741
9007bccd 9742 if (system_state == SYSTEM_POWER_OFF) {
8e2773ae 9743 pci_wake_from_d3(pdev, pf->wol_en);
9007bccd
SN
9744 pci_set_power_state(pdev, PCI_D3hot);
9745 }
9746}
9747
9748#ifdef CONFIG_PM
9749/**
9750 * i40e_suspend - PCI callback for moving to D3
9751 * @pdev: PCI device information struct
9752 **/
9753static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
9754{
9755 struct i40e_pf *pf = pci_get_drvdata(pdev);
8e2773ae 9756 struct i40e_hw *hw = &pf->hw;
9007bccd
SN
9757
9758 set_bit(__I40E_SUSPENDED, &pf->state);
9759 set_bit(__I40E_DOWN, &pf->state);
88086e5d
MW
9760 del_timer_sync(&pf->service_timer);
9761 cancel_work_sync(&pf->service_task);
9007bccd
SN
9762 rtnl_lock();
9763 i40e_prep_for_reset(pf);
9764 rtnl_unlock();
9765
8e2773ae
SN
9766 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
9767 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
9768
9769 pci_wake_from_d3(pdev, pf->wol_en);
9007bccd
SN
9770 pci_set_power_state(pdev, PCI_D3hot);
9771
9772 return 0;
41c445ff
JB
9773}
9774
9007bccd
SN
9775/**
9776 * i40e_resume - PCI callback for waking up from D3
9777 * @pdev: PCI device information struct
9778 **/
9779static int i40e_resume(struct pci_dev *pdev)
9780{
9781 struct i40e_pf *pf = pci_get_drvdata(pdev);
9782 u32 err;
9783
9784 pci_set_power_state(pdev, PCI_D0);
9785 pci_restore_state(pdev);
9786 /* pci_restore_state() clears dev->state_saves, so
9787 * call pci_save_state() again to restore it.
9788 */
9789 pci_save_state(pdev);
9790
9791 err = pci_enable_device_mem(pdev);
9792 if (err) {
9793 dev_err(&pdev->dev,
9794 "%s: Cannot enable PCI device from suspend\n",
9795 __func__);
9796 return err;
9797 }
9798 pci_set_master(pdev);
9799
9800 /* no wakeup events while running */
9801 pci_wake_from_d3(pdev, false);
9802
9803 /* handling the reset will rebuild the device state */
9804 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
9805 clear_bit(__I40E_DOWN, &pf->state);
9806 rtnl_lock();
9807 i40e_reset_and_rebuild(pf, false);
9808 rtnl_unlock();
9809 }
9810
9811 return 0;
9812}
9813
9814#endif
41c445ff
JB
9815static const struct pci_error_handlers i40e_err_handler = {
9816 .error_detected = i40e_pci_error_detected,
9817 .slot_reset = i40e_pci_error_slot_reset,
9818 .resume = i40e_pci_error_resume,
9819};
9820
9821static struct pci_driver i40e_driver = {
9822 .name = i40e_driver_name,
9823 .id_table = i40e_pci_tbl,
9824 .probe = i40e_probe,
9825 .remove = i40e_remove,
9007bccd
SN
9826#ifdef CONFIG_PM
9827 .suspend = i40e_suspend,
9828 .resume = i40e_resume,
9829#endif
9830 .shutdown = i40e_shutdown,
41c445ff
JB
9831 .err_handler = &i40e_err_handler,
9832 .sriov_configure = i40e_pci_sriov_configure,
9833};
9834
9835/**
9836 * i40e_init_module - Driver registration routine
9837 *
9838 * i40e_init_module is the first routine called when the driver is
9839 * loaded. All it does is register with the PCI subsystem.
9840 **/
9841static int __init i40e_init_module(void)
9842{
9843 pr_info("%s: %s - version %s\n", i40e_driver_name,
9844 i40e_driver_string, i40e_driver_version_str);
9845 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
9846 i40e_dbg_init();
9847 return pci_register_driver(&i40e_driver);
9848}
9849module_init(i40e_init_module);
9850
9851/**
9852 * i40e_exit_module - Driver exit cleanup routine
9853 *
9854 * i40e_exit_module is called just before the driver is removed
9855 * from memory.
9856 **/
9857static void __exit i40e_exit_module(void)
9858{
9859 pci_unregister_driver(&i40e_driver);
9860 i40e_dbg_exit();
9861}
9862module_exit(i40e_exit_module);