/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"
#ifdef CONFIG_I40E_VXLAN
#include <net/vxlan.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
		"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 14
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
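/* the preprocessor expands the numeric components above, so with the
 * current values DRV_VERSION evaluates to the string "0.3.14-k"
 */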
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";

/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
	{PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
	{PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

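	/* dma_zalloc_coherent() hands back zeroed, DMA-coherent memory;
	 * rounding the size up to the requested alignment is how the
	 * shared code's alignment constraint is satisfied here
	 */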
	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
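 *
 * Example (illustrative): a caller that needs four contiguous queue
 * pairs might use i40e_get_lump(pf, pf->qp_pile, 4, vsi->idx) and
 * treat the returned index as its base queue.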
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		} else {
			/* not enough, so skip over it and continue looking */
			i += j;
		}
	}

	return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
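	/* test_and_set_bit() makes the check-and-schedule atomic, so
	 * racing callers queue the service task at most once
	 */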
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		schedule_work(&pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 0;
	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d\n",
		    pf->tx_timeout_recovery_level);

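	/* each timeout escalates to a progressively heavier recovery:
	 * reinit this VSI's queues, then a PF reset, a core reset, and
	 * finally a global reset before giving up and downing the VSI
	 */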
	switch (pf->tx_timeout_recovery_level) {
	case 0:
		/* disable and re-enable queues for the VSI */
		if (in_interrupt()) {
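			/* can't sleep in this context, so just flag the
			 * request and let the service task do the reinit
			 */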
			set_bit(__I40E_REINIT_REQUESTED, &pf->state);
			set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
		} else {
			i40e_vsi_reinit_locked(vsi);
		}
		break;
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		i40e_down(vsi);
		break;
	}
	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: storage for the stats to be filled in
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring, *rx_ring;
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
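
		/* the Rx ring lives in the same block of memory as its
		 * Tx ring, immediately after it
		 */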
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings)
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given pf
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_QEMU_DEVICE_ID) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
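	/* the hardware counter is only 48 bits wide, so if the new
	 * reading is below the saved offset the counter has wrapped;
	 * add 2^48 back in before subtracting
	 */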
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + ((u64)1 << 48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	int idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u64 xoff = 0;
	u16 i, v;

	if ((hw->fc.current_mode != I40E_FC_FULL) &&
	    (hw->fc.current_mode != I40E_FC_RX_PAUSE))
		return;

	xoff = nsd->link_xoff_rx;
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);

	/* No new LFC xoff rx */
	if (!(nsd->link_xoff_rx - xoff))
		return;

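	/* new PAUSE frames mean the link partner throttled our Tx, so a
	 * stalled queue is expected and must not trip the hang detector
	 */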
	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];
			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
		}
	}
}

/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
	struct i40e_dcbx_config *dcb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u16 i, v;
	u8 tc;

	dcb_cfg = &hw->local_dcbx_config;

	/* See if DCB enabled with PFC TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
	    !(dcb_cfg->pfc.pfcenable)) {
		i40e_update_link_xoff_rx(pf);
		return;
	}

	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		u64 prio_xoff = nsd->priority_xoff_rx[i];
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);

		/* No new PFC xoff rx */
		if (!(nsd->priority_xoff_rx[i] - prio_xoff))
			continue;
		/* Get the TC for given priority */
		tc = dcb_cfg->etscfg.prioritytable[i];
		xoff[tc] = true;
	}

	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		struct i40e_vsi *vsi = pf->vsi[v];

		if (!vsi)
			continue;

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring = vsi->tx_rings[i];

			tc = ring->dcb_tc;
			if (xoff[tc])
				clear_bit(__I40E_HANG_CHECK_ARMED,
					  &ring->state);
		}
	}
}

/**
 * i40e_update_stats - Update the board statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it all out here in a central place.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u32 rx_page, rx_buf;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	int i;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		struct i40e_ring *p;
		u64 bytes, packets;
		unsigned int start;

		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_rx_buff_failed;
		rx_page += p->rx_stats.alloc_rx_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	i40e_update_eth_stats(vsi);
	/* update netdev stats from eth stats */
	ons->rx_errors = oes->rx_errors;
	ns->rx_errors = es->rx_errors;
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* Get the port data only if this is the main PF VSI */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		struct i40e_hw_port_stats *nsd = &pf->stats;
		struct i40e_hw_port_stats *osd = &pf->stats_offsets;

		i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
				   I40E_GLPRT_GORCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
		i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
				   I40E_GLPRT_GOTCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
		i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_discards,
				   &nsd->eth.rx_discards);
		i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.tx_discards,
				   &nsd->eth.tx_discards);
		i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
				   I40E_GLPRT_MPRCL(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->eth.rx_multicast,
				   &nsd->eth.rx_multicast);

		i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_dropped_link_down,
				   &nsd->tx_dropped_link_down);

		i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->crc_errors, &nsd->crc_errors);
		ns->rx_crc_errors = nsd->crc_errors;

		i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->illegal_bytes, &nsd->illegal_bytes);
		ns->rx_errors = nsd->crc_errors
				+ nsd->illegal_bytes;

		i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_local_faults,
				   &nsd->mac_local_faults);
		i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->mac_remote_faults,
				   &nsd->mac_remote_faults);

		i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_length_errors,
				   &nsd->rx_length_errors);
		ns->rx_length_errors = nsd->rx_length_errors;

		i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_rx, &nsd->link_xon_rx);
		i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xon_tx, &nsd->link_xon_tx);
		i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
		i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->link_xoff_tx, &nsd->link_xoff_tx);

		for (i = 0; i < 8; i++) {
			i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_rx[i],
					   &nsd->priority_xon_rx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_tx[i],
					   &nsd->priority_xon_tx[i]);
			i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xoff_tx[i],
					   &nsd->priority_xoff_tx[i]);
			i40e_stat_update32(hw,
					   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
					   pf->stat_offsets_loaded,
					   &osd->priority_xon_2_xoff[i],
					   &nsd->priority_xon_2_xoff[i]);
		}

		i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
				   I40E_GLPRT_PRC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_64, &nsd->rx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
				   I40E_GLPRT_PRC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_127, &nsd->rx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
				   I40E_GLPRT_PRC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_255, &nsd->rx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
				   I40E_GLPRT_PRC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_511, &nsd->rx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
				   I40E_GLPRT_PRC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1023, &nsd->rx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
				   I40E_GLPRT_PRC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_1522, &nsd->rx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
				   I40E_GLPRT_PRC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_size_big, &nsd->rx_size_big);

		i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
				   I40E_GLPRT_PTC64L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_64, &nsd->tx_size_64);
		i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
				   I40E_GLPRT_PTC127L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_127, &nsd->tx_size_127);
		i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
				   I40E_GLPRT_PTC255L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_255, &nsd->tx_size_255);
		i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
				   I40E_GLPRT_PTC511L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_511, &nsd->tx_size_511);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
				   I40E_GLPRT_PTC1023L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1023, &nsd->tx_size_1023);
		i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
				   I40E_GLPRT_PTC1522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_1522, &nsd->tx_size_1522);
		i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
				   I40E_GLPRT_PTC9522L(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->tx_size_big, &nsd->tx_size_big);

		i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_undersize, &nsd->rx_undersize);
		i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_fragments, &nsd->rx_fragments);
		i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_oversize, &nsd->rx_oversize);
		i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
				   pf->stat_offsets_loaded,
				   &osd->rx_jabber, &nsd->rx_jabber);
	}

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan)    &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only a vlan of -1 on every filter denotes "not in vlan mode",
	 * so we have to walk the whole list to be sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0)
			return true;
	}

	return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a vf
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		memcpy(f->macaddr, macaddr, ETH_ALEN);
		f->vlan = vlan;
		f->changed = true;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}

/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by vf or netdev */
		int min_f = 0;
		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		f->changed = true;
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct sockaddr *addr = p;
	struct i40e_mac_filter *f;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;
		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_ONLY,
						addr->sa_data, NULL);
		if (ret) {
			netdev_info(netdev,
				    "Addr change for Main VSI failed: %d\n",
				    ret);
			return -EADDRNOTAVAIL;
		}

		memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
	}

	/* In order to be sure to not drop any packets, add the new address
	 * then delete the old one.
	 */
	f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
	if (!f)
		return -ENOMEM;

	i40e_sync_vsi_filters(vsi);
	i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
	i40e_sync_vsi_filters(vsi);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & (1 << i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
			int pow, num_qps;

			vsi->tc_config.tc_info[i].qoffset = offset;
			switch (vsi->type) {
			case I40E_VSI_MAIN:
				if (i == 0)
					qcount = pf->rss_size;
				else
					qcount = pf->num_tc_qps;
				vsi->tc_config.tc_info[i].qcount = qcount;
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = vsi->alloc_queue_pairs;
				vsi->tc_config.tc_info[i].qcount = qcount;
				WARN_ON(i != 0);
				break;
			}

			/* find the power-of-2 of the number of queue pairs */
			num_qps = vsi->tc_config.tc_info[i].qcount;
			pow = 0;
			while (num_qps &&
			       ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
				pow++;
				num_qps >>= 1;
			}

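			/* the hardware wants the per-TC queue count
			 * expressed as a power-of-2 exponent, packed
			 * together with the queue offset into the
			 * 16-bit qmap word below
			 */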
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += vsi->tc_config.tc_info[i].qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		bool found = false;

		if (!f->is_netdev)
			continue;

		if (is_multicast_ether_addr(f->macaddr)) {
			netdev_for_each_mc_addr(mca, netdev) {
				if (ether_addr_equal(mca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		} else {
			netdev_for_each_uc_addr(uca, netdev) {
				if (ether_addr_equal(uca->addr, f->macaddr)) {
					found = true;
					break;
				}
			}

			for_each_dev_addr(netdev, ha) {
				if (ether_addr_equal(ha->addr, f->macaddr)) {
					found = true;
					break;
				}
			}
		}
		if (!found)
			i40e_del_filter(
			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
	}

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	bool promisc_forced_on = false;
	bool add_happened = false;
	int filter_list_len = 0;
	u32 changed_flags = 0;
	i40e_status aq_ret = 0;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	u16 cmd_flags;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

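	/* take the per-VSI "config busy" lock; sleep-wait rather than
	 * spin so we don't burn CPU while another caller finishes
	 */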
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		filter_list_len = pf->hw.aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kcalloc(filter_list_len,
			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
			    GFP_KERNEL);
		if (!del_list) {
			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}

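		/* walk the filter list and batch deletions into chunks
		 * sized to the AdminQ buffer, flushing whenever the
		 * element array fills up
		 */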
		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter != 0)
				continue;
			f->changed = false;
			cmd_flags = 0;

			/* add to delete list */
			memcpy(del_list[num_del].mac_addr,
			       f->macaddr, ETH_ALEN);
			del_list[num_del].vlan_tag =
				cpu_to_le16((u16)(f->vlan ==
					    I40E_VLAN_ANY ? 0 : f->vlan));

			/* vlan0 as wild card to allow packets from all vlans */
			if (f->vlan == I40E_VLAN_ANY ||
			    (vsi->netdev && !(vsi->netdev->features &
					      NETIF_F_HW_VLAN_CTAG_FILTER)))
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* unlink from filter list */
			list_del(&f->list);
			kfree(f);

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
					    vsi->seid, del_list, num_del,
					    NULL);
				num_del = 0;
				memset(del_list, 0, sizeof(*del_list));

				if (aq_ret)
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
						 aq_ret,
						 pf->hw.aq.asq_last_status);
			}
		}
		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
							del_list, num_del,
							NULL);
			num_del = 0;

			if (aq_ret)
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error, err %d, aq_err %d\n",
					 aq_ret, pf->hw.aq.asq_last_status);
		}

		kfree(del_list);
		del_list = NULL;

		/* do all the adds now */
		filter_list_len = pf->hw.aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kcalloc(filter_list_len,
			       sizeof(struct i40e_aqc_add_macvlan_element_data),
			       GFP_KERNEL);
		if (!add_list) {
			clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
			return -ENOMEM;
		}

		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!f->changed)
				continue;

			if (f->counter == 0)
				continue;
			f->changed = false;
			add_happened = true;
			cmd_flags = 0;

			/* add to add array */
			memcpy(add_list[num_add].mac_addr,
			       f->macaddr, ETH_ALEN);
			add_list[num_add].vlan_tag =
				cpu_to_le16(
				 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
			add_list[num_add].queue_number = 0;

			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;

			/* vlan0 as wild card to allow packets from all vlans */
			if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
			    !(vsi->netdev->features &
				     NETIF_F_HW_VLAN_CTAG_FILTER)))
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
							     add_list, num_add,
							     NULL);
				num_add = 0;

				if (aq_ret)
					break;
				memset(add_list, 0, sizeof(*add_list));
			}
		}
		if (num_add) {
			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
						     add_list, num_add, NULL);
			num_add = 0;
		}
		kfree(add_list);
		add_list = NULL;

		if (add_happened && (!aq_ret)) {
			/* do nothing */;
		} else if (add_happened && (aq_ret)) {
			dev_info(&pf->pdev->dev,
				 "add filter failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
				      &vsi->state)) {
				promisc_forced_on = true;
				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state);
				dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
			}
		}
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;
		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}
	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
		bool cur_promisc;
		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state));
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
							     vsi->seid,
							     cur_promisc, NULL);
		if (aq_ret)
			dev_info(&pf->pdev->dev,
				 "set uni promisc failed, err %d, aq_err %d\n",
				 aq_ret, pf->hw.aq.asq_last_status);
	}

	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
	return 0;
}

/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
			i40e_sync_vsi_filters(pf->vsi[v]);
	}
}

/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	struct i40e_vsi *vsi = np->vsi;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
		return -EINVAL;

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);

	return 0;
}

/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;  /* already enabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}

/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "%s: update vsi failed, aq_err=%d\n",
			 __func__, vsi->back->hw.aq.asq_last_status);
	}
}

/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/
static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);
}
1772
1773/**
1774 * i40e_vsi_add_vlan - Add vsi membership for given vlan
1775 * @vsi: the vsi being configured
1776 * @vid: vlan id to be added (0 = untagged only , -1 = any)
1777 **/
1778int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
1779{
1780 struct i40e_mac_filter *f, *add_f;
1781 bool is_netdev, is_vf;
1782 int ret;
1783
1784 is_vf = (vsi->type == I40E_VSI_SRIOV);
1785 is_netdev = !!(vsi->netdev);
1786
1787 if (is_netdev) {
1788 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
1789 is_vf, is_netdev);
1790 if (!add_f) {
1791 dev_info(&vsi->back->pdev->dev,
1792 "Could not add vlan filter %d for %pM\n",
1793 vid, vsi->netdev->dev_addr);
1794 return -ENOMEM;
1795 }
1796 }
1797
1798 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1799 add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
1800 if (!add_f) {
1801 dev_info(&vsi->back->pdev->dev,
1802 "Could not add vlan filter %d for %pM\n",
1803 vid, f->macaddr);
1804 return -ENOMEM;
1805 }
1806 }
1807
1808 ret = i40e_sync_vsi_filters(vsi);
1809 if (ret) {
1810 dev_info(&vsi->back->pdev->dev,
1811 "Could not sync filters for vid %d\n", vid);
1812 return ret;
1813 }
1814
1815 /* Now if we add a vlan tag, make sure to check if it is the first
1816 * tag (i.e. a "tag" of -1 already exists) and if so replace the -1
1817 * "tag" with 0, so that we accept untagged plus the specified tagged
1818 * traffic (and no longer any-tagged and untagged)
1819 */
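 /* Illustration: if only a (MAC, -1) filter exists and vid 100 is
 * added, the result below is a (MAC, 0) plus a (MAC, 100) filter.
 */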
1820 if (vid > 0) {
1821 if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
1822 I40E_VLAN_ANY,
1823 is_vf, is_netdev)) {
1824 i40e_del_filter(vsi, vsi->netdev->dev_addr,
1825 I40E_VLAN_ANY, is_vf, is_netdev);
1826 add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
1827 is_vf, is_netdev);
1828 if (!add_f) {
1829 dev_info(&vsi->back->pdev->dev,
1830 "Could not add filter 0 for %pM\n",
1831 vsi->netdev->dev_addr);
1832 return -ENOMEM;
1833 }
1834 }
1835
1836 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1837 if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1838 is_vf, is_netdev)) {
1839 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1840 is_vf, is_netdev);
1841 add_f = i40e_add_filter(vsi, f->macaddr,
1842 0, is_vf, is_netdev);
1843 if (!add_f) {
1844 dev_info(&vsi->back->pdev->dev,
1845 "Could not add filter 0 for %pM\n",
1846 f->macaddr);
1847 return -ENOMEM;
1848 }
1849 }
1850 }
1851 ret = i40e_sync_vsi_filters(vsi);
1852 }
1853
1854 return ret;
1855}
1856
1857/**
1858 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
1859 * @vsi: the vsi being configured
1860 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
1861 *
1862 * Return: 0 on success or negative otherwise
1863 **/
1864int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
1865{
1866 struct net_device *netdev = vsi->netdev;
1867 struct i40e_mac_filter *f, *add_f;
1868 bool is_vf, is_netdev;
1869 int filter_count = 0;
1870 int ret;
1871
1872 is_vf = (vsi->type == I40E_VSI_SRIOV);
1873 is_netdev = !!(netdev);
1874
1875 if (is_netdev)
1876 i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
1877
1878 list_for_each_entry(f, &vsi->mac_filter_list, list)
1879 i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
1880
1881 ret = i40e_sync_vsi_filters(vsi);
1882 if (ret) {
1883 dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
1884 return ret;
1885 }
1886
1887 /* go through all the filters for this VSI and if there is only
1888 * vid == 0 it means there are no other filters, so vid 0 must
1889 * be replaced with -1. This signifies that we should from now
1890 * on accept any traffic (with any tag present, or untagged)
1891 */
1892 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1893 if (is_netdev) {
1894 if (f->vlan &&
1895 ether_addr_equal(netdev->dev_addr, f->macaddr))
1896 filter_count++;
1897 }
1898
1899 if (f->vlan)
1900 filter_count++;
1901 }
1902
1903 if (!filter_count && is_netdev) {
1904 i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
1905 f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1906 is_vf, is_netdev);
1907 if (!f) {
1908 dev_info(&vsi->back->pdev->dev,
1909 "Could not add filter %d for %pM\n",
1910 I40E_VLAN_ANY, netdev->dev_addr);
1911 return -ENOMEM;
1912 }
1913 }
1914
1915 if (!filter_count) {
1916 list_for_each_entry(f, &vsi->mac_filter_list, list) {
1917 i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
1918 add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1919 is_vf, is_netdev);
1920 if (!add_f) {
1921 dev_info(&vsi->back->pdev->dev,
1922 "Could not add filter %d for %pM\n",
1923 I40E_VLAN_ANY, f->macaddr);
1924 return -ENOMEM;
1925 }
1926 }
1927 }
1928
1929 return i40e_sync_vsi_filters(vsi);
1930}
1931
1932/**
1933 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
1934 * @netdev: network interface to be adjusted
1935 * @vid: vlan id to be added
1936 *
1937 * net_device_ops implementation for adding vlan ids
1938 **/
1939static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1940 __always_unused __be16 proto, u16 vid)
1941{
1942 struct i40e_netdev_priv *np = netdev_priv(netdev);
1943 struct i40e_vsi *vsi = np->vsi;
1944 int ret = 0;
1945
1946 if (vid > 4095)
1947 return -EINVAL;
1948
1949 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
1950
1951 /* If the network stack called us with vid = 0, we should
1952 * indicate to i40e_vsi_add_vlan() that we want to receive
1953 * any traffic (i.e. with any vlan tag, or untagged)
1954 */
1955 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
1956
1957 if (!ret && (vid < VLAN_N_VID))
1958 set_bit(vid, vsi->active_vlans);
1959
1960 return ret;
1961}
1962
1963/**
1964 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
1965 * @netdev: network interface to be adjusted
1966 * @vid: vlan id to be removed
1967 *
1968 * net_device_ops implementation for removing vlan ids
1969 **/
1970static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1971 __always_unused __be16 proto, u16 vid)
1972{
1973 struct i40e_netdev_priv *np = netdev_priv(netdev);
1974 struct i40e_vsi *vsi = np->vsi;
1975
1976 netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
1977
1978 /* return code is ignored as there is nothing a user
1979 * can do about failure to remove and a log message was
1980 * already printed from the other function
1981 */
1982 i40e_vsi_kill_vlan(vsi, vid);
1983
1984 clear_bit(vid, vsi->active_vlans);
1985
1986 return 0;
1987}
1988
1989/**
1990 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
1991 * @vsi: the vsi being brought back up
1992 **/
1993static void i40e_restore_vlan(struct i40e_vsi *vsi)
1994{
1995 u16 vid;
1996
1997 if (!vsi->netdev)
1998 return;
1999
2000 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2001
2002 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2003 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2004 vid);
2005}
2006
2007/**
2008 * i40e_vsi_add_pvid - Add pvid for the VSI
2009 * @vsi: the vsi being adjusted
2010 * @vid: the vlan id to set as a PVID
2011 **/
2012int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2013{
2014 struct i40e_vsi_context ctxt;
2015 i40e_status aq_ret;
2016
2017 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2018 vsi->info.pvid = cpu_to_le16(vid);
2019 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
2020 vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
2021
2022 ctxt.seid = vsi->seid;
2023 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2024 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2025 if (aq_ret) {
2026 dev_info(&vsi->back->pdev->dev,
2027 "%s: update vsi failed, aq_err=%d\n",
2028 __func__, vsi->back->hw.aq.asq_last_status);
2029 return -ENOENT;
2030 }
2031
2032 return 0;
2033}
2034
2035/**
2036 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2037 * @vsi: the vsi being adjusted
2038 *
2039 * Just use the vlan_rx_register() service to put it back to normal
2040 **/
2041void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2042{
2043 vsi->info.pvid = 0;
2044 i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2045}
2046
2047/**
2048 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2049 * @vsi: ptr to the VSI
2050 *
2051 * If this function returns with an error, then it's possible one or
2052 * more of the rings is populated (while the rest are not). It is the
2053 * caller's duty to clean those orphaned rings.
2054 *
2055 * Return 0 on success, negative on failure
2056 **/
2057static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2058{
2059 int i, err = 0;
2060
2061 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2062 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2063
2064 return err;
2065}
2066
2067/**
2068 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2069 * @vsi: ptr to the VSI
2070 *
2071 * Free VSI's transmit software resources
2072 **/
2073static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2074{
2075 int i;
2076
2077 for (i = 0; i < vsi->num_queue_pairs; i++)
2078 if (vsi->tx_rings[i]->desc)
2079 i40e_free_tx_resources(vsi->tx_rings[i]);
2080}
2081
2082/**
2083 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2084 * @vsi: ptr to the VSI
2085 *
2086 * If this function returns with an error, then it's possible one or
2087 * more of the rings is populated (while the rest are not). It is the
2088 * caller's duty to clean those orphaned rings.
2089 *
2090 * Return 0 on success, negative on failure
2091 **/
2092static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2093{
2094 int i, err = 0;
2095
2096 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2097 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2098 return err;
2099}
2100
2101/**
2102 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2103 * @vsi: ptr to the VSI
2104 *
2105 * Free all receive software resources
2106 **/
2107static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2108{
2109 int i;
2110
2111 for (i = 0; i < vsi->num_queue_pairs; i++)
2112 if (vsi->rx_rings[i]->desc)
2113 i40e_free_rx_resources(vsi->rx_rings[i]);
2114}
2115
2116/**
2117 * i40e_configure_tx_ring - Configure a transmit ring context and the rest of the ring setup
2118 * @ring: The Tx ring to configure
2119 *
2120 * Configure the Tx descriptor ring in the HMC context.
2121 **/
2122static int i40e_configure_tx_ring(struct i40e_ring *ring)
2123{
2124 struct i40e_vsi *vsi = ring->vsi;
2125 u16 pf_q = vsi->base_queue + ring->queue_index;
2126 struct i40e_hw *hw = &vsi->back->hw;
2127 struct i40e_hmc_obj_txq tx_ctx;
2128 i40e_status err = 0;
2129 u32 qtx_ctl = 0;
2130
2131 /* some ATR related tx ring init */
2132 if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
2133 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2134 ring->atr_count = 0;
2135 } else {
2136 ring->atr_sample_rate = 0;
2137 }
2138
2139 /* initialize XPS */
2140 if (ring->q_vector && ring->netdev &&
2141 !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2142 netif_set_xps_queue(ring->netdev,
2143 &ring->q_vector->affinity_mask,
2144 ring->queue_index);
2145
2146 /* clear the context structure first */
2147 memset(&tx_ctx, 0, sizeof(tx_ctx));
2148
2149 tx_ctx.new_context = 1;
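 /* the HMC context takes the ring base address in 128-byte units,
 * hence the division below
 */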
2150 tx_ctx.base = (ring->dma / 128);
2151 tx_ctx.qlen = ring->count;
2152 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
2153 I40E_FLAG_FDIR_ATR_ENABLED));
2154
2155 /* As part of VSI creation/update, FW allocates certain
2156 * Tx arbitration queue sets for each TC enabled for
2157 * the VSI. The FW returns the handles to these queue
2158 * sets as part of the response buffer to Add VSI,
2159 * Update VSI, etc. AQ commands. It is expected that
2160 * these queue set handles be associated with the Tx
2161 * queues by the driver as part of the TX queue context
2162 * initialization. This has to be done regardless of
2163 * DCB as by default everything is mapped to TC0.
2164 */
2165 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2166 tx_ctx.rdylist_act = 0;
2167
2168 /* clear the context in the HMC */
2169 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2170 if (err) {
2171 dev_info(&vsi->back->pdev->dev,
2172 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2173 ring->queue_index, pf_q, err);
2174 return -ENOMEM;
2175 }
2176
2177 /* set the context in the HMC */
2178 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2179 if (err) {
2180 dev_info(&vsi->back->pdev->dev,
2181 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2182 ring->queue_index, pf_q, err);
2183 return -ENOMEM;
2184 }
2185
2186 /* Now associate this queue with this PCI function */
2187 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2188 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2189 I40E_QTX_CTL_PF_INDX_MASK);
2190 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2191 i40e_flush(hw);
2192
2193 clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2194
2195 /* cache tail off for easier writes later */
2196 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2197
2198 return 0;
2199}
2200
2201/**
2202 * i40e_configure_rx_ring - Configure a receive ring context
2203 * @ring: The Rx ring to configure
2204 *
2205 * Configure the Rx descriptor ring in the HMC context.
2206 **/
2207static int i40e_configure_rx_ring(struct i40e_ring *ring)
2208{
2209 struct i40e_vsi *vsi = ring->vsi;
2210 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2211 u16 pf_q = vsi->base_queue + ring->queue_index;
2212 struct i40e_hw *hw = &vsi->back->hw;
2213 struct i40e_hmc_obj_rxq rx_ctx;
2214 i40e_status err = 0;
2215
2216 ring->state = 0;
2217
2218 /* clear the context structure first */
2219 memset(&rx_ctx, 0, sizeof(rx_ctx));
2220
2221 ring->rx_buf_len = vsi->rx_buf_len;
2222 ring->rx_hdr_len = vsi->rx_hdr_len;
2223
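 /* the context fields hold the buffer lengths right-shifted by the
 * DBUFF/HBUFF granularity; the ALIGN() calls in
 * i40e_vsi_configure_rx() ensure nothing is lost in the shift
 */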
2224 rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2225 rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2226
2227 rx_ctx.base = (ring->dma / 128);
2228 rx_ctx.qlen = ring->count;
2229
2230 if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2231 set_ring_16byte_desc_enabled(ring);
2232 rx_ctx.dsize = 0;
2233 } else {
2234 rx_ctx.dsize = 1;
2235 }
2236
2237 rx_ctx.dtype = vsi->dtype;
2238 if (vsi->dtype) {
2239 set_ring_ps_enabled(ring);
2240 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
2241 I40E_RX_SPLIT_IP |
2242 I40E_RX_SPLIT_TCP_UDP |
2243 I40E_RX_SPLIT_SCTP;
2244 } else {
2245 rx_ctx.hsplit_0 = 0;
2246 }
2247
2248 rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2249 (chain_len * ring->rx_buf_len));
2250 rx_ctx.tphrdesc_ena = 1;
2251 rx_ctx.tphwdesc_ena = 1;
2252 rx_ctx.tphdata_ena = 1;
2253 rx_ctx.tphhead_ena = 1;
2254 if (hw->revision_id == 0)
2255 rx_ctx.lrxqthresh = 0;
2256 else
2257 rx_ctx.lrxqthresh = 2;
2258 rx_ctx.crcstrip = 1;
2259 rx_ctx.l2tsel = 1;
2260 rx_ctx.showiv = 1;
2261
2262 /* clear the context in the HMC */
2263 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2264 if (err) {
2265 dev_info(&vsi->back->pdev->dev,
2266 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2267 ring->queue_index, pf_q, err);
2268 return -ENOMEM;
2269 }
2270
2271 /* set the context in the HMC */
2272 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2273 if (err) {
2274 dev_info(&vsi->back->pdev->dev,
2275 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2276 ring->queue_index, pf_q, err);
2277 return -ENOMEM;
2278 }
2279
2280 /* cache tail for quicker writes, and clear the reg before use */
2281 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2282 writel(0, ring->tail);
2283
2284 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2285
2286 return 0;
2287}
2288
2289/**
2290 * i40e_vsi_configure_tx - Configure the VSI for Tx
2291 * @vsi: VSI structure describing this set of rings and resources
2292 *
2293 * Configure the Tx VSI for operation.
2294 **/
2295static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2296{
2297 int err = 0;
2298 u16 i;
2299
2300 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2301 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2302
2303 return err;
2304}
2305
2306/**
2307 * i40e_vsi_configure_rx - Configure the VSI for Rx
2308 * @vsi: the VSI being configured
2309 *
2310 * Configure the Rx VSI for operation.
2311 **/
2312static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2313{
2314 int err = 0;
2315 u16 i;
2316
2317 if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2318 vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2319 + ETH_FCS_LEN + VLAN_HLEN;
2320 else
2321 vsi->max_frame = I40E_RXBUFFER_2048;
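 /* e.g. a 9000-byte MTU gives max_frame = 9000 + 14 + 4 + 4 = 9022 */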
2322
2323 /* figure out correct receive buffer length */
2324 switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2325 I40E_FLAG_RX_PS_ENABLED)) {
2326 case I40E_FLAG_RX_1BUF_ENABLED:
2327 vsi->rx_hdr_len = 0;
2328 vsi->rx_buf_len = vsi->max_frame;
2329 vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2330 break;
2331 case I40E_FLAG_RX_PS_ENABLED:
2332 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2333 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2334 vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2335 break;
2336 default:
2337 vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2338 vsi->rx_buf_len = I40E_RXBUFFER_2048;
2339 vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2340 break;
2341 }
2342
2343 /* round up for the chip's needs */
2344 vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2345 (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2346 vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2347 (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2348
2349 /* set up individual rings */
2350 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2351 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2352
2353 return err;
2354}
2355
2356/**
2357 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2358 * @vsi: ptr to the VSI
2359 **/
2360static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2361{
2362 u16 qoffset, qcount;
2363 int i, n;
2364
2365 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2366 return;
2367
2368 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2369 if (!(vsi->tc_config.enabled_tc & (1 << n)))
2370 continue;
2371
2372 qoffset = vsi->tc_config.tc_info[n].qoffset;
2373 qcount = vsi->tc_config.tc_info[n].qcount;
2374 for (i = qoffset; i < (qoffset + qcount); i++) {
2375 struct i40e_ring *rx_ring = vsi->rx_rings[i];
2376 struct i40e_ring *tx_ring = vsi->tx_rings[i];
2377 rx_ring->dcb_tc = n;
2378 tx_ring->dcb_tc = n;
2379 }
2380 }
2381}
2382
2383/**
2384 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2385 * @vsi: ptr to the VSI
2386 **/
2387static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2388{
2389 if (vsi->netdev)
2390 i40e_set_rx_mode(vsi->netdev);
2391}
2392
2393/**
2394 * i40e_vsi_configure - Set up the VSI for action
2395 * @vsi: the VSI being configured
2396 **/
2397static int i40e_vsi_configure(struct i40e_vsi *vsi)
2398{
2399 int err;
2400
2401 i40e_set_vsi_rx_mode(vsi);
2402 i40e_restore_vlan(vsi);
2403 i40e_vsi_config_dcb_rings(vsi);
2404 err = i40e_vsi_configure_tx(vsi);
2405 if (!err)
2406 err = i40e_vsi_configure_rx(vsi);
2407
2408 return err;
2409}
2410
2411/**
2412 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2413 * @vsi: the VSI being configured
2414 **/
2415static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2416{
2417 struct i40e_pf *pf = vsi->back;
2418 struct i40e_q_vector *q_vector;
2419 struct i40e_hw *hw = &pf->hw;
2420 u16 vector;
2421 int i, q;
2422 u32 val;
2423 u32 qp;
2424
2425 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2426 * and PFINT_LNKLSTn registers, e.g.:
2427 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
2428 */
2429 qp = vsi->base_queue;
2430 vector = vsi->base_vector;
2431 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2432 q_vector = vsi->q_vectors[i];
2433 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2434 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2435 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2436 q_vector->rx.itr);
2437 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2438 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2439 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2440 q_vector->tx.itr);
2441
2442 /* Linked list for the queuepairs assigned to this vector */
2443 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2444 for (q = 0; q < q_vector->num_ringpairs; q++) {
2445 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2446 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2447 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2448 (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2449 (I40E_QUEUE_TYPE_TX
2450 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2451
2452 wr32(hw, I40E_QINT_RQCTL(qp), val);
2453
2454 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2455 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2456 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2457 ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2458 (I40E_QUEUE_TYPE_RX
2459 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2460
2461 /* Terminate the linked list */
2462 if (q == (q_vector->num_ringpairs - 1))
2463 val |= (I40E_QUEUE_END_OF_LIST
2464 << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2465
2466 wr32(hw, I40E_QINT_TQCTL(qp), val);
2467 qp++;
2468 }
2469 }
2470
2471 i40e_flush(hw);
2472}
2473
2474/**
2475 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2476 * @hw: ptr to the hardware info
2477 **/
2478static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2479{
2480 u32 val;
2481
2482 /* clear things first */
2483 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
2484 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
2485
2486 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
2487 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
2488 I40E_PFINT_ICR0_ENA_GRST_MASK |
2489 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2490 I40E_PFINT_ICR0_ENA_GPIO_MASK |
2491 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
2492 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
2493 I40E_PFINT_ICR0_ENA_VFLR_MASK |
2494 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2495
2496 wr32(hw, I40E_PFINT_ICR0_ENA, val);
2497
2498 /* SW_ITR_IDX = 0, but don't change INTENA */
2499 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2500 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2501
2502 /* OTHER_ITR_IDX = 0 */
2503 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2504}
2505
2506/**
2507 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2508 * @vsi: the VSI being configured
2509 **/
2510static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2511{
2512 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2513 struct i40e_pf *pf = vsi->back;
2514 struct i40e_hw *hw = &pf->hw;
2515 u32 val;
2516
2517 /* set the ITR configuration */
2518 q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2519 q_vector->rx.latency_range = I40E_LOW_LATENCY;
2520 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2521 q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2522 q_vector->tx.latency_range = I40E_LOW_LATENCY;
2523 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2524
2525 i40e_enable_misc_int_causes(hw);
2526
2527 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2528 wr32(hw, I40E_PFINT_LNKLST0, 0);
2529
2530 /* Associate the queue pair to the vector and enable the q int */
2531 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2532 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2533 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2534
2535 wr32(hw, I40E_QINT_RQCTL(0), val);
2536
2537 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2538 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2539 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2540
2541 wr32(hw, I40E_QINT_TQCTL(0), val);
2542 i40e_flush(hw);
2543}
2544
2545/**
2546 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
2547 * @pf: board private structure
2548 **/
2549void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
2550{
2551 struct i40e_hw *hw = &pf->hw;
2552
2553 wr32(hw, I40E_PFINT_DYN_CTL0,
2554 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2555 i40e_flush(hw);
2556}
2557
2558/**
2559 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2560 * @pf: board private structure
2561 **/
2562void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2563{
2564 struct i40e_hw *hw = &pf->hw;
2565 u32 val;
2566
2567 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2568 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2569 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2570
2571 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2572 i40e_flush(hw);
2573}
2574
2575/**
2576 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2577 * @vsi: pointer to a vsi
2578 * @vector: enable a particular Hw Interrupt vector
2579 **/
2580void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2581{
2582 struct i40e_pf *pf = vsi->back;
2583 struct i40e_hw *hw = &pf->hw;
2584 u32 val;
2585
2586 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2587 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2588 (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2589 wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2590 /* skip the flush */
2591}
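/* Note: the flush is intentionally skipped above so that callers such
 * as i40e_vsi_enable_irq() can batch the per-vector register writes
 * and issue a single i40e_flush() afterwards.
 */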
2592
2593/**
2594 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2595 * @irq: interrupt number
2596 * @data: pointer to a q_vector
2597 **/
2598static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2599{
2600 struct i40e_q_vector *q_vector = data;
2601
2602 if (!q_vector->tx.ring && !q_vector->rx.ring)
2603 return IRQ_HANDLED;
2604
2605 napi_schedule(&q_vector->napi);
2606
2607 return IRQ_HANDLED;
2608}
2609
2610/**
2611 * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
2612 * @irq: interrupt number
2613 * @data: pointer to a q_vector
2614 **/
2615static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
2616{
2617 struct i40e_q_vector *q_vector = data;
2618
2619 if (!q_vector->tx.ring && !q_vector->rx.ring)
2620 return IRQ_HANDLED;
2621
2622 pr_info("fdir ring cleaning needed\n");
2623
2624 return IRQ_HANDLED;
2625}
2626
2627/**
2628 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2629 * @vsi: the VSI being configured
2630 * @basename: name for the vector
2631 *
2632 * Allocates MSI-X vectors and requests interrupts from the kernel.
2633 **/
2634static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2635{
2636 int q_vectors = vsi->num_q_vectors;
2637 struct i40e_pf *pf = vsi->back;
2638 int base = vsi->base_vector;
2639 int rx_int_idx = 0;
2640 int tx_int_idx = 0;
2641 int vector, err;
2642
2643 for (vector = 0; vector < q_vectors; vector++) {
2644 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
2645
2646 if (q_vector->tx.ring && q_vector->rx.ring) {
2647 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2648 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2649 tx_int_idx++;
2650 } else if (q_vector->rx.ring) {
2651 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2652 "%s-%s-%d", basename, "rx", rx_int_idx++);
2653 } else if (q_vector->tx.ring) {
2654 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2655 "%s-%s-%d", basename, "tx", tx_int_idx++);
2656 } else {
2657 /* skip this unused q_vector */
2658 continue;
2659 }
2660 err = request_irq(pf->msix_entries[base + vector].vector,
2661 vsi->irq_handler,
2662 0,
2663 q_vector->name,
2664 q_vector);
2665 if (err) {
2666 dev_info(&pf->pdev->dev,
2667 "%s: request_irq failed, error: %d\n",
2668 __func__, err);
2669 goto free_queue_irqs;
2670 }
2671 /* assign the mask for this irq */
2672 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2673 &q_vector->affinity_mask);
2674 }
2675
2676 return 0;
2677
2678free_queue_irqs:
2679 while (vector) {
2680 vector--;
2681 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
2682 NULL);
2683 free_irq(pf->msix_entries[base + vector].vector,
2684 &(vsi->q_vectors[vector]));
2685 }
2686 return err;
2687}
2688
2689/**
2690 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
2691 * @vsi: the VSI being un-configured
2692 **/
2693static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
2694{
2695 struct i40e_pf *pf = vsi->back;
2696 struct i40e_hw *hw = &pf->hw;
2697 int base = vsi->base_vector;
2698 int i;
2699
2700 for (i = 0; i < vsi->num_queue_pairs; i++) {
2701 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
2702 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
2703 }
2704
2705 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
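 /* queue vectors start at 1 in the PFINT register space (vector 0
 * is the "other causes" vector), hence the i - 1 indexing below
 */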
2706 for (i = vsi->base_vector;
2707 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2708 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
2709
2710 i40e_flush(hw);
2711 for (i = 0; i < vsi->num_q_vectors; i++)
2712 synchronize_irq(pf->msix_entries[i + base].vector);
2713 } else {
2714 /* Legacy and MSI mode - this stops all interrupt handling */
2715 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
2716 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
2717 i40e_flush(hw);
2718 synchronize_irq(pf->pdev->irq);
2719 }
2720}
2721
2722/**
2723 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
2724 * @vsi: the VSI being configured
2725 **/
2726static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
2727{
2728 struct i40e_pf *pf = vsi->back;
2729 int i;
2730
2731 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2732 for (i = vsi->base_vector;
2733 i < (vsi->num_q_vectors + vsi->base_vector); i++)
2734 i40e_irq_dynamic_enable(vsi, i);
2735 } else {
2736 i40e_irq_dynamic_enable_icr0(pf);
2737 }
2738
2739 i40e_flush(&pf->hw);
2740 return 0;
2741}
2742
2743/**
2744 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
2745 * @pf: board private structure
2746 **/
2747static void i40e_stop_misc_vector(struct i40e_pf *pf)
2748{
2749 /* Disable ICR 0 */
2750 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
2751 i40e_flush(&pf->hw);
2752}
2753
2754/**
2755 * i40e_intr - MSI/Legacy and non-queue interrupt handler
2756 * @irq: interrupt number
2757 * @data: pointer to a q_vector
2758 *
2759 * This is the handler used for all MSI/Legacy interrupts, and deals
2760 * with both queue and non-queue interrupts. This is also used in
2761 * MSIX mode to handle the non-queue interrupts.
2762 **/
2763static irqreturn_t i40e_intr(int irq, void *data)
2764{
2765 struct i40e_pf *pf = (struct i40e_pf *)data;
2766 struct i40e_hw *hw = &pf->hw;
2767 u32 icr0, icr0_remaining;
2768 u32 val, ena_mask;
2769
2770 icr0 = rd32(hw, I40E_PFINT_ICR0);
2771
2772 val = rd32(hw, I40E_PFINT_DYN_CTL0);
2773 val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
2774 wr32(hw, I40E_PFINT_DYN_CTL0, val);
2775
2776 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
2777 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
2778 return IRQ_NONE;
2779
2780 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
2781
2782 /* if interrupt but no bits showing, must be SWINT */
2783 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
2784 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
2785 pf->sw_int_count++;
2786
41c445ff
JB
2787 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
2788 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
2789
2790 /* temporarily disable queue cause for NAPI processing */
2791 u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
2792 qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
2793 wr32(hw, I40E_QINT_RQCTL(0), qval);
2794
2795 qval = rd32(hw, I40E_QINT_TQCTL(0));
2796 qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
2797 wr32(hw, I40E_QINT_TQCTL(0), qval);
2798
2799 if (!test_bit(__I40E_DOWN, &pf->state))
2800 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
2801 }
2802
2803 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
2804 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2805 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
2806 }
2807
2808 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
2809 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2810 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
2811 }
2812
2813 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
2814 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
2815 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
2816 }
2817
2818 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
2819 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
2820 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
2821 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
2822 val = rd32(hw, I40E_GLGEN_RSTAT);
2823 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
2824 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
2825 if (val == I40E_RESET_CORER)
2826 pf->corer_count++;
2827 else if (val == I40E_RESET_GLOBR)
2828 pf->globr_count++;
2829 else if (val == I40E_RESET_EMPR)
2830 pf->empr_count++;
2831 }
2832
2833 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
2834 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
2835 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
2836 }
2837
2838 /* If a critical error is pending we have no choice but to reset the
2839 * device.
2840 * Report and mask out any remaining unexpected interrupts.
2841 */
2842 icr0_remaining = icr0 & ena_mask;
2843 if (icr0_remaining) {
2844 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
2845 icr0_remaining);
2846 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
2847 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
2848 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
2849 (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
2850 dev_info(&pf->pdev->dev, "device will be reset\n");
2851 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
2852 i40e_service_event_schedule(pf);
2853 }
2854 ena_mask &= ~icr0_remaining;
2855 }
2856
2857 /* re-enable interrupt causes */
2858 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
2859 if (!test_bit(__I40E_DOWN, &pf->state)) {
2860 i40e_service_event_schedule(pf);
2861 i40e_irq_dynamic_enable_icr0(pf);
2862 }
2863
2864 return IRQ_HANDLED;
2865}
2866
2867/**
2868 * map_vector_to_qp - Assigns the queue pair to the vector
2869 * @vsi: the VSI being configured
2870 * @v_idx: vector index
2871 * @qp_idx: queue pair index
2872 **/
2873static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
2874{
2875 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
2876 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
2877 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
2878
2879 tx_ring->q_vector = q_vector;
2880 tx_ring->next = q_vector->tx.ring;
2881 q_vector->tx.ring = tx_ring;
2882 q_vector->tx.count++;
2883
2884 rx_ring->q_vector = q_vector;
2885 rx_ring->next = q_vector->rx.ring;
2886 q_vector->rx.ring = rx_ring;
2887 q_vector->rx.count++;
2888}
2889
2890/**
2891 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
2892 * @vsi: the VSI being configured
2893 *
2894 * This function maps descriptor rings to the queue-specific vectors
2895 * we were allotted through the MSI-X enabling code. Ideally, we'd have
2896 * one vector per queue pair, but on a constrained vector budget, we
2897 * group the queue pairs as "efficiently" as possible.
2898 **/
2899static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
2900{
2901 int qp_remaining = vsi->num_queue_pairs;
2902 int q_vectors = vsi->num_q_vectors;
2903 int num_ringpairs;
2904 int v_start = 0;
2905 int qp_idx = 0;
2906
2907 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
2908 * group them so there are multiple queues per vector.
2909 */
2910 for (; v_start < q_vectors && qp_remaining; v_start++) {
2911 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
2912
2913 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
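 /* e.g. 10 queue pairs on 4 vectors come out as 3, 3, 2 and 2 */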
2914
2915 q_vector->num_ringpairs = num_ringpairs;
2916
2917 q_vector->rx.count = 0;
2918 q_vector->tx.count = 0;
2919 q_vector->rx.ring = NULL;
2920 q_vector->tx.ring = NULL;
2921
2922 while (num_ringpairs--) {
2923 map_vector_to_qp(vsi, v_start, qp_idx);
2924 qp_idx++;
2925 qp_remaining--;
2926 }
2927 }
2928}
2929
2930/**
2931 * i40e_vsi_request_irq - Request IRQ from the OS
2932 * @vsi: the VSI being configured
2933 * @basename: name for the vector
2934 **/
2935static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
2936{
2937 struct i40e_pf *pf = vsi->back;
2938 int err;
2939
2940 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
2941 err = i40e_vsi_request_irq_msix(vsi, basename);
2942 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
2943 err = request_irq(pf->pdev->irq, i40e_intr, 0,
2944 pf->misc_int_name, pf);
2945 else
2946 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
2947 pf->misc_int_name, pf);
2948
2949 if (err)
2950 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
2951
2952 return err;
2953}
2954
2955#ifdef CONFIG_NET_POLL_CONTROLLER
2956/**
2957 * i40e_netpoll - A Polling 'interrupt' handler
2958 * @netdev: network interface device structure
2959 *
2960 * This is used by netconsole to send skbs without having to re-enable
2961 * interrupts. It's not called while the normal interrupt routine is executing.
2962 **/
2963static void i40e_netpoll(struct net_device *netdev)
2964{
2965 struct i40e_netdev_priv *np = netdev_priv(netdev);
2966 struct i40e_vsi *vsi = np->vsi;
2967 struct i40e_pf *pf = vsi->back;
2968 int i;
2969
2970 /* if interface is down do nothing */
2971 if (test_bit(__I40E_DOWN, &vsi->state))
2972 return;
2973
2974 pf->flags |= I40E_FLAG_IN_NETPOLL;
2975 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
2976 for (i = 0; i < vsi->num_q_vectors; i++)
2977 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
2978 } else {
2979 i40e_intr(pf->pdev->irq, netdev);
2980 }
2981 pf->flags &= ~I40E_FLAG_IN_NETPOLL;
2982}
2983#endif
2984
2985/**
2986 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
2987 * @vsi: the VSI being configured
2988 * @enable: start or stop the rings
2989 **/
2990static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
2991{
2992 struct i40e_pf *pf = vsi->back;
2993 struct i40e_hw *hw = &pf->hw;
2994 int i, j, pf_q;
2995 u32 tx_reg;
2996
2997 pf_q = vsi->base_queue;
2998 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
2999 j = 1000;
3000 do {
3001 usleep_range(1000, 2000);
3002 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3003 } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
3004 ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
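 /* the loop above waits until QENA_REQ and QENA_STAT agree, i.e.
 * any previously requested enable/disable has completed
 */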
3005
3006 /* Skip if the queue is already in the requested state */
3007 if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3008 continue;
3009 if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3010 continue;
3011
3012 /* turn on/off the queue */
3013 if (enable)
3014 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
3015 I40E_QTX_ENA_QENA_STAT_MASK;
3016 else
3017 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3018
3019 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3020
3021 /* wait for the change to finish */
3022 for (j = 0; j < 10; j++) {
3023 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3024 if (enable) {
3025 if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3026 break;
3027 } else {
3028 if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3029 break;
3030 }
3031
3032 udelay(10);
3033 }
3034 if (j >= 10) {
3035 dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
3036 pf_q, (enable ? "en" : "dis"));
3037 return -ETIMEDOUT;
3038 }
3039 }
3040
3041 if (hw->revision_id == 0)
3042 mdelay(50);
3043
3044 return 0;
3045}
3046
3047/**
3048 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
3049 * @vsi: the VSI being configured
3050 * @enable: start or stop the rings
3051 **/
3052static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3053{
3054 struct i40e_pf *pf = vsi->back;
3055 struct i40e_hw *hw = &pf->hw;
3056 int i, j, pf_q;
3057 u32 rx_reg;
3058
3059 pf_q = vsi->base_queue;
3060 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3061 j = 1000;
3062 do {
3063 usleep_range(1000, 2000);
3064 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3065 } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
3066 ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
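 /* same REQ/STAT handshake as the Tx path above */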
3067
3068 if (enable) {
3069 /* is STAT set ? */
3070 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3071 continue;
3072 } else {
3073 /* is !STAT set ? */
3074 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3075 continue;
3076 }
3077
3078 /* turn on/off the queue */
3079 if (enable)
3080 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
3081 I40E_QRX_ENA_QENA_STAT_MASK;
3082 else
3083 rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
3084 I40E_QRX_ENA_QENA_STAT_MASK);
3085 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3086
3087 /* wait for the change to finish */
3088 for (j = 0; j < 10; j++) {
3089 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3090
3091 if (enable) {
3092 if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3093 break;
3094 } else {
3095 if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3096 break;
3097 }
3098
3099 udelay(10);
3100 }
3101 if (j >= 10) {
3102 dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
3103 pf_q, (enable ? "en" : "dis"));
3104 return -ETIMEDOUT;
3105 }
3106 }
3107
3108 return 0;
3109}
3110
3111/**
3112 * i40e_vsi_control_rings - Start or stop a VSI's rings
3113 * @vsi: the VSI being configured
3114 * @request: true to start the rings, false to stop them
3115 **/
3116int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3117{
3118 int ret;
3119
3120 /* do rx first for enable and last for disable */
3121 if (request) {
3122 ret = i40e_vsi_control_rx(vsi, request);
3123 if (ret)
3124 return ret;
3125 ret = i40e_vsi_control_tx(vsi, request);
3126 } else {
3127 ret = i40e_vsi_control_tx(vsi, request);
3128 if (ret)
3129 return ret;
3130 ret = i40e_vsi_control_rx(vsi, request);
3131 }
3132
3133 return ret;
3134}
3135
3136/**
3137 * i40e_vsi_free_irq - Free the irq association with the OS
3138 * @vsi: the VSI being configured
3139 **/
3140static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3141{
3142 struct i40e_pf *pf = vsi->back;
3143 struct i40e_hw *hw = &pf->hw;
3144 int base = vsi->base_vector;
3145 u32 val, qp;
3146 int i;
3147
3148 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3149 if (!vsi->q_vectors)
3150 return;
3151
3152 for (i = 0; i < vsi->num_q_vectors; i++) {
3153 u16 vector = i + base;
3154
3155 /* free only the irqs that were actually requested */
3156 if (!vsi->q_vectors[i] ||
3157 !vsi->q_vectors[i]->num_ringpairs)
3158 continue;
3159
3160 /* clear the affinity_mask in the IRQ descriptor */
3161 irq_set_affinity_hint(pf->msix_entries[vector].vector,
3162 NULL);
3163 free_irq(pf->msix_entries[vector].vector,
3164 vsi->q_vectors[i]);
3165
3166 /* Tear down the interrupt queue link list
3167 *
3168 * We know that they come in pairs and always
3169 * the Rx first, then the Tx. To clear the
3170 * link list, stick the EOL value into the
3171 * next_q field of the registers.
3172 */
3173 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3174 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3175 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3176 val |= I40E_QUEUE_END_OF_LIST
3177 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3178 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3179
3180 while (qp != I40E_QUEUE_END_OF_LIST) {
3181 u32 next;
3182
3183 val = rd32(hw, I40E_QINT_RQCTL(qp));
3184
3185 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3186 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3187 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3188 I40E_QINT_RQCTL_INTEVENT_MASK);
3189
3190 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3191 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3192
3193 wr32(hw, I40E_QINT_RQCTL(qp), val);
3194
3195 val = rd32(hw, I40E_QINT_TQCTL(qp));
3196
3197 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3198 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3199
3200 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3201 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3202 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3203 I40E_QINT_TQCTL_INTEVENT_MASK);
3204
3205 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3206 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3207
3208 wr32(hw, I40E_QINT_TQCTL(qp), val);
3209 qp = next;
3210 }
3211 }
3212 } else {
3213 free_irq(pf->pdev->irq, pf);
3214
3215 val = rd32(hw, I40E_PFINT_LNKLST0);
3216 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3217 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3218 val |= I40E_QUEUE_END_OF_LIST
3219 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3220 wr32(hw, I40E_PFINT_LNKLST0, val);
3221
3222 val = rd32(hw, I40E_QINT_RQCTL(qp));
3223 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
3224 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3225 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3226 I40E_QINT_RQCTL_INTEVENT_MASK);
3227
3228 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3229 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3230
3231 wr32(hw, I40E_QINT_RQCTL(qp), val);
3232
3233 val = rd32(hw, I40E_QINT_TQCTL(qp));
3234
3235 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
3236 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3237 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3238 I40E_QINT_TQCTL_INTEVENT_MASK);
3239
3240 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3241 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3242
3243 wr32(hw, I40E_QINT_TQCTL(qp), val);
3244 }
3245}
3246
3247/**
3248 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3249 * @vsi: the VSI being configured
3250 * @v_idx: Index of vector to be freed
3251 *
3252 * This function frees the memory allocated to the q_vector. In addition if
3253 * NAPI is enabled it will delete any references to the NAPI struct prior
3254 * to freeing the q_vector.
3255 **/
3256static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3257{
3258 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3259 struct i40e_ring *ring;
3260
3261 if (!q_vector)
3262 return;
3263
3264 /* disassociate q_vector from rings */
3265 i40e_for_each_ring(ring, q_vector->tx)
3266 ring->q_vector = NULL;
3267
3268 i40e_for_each_ring(ring, q_vector->rx)
3269 ring->q_vector = NULL;
3270
3271 /* only VSI w/ an associated netdev is set up w/ NAPI */
3272 if (vsi->netdev)
3273 netif_napi_del(&q_vector->napi);
3274
3275 vsi->q_vectors[v_idx] = NULL;
3276
3277 kfree_rcu(q_vector, rcu);
3278}
3279
3280/**
3281 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3282 * @vsi: the VSI being un-configured
3283 *
3284 * This frees the memory allocated to the q_vectors and
3285 * deletes references to the NAPI struct.
3286 **/
3287static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3288{
3289 int v_idx;
3290
3291 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3292 i40e_free_q_vector(vsi, v_idx);
3293}
3294
3295/**
3296 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3297 * @pf: board private structure
3298 **/
3299static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3300{
3301 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3302 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3303 pci_disable_msix(pf->pdev);
3304 kfree(pf->msix_entries);
3305 pf->msix_entries = NULL;
3306 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3307 pci_disable_msi(pf->pdev);
3308 }
3309 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3310}
3311
3312/**
3313 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3314 * @pf: board private structure
3315 *
3316 * We go through and clear interrupt specific resources and reset the structure
3317 * to pre-load conditions
3318 **/
3319static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3320{
3321 int i;
3322
3323 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3324 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
3325 if (pf->vsi[i])
3326 i40e_vsi_free_q_vectors(pf->vsi[i]);
3327 i40e_reset_interrupt_capability(pf);
3328}
3329
3330/**
3331 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3332 * @vsi: the VSI being configured
3333 **/
3334static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3335{
3336 int q_idx;
3337
3338 if (!vsi->netdev)
3339 return;
3340
3341 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3342 napi_enable(&vsi->q_vectors[q_idx]->napi);
3343}
3344
3345/**
3346 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3347 * @vsi: the VSI being configured
3348 **/
3349static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3350{
3351 int q_idx;
3352
3353 if (!vsi->netdev)
3354 return;
3355
3356 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3357 napi_disable(&vsi->q_vectors[q_idx]->napi);
3358}
3359
3360/**
3361 * i40e_quiesce_vsi - Pause a given VSI
3362 * @vsi: the VSI being paused
3363 **/
3364static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3365{
3366 if (test_bit(__I40E_DOWN, &vsi->state))
3367 return;
3368
3369 set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3370 if (vsi->netdev && netif_running(vsi->netdev)) {
3371 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3372 } else {
3373 set_bit(__I40E_DOWN, &vsi->state);
3374 i40e_down(vsi);
3375 }
3376}
3377
3378/**
3379 * i40e_unquiesce_vsi - Resume a given VSI
3380 * @vsi: the VSI being resumed
3381 **/
3382static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3383{
3384 if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3385 return;
3386
3387 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3388 if (vsi->netdev && netif_running(vsi->netdev))
3389 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3390 else
3391 i40e_up(vsi); /* this clears the DOWN bit */
3392}
3393
3394/**
3395 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3396 * @pf: the PF
3397 **/
3398static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3399{
3400 int v;
3401
3402 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3403 if (pf->vsi[v])
3404 i40e_quiesce_vsi(pf->vsi[v]);
3405 }
3406}
3407
3408/**
3409 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3410 * @pf: the PF
3411 **/
3412static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3413{
3414 int v;
3415
3416 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
3417 if (pf->vsi[v])
3418 i40e_unquiesce_vsi(pf->vsi[v]);
3419 }
3420}
3421
3422/**
3423 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
3424 * @dcbcfg: the corresponding DCBx configuration structure
3425 *
3426 * Return the number of TCs from given DCBx configuration
3427 **/
3428static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
3429{
3430 u8 num_tc = 0;
3431 int i;
3432
3433 /* Scan the ETS Config Priority Table to find
3434 * traffic class enabled for a given priority
3435 * and use the traffic class index to get the
3436 * number of traffic classes enabled
3437 */
3438 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3439 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
3440 num_tc = dcbcfg->etscfg.prioritytable[i];
3441 }
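 /* e.g. a priority table of {0, 0, 1, 1, 2, 2, 0, 0} leaves
 * num_tc = 2, so 3 traffic classes are reported
 */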
3442
3443 /* Traffic class index starts from zero so
3444 * increment to return the actual count
3445 */
3446 return num_tc + 1;
3447}
3448
3449/**
3450 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
3451 * @dcbcfg: the corresponding DCBx configuration structure
3452 *
3453 * Query the current DCB configuration and return the number of
3454 * traffic classes enabled from the given DCBX config
3455 **/
3456static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
3457{
3458 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
3459 u8 enabled_tc = 1;
3460 u8 i;
3461
3462 for (i = 0; i < num_tc; i++)
3463 enabled_tc |= 1 << i;
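 /* e.g. num_tc = 3 yields enabled_tc = 0x7 (TC0..TC2) */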
3464
3465 return enabled_tc;
3466}
3467
3468/**
3469 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
3470 * @pf: PF being queried
3471 *
3472 * Return number of traffic classes enabled for the given PF
3473 **/
3474static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
3475{
3476 struct i40e_hw *hw = &pf->hw;
3477 u8 i, enabled_tc;
3478 u8 num_tc = 0;
3479 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3480
3481 /* If DCB is not enabled then always in single TC */
3482 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3483 return 1;
3484
3485 /* MFP mode return count of enabled TCs for this PF */
3486 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3487 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3488 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3489 if (enabled_tc & (1 << i))
3490 num_tc++;
3491 }
3492 return num_tc;
3493 }
3494
3495 /* SFP mode will be enabled for all TCs on port */
3496 return i40e_dcb_get_num_tc(dcbcfg);
3497}
3498
3499/**
3500 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
3501 * @pf: PF being queried
3502 *
3503 * Return a bitmap for first enabled traffic class for this PF.
3504 **/
3505static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
3506{
3507 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
3508 u8 i = 0;
3509
3510 if (!enabled_tc)
3511 return 0x1; /* TC0 */
3512
3513 /* Find the first enabled TC */
3514 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3515 if (enabled_tc & (1 << i))
3516 break;
3517 }
3518
3519 return 1 << i;
3520}
3521
3522/**
3523 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
3524 * @pf: PF being queried
3525 *
3526 * Return a bitmap for enabled traffic classes for this PF.
3527 **/
3528static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
3529{
3530 /* If DCB is not enabled for this PF then just return default TC */
3531 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
3532 return i40e_pf_get_default_tc(pf);
3533
3534 /* MFP mode will have enabled TCs set by FW */
3535 if (pf->flags & I40E_FLAG_MFP_ENABLED)
3536 return pf->hw.func_caps.enabled_tcmap;
3537
3538 /* SFP mode we want PF to be enabled for all TCs */
3539 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
3540}
3541
3542/**
3543 * i40e_vsi_get_bw_info - Query VSI BW Information
3544 * @vsi: the VSI being queried
3545 *
3546 * Returns 0 on success, negative value on failure
3547 **/
3548static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
3549{
3550 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
3551 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
3552 struct i40e_pf *pf = vsi->back;
3553 struct i40e_hw *hw = &pf->hw;
3554 i40e_status aq_ret;
3555 u32 tc_bw_max;
3556 int i;
3557
3558 /* Get the VSI level BW configuration */
3559 aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
3560 if (aq_ret) {
3561 dev_info(&pf->pdev->dev,
3562 "couldn't get pf vsi bw config, err %d, aq_err %d\n",
3563 aq_ret, pf->hw.aq.asq_last_status);
3564 return -EINVAL;
3565 }
3566
3567 /* Get the VSI level BW configuration per TC */
3568 aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
3569 NULL);
3570 if (aq_ret) {
3571 dev_info(&pf->pdev->dev,
3572 "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
3573 aq_ret, pf->hw.aq.asq_last_status);
3574 return -EINVAL;
3575 }
3576
3577 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
3578 dev_info(&pf->pdev->dev,
3579 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
3580 bw_config.tc_valid_bits,
3581 bw_ets_config.tc_valid_bits);
3582 /* Still continuing */
3583 }
3584
3585 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
3586 vsi->bw_max_quanta = bw_config.max_bw;
3587 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
3588 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
3589 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3590 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
3591 vsi->bw_ets_limit_credits[i] =
3592 le16_to_cpu(bw_ets_config.credits[i]);
3593 /* 3 bits out of 4 for each TC */
3594 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
3595 }
3596
3597 return 0;
3598}
3599
3600/**
3601 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
3602 * @vsi: the VSI being configured
3603 * @enabled_tc: TC bitmap
3605 * @bw_share: BW shared credits per TC
3605 *
3606 * Returns 0 on success, negative value on failure
3607 **/
3608static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
3609 u8 *bw_share)
3610{
3611 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
3612 i40e_status aq_ret;
3613 int i;
3614
3615 bw_data.tc_valid_bits = enabled_tc;
3616 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3617 bw_data.tc_bw_credits[i] = bw_share[i];
3618
3619 aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
3620 NULL);
3621 if (aq_ret) {
3622 dev_info(&vsi->back->pdev->dev,
3623 "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
3624 __func__, vsi->back->hw.aq.asq_last_status);
3625 return -EINVAL;
3626 }
3627
3628 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
3629 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
3630
3631 return 0;
3632}
3633
3634/**
3635 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
3636 * @vsi: the VSI being configured
3637 * @enabled_tc: TC map to be enabled
3638 *
3639 **/
3640static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3641{
3642 struct net_device *netdev = vsi->netdev;
3643 struct i40e_pf *pf = vsi->back;
3644 struct i40e_hw *hw = &pf->hw;
3645 u8 netdev_tc = 0;
3646 int i;
3647 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
3648
3649 if (!netdev)
3650 return;
3651
3652 if (!enabled_tc) {
3653 netdev_reset_tc(netdev);
3654 return;
3655 }
3656
3657 /* Set up actual enabled TCs on the VSI */
3658 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
3659 return;
3660
3661 /* set per TC queues for the VSI */
3662 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3663		/* Only set TC queues for enabled tcs
3664		 *
3665		 * e.g. For a VSI that has TC0 and TC3 enabled the
3666		 * enabled_tc bitmap would be 0x09 (binary 1001); the
3667		 * driver will set numtc for the netdev to 2, and the
3668		 * netdev layer will reference them as TC 0 and TC 1.
3669		 */
3670 if (vsi->tc_config.enabled_tc & (1 << i))
3671 netdev_set_tc_queue(netdev,
3672 vsi->tc_config.tc_info[i].netdev_tc,
3673 vsi->tc_config.tc_info[i].qcount,
3674 vsi->tc_config.tc_info[i].qoffset);
3675 }
3676
3677 /* Assign UP2TC map for the VSI */
3678 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
3679 /* Get the actual TC# for the UP */
3680 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
3681 /* Get the mapped netdev TC# for the UP */
3682 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
3683 netdev_set_prio_tc_map(netdev, i, netdev_tc);
3684 }
3685}
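/* Illustrative sketch of the mapping described above for a VSI with
 * TC0 and TC3 enabled; qcount0/qcount3/qoffset3 are placeholders for
 * the per-TC values held in vsi->tc_config.tc_info[]:
 *
 *	netdev_set_num_tc(netdev, 2);			   // numtc = 2
 *	netdev_set_tc_queue(netdev, 0, qcount0, 0);	   // HW TC0 -> netdev TC0
 *	netdev_set_tc_queue(netdev, 1, qcount3, qoffset3); // HW TC3 -> netdev TC1
 *	netdev_set_prio_tc_map(netdev, up, 1);		   // UPs on TC3 -> netdev TC1
 */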
3686
3687/**
3688 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
3689 * @vsi: the VSI being configured
3690 * @ctxt: the ctxt buffer returned from AQ VSI update param command
3691 **/
3692static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
3693 struct i40e_vsi_context *ctxt)
3694{
3695	/* copy just the sections touched, not the entire info,
3696	 * since not all sections are valid as returned by
3697	 * the update vsi params command
3698	 */
3699 vsi->info.mapping_flags = ctxt->info.mapping_flags;
3700 memcpy(&vsi->info.queue_mapping,
3701 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
3702 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
3703 sizeof(vsi->info.tc_mapping));
3704}
3705
3706/**
3707 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
3708 * @vsi: VSI to be configured
3709 * @enabled_tc: TC bitmap
3710 *
3711 * This configures a particular VSI for TCs that are mapped to the
3712 * given TC bitmap. It uses default bandwidth share for TCs across
3713 * VSIs to configure TC for a particular VSI.
3714 *
3715 * NOTE:
3716 * It is expected that the VSI queues have been quiesced before calling
3717 * this function.
3718 **/
3719static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
3720{
3721 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
3722 struct i40e_vsi_context ctxt;
3723 int ret = 0;
3724 int i;
3725
3726 /* Check if enabled_tc is same as existing or new TCs */
3727 if (vsi->tc_config.enabled_tc == enabled_tc)
3728 return ret;
3729
3730 /* Enable ETS TCs with equal BW Share for now across all VSIs */
3731 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
3732 if (enabled_tc & (1 << i))
3733 bw_share[i] = 1;
3734 }
3735
3736 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
3737 if (ret) {
3738 dev_info(&vsi->back->pdev->dev,
3739 "Failed configuring TC map %d for VSI %d\n",
3740 enabled_tc, vsi->seid);
3741 goto out;
3742 }
3743
3744 /* Update Queue Pairs Mapping for currently enabled UPs */
3745 ctxt.seid = vsi->seid;
3746 ctxt.pf_num = vsi->back->hw.pf_id;
3747 ctxt.vf_num = 0;
3748 ctxt.uplink_seid = vsi->uplink_seid;
3749 memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3750 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
3751
3752 /* Update the VSI after updating the VSI queue-mapping information */
3753 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
3754 if (ret) {
3755 dev_info(&vsi->back->pdev->dev,
3756 "update vsi failed, aq_err=%d\n",
3757 vsi->back->hw.aq.asq_last_status);
3758 goto out;
3759 }
3760 /* update the local VSI info with updated queue map */
3761 i40e_vsi_update_queue_map(vsi, &ctxt);
3762 vsi->info.valid_sections = 0;
3763
3764 /* Update current VSI BW information */
3765 ret = i40e_vsi_get_bw_info(vsi);
3766 if (ret) {
3767 dev_info(&vsi->back->pdev->dev,
3768 "Failed updating vsi bw info, aq_err=%d\n",
3769 vsi->back->hw.aq.asq_last_status);
3770 goto out;
3771 }
3772
3773 /* Update the netdev TC setup */
3774 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
3775out:
3776 return ret;
3777}
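/* Usage sketch only, mirroring i40e_setup_tc() below: per the NOTE
 * above, the queues must be quiesced around the reconfiguration:
 *
 *	i40e_quiesce_vsi(vsi);
 *	ret = i40e_vsi_config_tc(vsi, 0x3);	// request TC0 and TC1
 *	i40e_unquiesce_vsi(vsi);
 */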
3778
3779/**
3780 * i40e_up_complete - Finish the last steps of bringing up a connection
3781 * @vsi: the VSI being configured
3782 **/
3783static int i40e_up_complete(struct i40e_vsi *vsi)
3784{
3785 struct i40e_pf *pf = vsi->back;
3786 int err;
3787
3788 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3789 i40e_vsi_configure_msix(vsi);
3790 else
3791 i40e_configure_msi_and_legacy(vsi);
3792
3793 /* start rings */
3794 err = i40e_vsi_control_rings(vsi, true);
3795 if (err)
3796 return err;
3797
3798 clear_bit(__I40E_DOWN, &vsi->state);
3799 i40e_napi_enable_all(vsi);
3800 i40e_vsi_enable_irq(vsi);
3801
3802 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
3803 (vsi->netdev)) {
3804		netdev_info(vsi->netdev, "NIC Link is Up\n");
3805 netif_tx_start_all_queues(vsi->netdev);
3806 netif_carrier_on(vsi->netdev);
3807 } else if (vsi->netdev) {
3808 netdev_info(vsi->netdev, "NIC Link is Down\n");
3809 }
3810 i40e_service_event_schedule(pf);
3811
3812 return 0;
3813}
3814
3815/**
3816 * i40e_vsi_reinit_locked - Reset the VSI
3817 * @vsi: the VSI being configured
3818 *
3819 * Rebuild the ring structs after some configuration
3820 * has changed, e.g. MTU size.
3821 **/
3822static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
3823{
3824 struct i40e_pf *pf = vsi->back;
3825
3826 WARN_ON(in_interrupt());
3827 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
3828 usleep_range(1000, 2000);
3829 i40e_down(vsi);
3830
3831 /* Give a VF some time to respond to the reset. The
3832 * two second wait is based upon the watchdog cycle in
3833 * the VF driver.
3834 */
3835 if (vsi->type == I40E_VSI_SRIOV)
3836 msleep(2000);
3837 i40e_up(vsi);
3838 clear_bit(__I40E_CONFIG_BUSY, &pf->state);
3839}
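/* The "lock" above is the generic test_and_set_bit busy-wait idiom;
 * a hedged, generic form of the same pattern for reference:
 *
 *	while (test_and_set_bit(SOME_BUSY_BIT, &state))
 *		usleep_range(1000, 2000);	// another path holds it
 *	...critical section...
 *	clear_bit(SOME_BUSY_BIT, &state);
 */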
3840
3841/**
3842 * i40e_up - Bring the connection back up after being down
3843 * @vsi: the VSI being configured
3844 **/
3845int i40e_up(struct i40e_vsi *vsi)
3846{
3847 int err;
3848
3849 err = i40e_vsi_configure(vsi);
3850 if (!err)
3851 err = i40e_up_complete(vsi);
3852
3853 return err;
3854}
3855
3856/**
3857 * i40e_down - Shutdown the connection processing
3858 * @vsi: the VSI being stopped
3859 **/
3860void i40e_down(struct i40e_vsi *vsi)
3861{
3862 int i;
3863
3864 /* It is assumed that the caller of this function
3865 * sets the vsi->state __I40E_DOWN bit.
3866 */
3867 if (vsi->netdev) {
3868 netif_carrier_off(vsi->netdev);
3869 netif_tx_disable(vsi->netdev);
3870 }
3871 i40e_vsi_disable_irq(vsi);
3872 i40e_vsi_control_rings(vsi, false);
3873 i40e_napi_disable_all(vsi);
3874
3875 for (i = 0; i < vsi->num_queue_pairs; i++) {
3876 i40e_clean_tx_ring(vsi->tx_rings[i]);
3877 i40e_clean_rx_ring(vsi->rx_rings[i]);
3878 }
3879}
3880
3881/**
3882 * i40e_setup_tc - configure multiple traffic classes
3883 * @netdev: net device to configure
3884 * @tc: number of traffic classes to enable
3885 **/
3886static int i40e_setup_tc(struct net_device *netdev, u8 tc)
3887{
3888 struct i40e_netdev_priv *np = netdev_priv(netdev);
3889 struct i40e_vsi *vsi = np->vsi;
3890 struct i40e_pf *pf = vsi->back;
3891 u8 enabled_tc = 0;
3892 int ret = -EINVAL;
3893 int i;
3894
3895 /* Check if DCB enabled to continue */
3896 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
3897 netdev_info(netdev, "DCB is not enabled for adapter\n");
3898 goto exit;
3899 }
3900
3901 /* Check if MFP enabled */
3902 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3903 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
3904 goto exit;
3905 }
3906
3907 /* Check whether tc count is within enabled limit */
3908 if (tc > i40e_pf_get_num_tc(pf)) {
3909 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
3910 goto exit;
3911 }
3912
3913 /* Generate TC map for number of tc requested */
3914 for (i = 0; i < tc; i++)
3915 enabled_tc |= (1 << i);
3916
3917 /* Requesting same TC configuration as already enabled */
3918 if (enabled_tc == vsi->tc_config.enabled_tc)
3919 return 0;
3920
3921 /* Quiesce VSI queues */
3922 i40e_quiesce_vsi(vsi);
3923
3924 /* Configure VSI for enabled TCs */
3925 ret = i40e_vsi_config_tc(vsi, enabled_tc);
3926 if (ret) {
3927 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
3928 vsi->seid);
3929 goto exit;
3930 }
3931
3932 /* Unquiesce VSI */
3933 i40e_unquiesce_vsi(vsi);
3934
3935exit:
3936 return ret;
3937}
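/* Illustrative only: for tc = 4 the map-building loop above yields
 *
 *	u8 enabled_tc = 0;
 *	for (i = 0; i < 4; i++)
 *		enabled_tc |= (1 << i);		// 0b1111 == 0x0F
 *
 * i.e. TCs 0-3 requested as one contiguous block from TC0 up.
 */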
3938
3939/**
3940 * i40e_open - Called when a network interface is made active
3941 * @netdev: network interface device structure
3942 *
3943 * The open entry point is called when a network interface is made
3944 * active by the system (IFF_UP). At this point all resources needed
3945 * for transmit and receive operations are allocated, the interrupt
3946 * handler is registered with the OS, the netdev watchdog subtask is
3947 * enabled, and the stack is notified that the interface is ready.
3948 *
3949 * Returns 0 on success, negative value on failure
3950 **/
3951static int i40e_open(struct net_device *netdev)
3952{
3953 struct i40e_netdev_priv *np = netdev_priv(netdev);
3954 struct i40e_vsi *vsi = np->vsi;
3955 struct i40e_pf *pf = vsi->back;
3956 char int_name[IFNAMSIZ];
3957 int err;
3958
3959 /* disallow open during test */
3960 if (test_bit(__I40E_TESTING, &pf->state))
3961 return -EBUSY;
3962
3963 netif_carrier_off(netdev);
3964
3965 /* allocate descriptors */
3966 err = i40e_vsi_setup_tx_resources(vsi);
3967 if (err)
3968 goto err_setup_tx;
3969 err = i40e_vsi_setup_rx_resources(vsi);
3970 if (err)
3971 goto err_setup_rx;
3972
3973 err = i40e_vsi_configure(vsi);
3974 if (err)
3975 goto err_setup_rx;
3976
3977 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
3978 dev_driver_string(&pf->pdev->dev), netdev->name);
3979 err = i40e_vsi_request_irq(vsi, int_name);
3980 if (err)
3981 goto err_setup_rx;
3982
3983 /* Notify the stack of the actual queue counts. */
3984 err = netif_set_real_num_tx_queues(netdev, pf->num_tx_queues);
3985 if (err)
3986 goto err_set_queues;
3987
3988 err = netif_set_real_num_rx_queues(netdev, pf->num_rx_queues);
3989 if (err)
3990 goto err_set_queues;
3991
3992 err = i40e_up_complete(vsi);
3993 if (err)
3994 goto err_up_complete;
3995
3996 if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
3997 err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
3998 if (err)
3999 netdev_info(netdev,
4000 "couldn't set broadcast err %d aq_err %d\n",
4001 err, pf->hw.aq.asq_last_status);
4002 }
4003#ifdef CONFIG_I40E_VXLAN
4004 vxlan_get_rx_port(netdev);
4005#endif
4006
4007 return 0;
4008
4009err_up_complete:
4010 i40e_down(vsi);
4011err_set_queues:
4012 i40e_vsi_free_irq(vsi);
4013err_setup_rx:
4014 i40e_vsi_free_rx_resources(vsi);
4015err_setup_tx:
4016 i40e_vsi_free_tx_resources(vsi);
4017 if (vsi == pf->vsi[pf->lan_vsi])
4018 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
4019
4020 return err;
4021}
4022
4023/**
4024 * i40e_close - Disables a network interface
4025 * @netdev: network interface device structure
4026 *
4027 * The close entry point is called when an interface is de-activated
4028 * by the OS. The hardware is still under the driver's control, but
4029 * this netdev interface is disabled.
4030 *
4031 * Returns 0, this is not allowed to fail
4032 **/
4033static int i40e_close(struct net_device *netdev)
4034{
4035 struct i40e_netdev_priv *np = netdev_priv(netdev);
4036 struct i40e_vsi *vsi = np->vsi;
4037
4038 if (test_and_set_bit(__I40E_DOWN, &vsi->state))
4039 return 0;
4040
4041 i40e_down(vsi);
4042 i40e_vsi_free_irq(vsi);
4043
4044 i40e_vsi_free_tx_resources(vsi);
4045 i40e_vsi_free_rx_resources(vsi);
4046
4047 return 0;
4048}
4049
4050/**
4051 * i40e_do_reset - Start a PF or Core Reset sequence
4052 * @pf: board private structure
4053 * @reset_flags: which reset is requested
4054 *
4055 * The essential difference in resets is that the PF Reset
4056 * doesn't clear the packet buffers, doesn't reset the PE
4057 * firmware, and doesn't bother the other PFs on the chip.
4058 **/
4059void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4060{
4061 u32 val;
4062
4063 WARN_ON(in_interrupt());
4064
4065 /* do the biggest reset indicated */
4066 if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
4067
4068 /* Request a Global Reset
4069 *
4070 * This will start the chip's countdown to the actual full
4071 * chip reset event, and a warning interrupt to be sent
4072 * to all PFs, including the requestor. Our handler
4073 * for the warning interrupt will deal with the shutdown
4074 * and recovery of the switch setup.
4075 */
4076 dev_info(&pf->pdev->dev, "GlobalR requested\n");
4077 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4078 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
4079 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4080
4081 } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
4082
4083 /* Request a Core Reset
4084 *
4085 * Same as Global Reset, except does *not* include the MAC/PHY
4086 */
4087 dev_info(&pf->pdev->dev, "CoreR requested\n");
4088 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4089 val |= I40E_GLGEN_RTRIG_CORER_MASK;
4090 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4091 i40e_flush(&pf->hw);
4092
4093 } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
4094
4095 /* Request a Firmware Reset
4096 *
4097 * Same as Global reset, plus restarting the
4098 * embedded firmware engine.
4099 */
4100 /* enable EMP Reset */
4101 val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
4102 val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
4103 wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
4104
4105 /* force the reset */
4106 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4107 val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
4108 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
4109 i40e_flush(&pf->hw);
4110
4111 } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
4112
4113 /* Request a PF Reset
4114 *
4115 * Resets only the PF-specific registers
4116 *
4117 * This goes directly to the tear-down and rebuild of
4118 * the switch, since we need to do all the recovery as
4119 * for the Core Reset.
4120 */
4121 dev_info(&pf->pdev->dev, "PFR requested\n");
4122 i40e_handle_reset_warning(pf);
4123
4124 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
4125 int v;
4126
4127 /* Find the VSI(s) that requested a re-init */
4128 dev_info(&pf->pdev->dev,
4129 "VSI reinit requested\n");
4130 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4131 struct i40e_vsi *vsi = pf->vsi[v];
4132 if (vsi != NULL &&
4133 test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
4134 i40e_vsi_reinit_locked(pf->vsi[v]);
4135 clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
4136 }
4137 }
4138
4139 /* no further action needed, so return now */
4140 return;
4141 } else {
4142 dev_info(&pf->pdev->dev,
4143 "bad reset request 0x%08x\n", reset_flags);
4144 return;
4145 }
4146}
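/* Usage sketch, as seen in i40e_open()'s error path above: callers
 * request a specific reset by passing the corresponding flag bit:
 *
 *	i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
 */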
4147
4148/**
4149 * i40e_do_reset_safe - Protected reset path for userland calls.
4150 * @pf: board private structure
4151 * @reset_flags: which reset is requested
4152 *
4153 **/
4154void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
4155{
4156 rtnl_lock();
4157 i40e_do_reset(pf, reset_flags);
4158 rtnl_unlock();
4159}
4160
4161/**
4162 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
4163 * @pf: board private structure
4164 * @e: event info posted on ARQ
4165 *
4166 * Handler for LAN Queue Overflow Event generated by the firmware for PF
4167 * and VF queues
4168 **/
4169static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
4170 struct i40e_arq_event_info *e)
4171{
4172 struct i40e_aqc_lan_overflow *data =
4173 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
4174 u32 queue = le32_to_cpu(data->prtdcb_rupto);
4175 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
4176 struct i40e_hw *hw = &pf->hw;
4177 struct i40e_vf *vf;
4178 u16 vf_id;
4179
4180 dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
4181 __func__, queue, qtx_ctl);
4182
4183 /* Queue belongs to VF, find the VF and issue VF reset */
4184 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
4185 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
4186 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
4187 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
4188 vf_id -= hw->func_caps.vf_base_id;
4189 vf = &pf->vf[vf_id];
4190 i40e_vc_notify_vf_reset(vf);
4191 /* Allow VF to process pending reset notification */
4192 msleep(20);
4193 i40e_reset_vf(vf, false);
4194 }
4195}
4196
4197/**
4198 * i40e_service_event_complete - Finish up the service event
4199 * @pf: board private structure
4200 **/
4201static void i40e_service_event_complete(struct i40e_pf *pf)
4202{
4203 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
4204
4205	/* flush memory to make sure state is correct before next watchdog */
4206 smp_mb__before_clear_bit();
4207 clear_bit(__I40E_SERVICE_SCHED, &pf->state);
4208}
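/* Hedged sketch of the counterpart scheduling side; the actual
 * i40e_service_event_schedule() is defined elsewhere in this file, but
 * the contract paired with the complete above is roughly:
 *
 *	if (!test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
 *		schedule_work(&pf->service_task);
 */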
4209
4210/**
4211 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
4212 * @pf: board private structure
4213 **/
4214static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
4215{
4216 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
4217 return;
4218
4219 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
4220
4221 /* if interface is down do nothing */
4222 if (test_bit(__I40E_DOWN, &pf->state))
4223 return;
4224}
4225
4226/**
4227 * i40e_vsi_link_event - notify VSI of a link event
4228 * @vsi: vsi to be notified
4229 * @link_up: link up or down
4230 **/
4231static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
4232{
4233 if (!vsi)
4234 return;
4235
4236 switch (vsi->type) {
4237 case I40E_VSI_MAIN:
4238 if (!vsi->netdev || !vsi->netdev_registered)
4239 break;
4240
4241 if (link_up) {
4242 netif_carrier_on(vsi->netdev);
4243 netif_tx_wake_all_queues(vsi->netdev);
4244 } else {
4245 netif_carrier_off(vsi->netdev);
4246 netif_tx_stop_all_queues(vsi->netdev);
4247 }
4248 break;
4249
4250 case I40E_VSI_SRIOV:
4251 break;
4252
4253 case I40E_VSI_VMDQ2:
4254 case I40E_VSI_CTRL:
4255 case I40E_VSI_MIRROR:
4256 default:
4257 /* there is no notification for other VSIs */
4258 break;
4259 }
4260}
4261
4262/**
4263 * i40e_veb_link_event - notify elements on the veb of a link event
4264 * @veb: veb to be notified
4265 * @link_up: link up or down
4266 **/
4267static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
4268{
4269 struct i40e_pf *pf;
4270 int i;
4271
4272 if (!veb || !veb->pf)
4273 return;
4274 pf = veb->pf;
4275
4276 /* depth first... */
4277 for (i = 0; i < I40E_MAX_VEB; i++)
4278 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
4279 i40e_veb_link_event(pf->veb[i], link_up);
4280
4281 /* ... now the local VSIs */
4282 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4283 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
4284 i40e_vsi_link_event(pf->vsi[i], link_up);
4285}
4286
4287/**
4288 * i40e_link_event - Update netif_carrier status
4289 * @pf: board private structure
4290 **/
4291static void i40e_link_event(struct i40e_pf *pf)
4292{
4293 bool new_link, old_link;
4294
4295 new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
4296 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
4297
4298 if (new_link == old_link)
4299 return;
4300
4301 if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
4302 netdev_info(pf->vsi[pf->lan_vsi]->netdev,
4303 "NIC Link is %s\n", (new_link ? "Up" : "Down"));
4304
4305 /* Notify the base of the switch tree connected to
4306 * the link. Floating VEBs are not notified.
4307 */
4308 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
4309 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
4310 else
4311 i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
4312
4313 if (pf->vf)
4314 i40e_vc_notify_link_state(pf);
4315}
4316
4317/**
4318 * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
4319 * @pf: board private structure
4320 *
4321 * Set the per-queue flags to request a check for stuck queues in the irq
4322 * clean functions, then force interrupts to be sure the irq clean is called.
4323 **/
4324static void i40e_check_hang_subtask(struct i40e_pf *pf)
4325{
4326 int i, v;
4327
4328 /* If we're down or resetting, just bail */
4329 if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
4330 return;
4331
4332 /* for each VSI/netdev
4333 * for each Tx queue
4334 * set the check flag
4335 * for each q_vector
4336 * force an interrupt
4337 */
4338 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4339 struct i40e_vsi *vsi = pf->vsi[v];
4340 int armed = 0;
4341
4342 if (!pf->vsi[v] ||
4343 test_bit(__I40E_DOWN, &vsi->state) ||
4344 (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
4345 continue;
4346
4347 for (i = 0; i < vsi->num_queue_pairs; i++) {
4348			set_check_for_tx_hang(vsi->tx_rings[i]);
4349			if (test_bit(__I40E_HANG_CHECK_ARMED,
4350				     &vsi->tx_rings[i]->state))
4351 armed++;
4352 }
4353
4354 if (armed) {
4355 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
4356 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
4357 (I40E_PFINT_DYN_CTL0_INTENA_MASK |
4358 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
4359 } else {
4360 u16 vec = vsi->base_vector - 1;
4361 u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
4362 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
4363 for (i = 0; i < vsi->num_q_vectors; i++, vec++)
4364 wr32(&vsi->back->hw,
4365 I40E_PFINT_DYN_CTLN(vec), val);
4366 }
4367 i40e_flush(&vsi->back->hw);
4368 }
4369 }
4370}
4371
4372/**
4373 * i40e_watchdog_subtask - Check and bring link up
4374 * @pf: board private structure
4375 **/
4376static void i40e_watchdog_subtask(struct i40e_pf *pf)
4377{
4378 int i;
4379
4380 /* if interface is down do nothing */
4381 if (test_bit(__I40E_DOWN, &pf->state) ||
4382 test_bit(__I40E_CONFIG_BUSY, &pf->state))
4383 return;
4384
4385 /* Update the stats for active netdevs so the network stack
4386 * can look at updated numbers whenever it cares to
4387 */
4388 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4389 if (pf->vsi[i] && pf->vsi[i]->netdev)
4390 i40e_update_stats(pf->vsi[i]);
4391
4392 /* Update the stats for the active switching components */
4393 for (i = 0; i < I40E_MAX_VEB; i++)
4394 if (pf->veb[i])
4395 i40e_update_veb_stats(pf->veb[i]);
4396}
4397
4398/**
4399 * i40e_reset_subtask - Set up for resetting the device and driver
4400 * @pf: board private structure
4401 **/
4402static void i40e_reset_subtask(struct i40e_pf *pf)
4403{
4404 u32 reset_flags = 0;
4405
4406	rtnl_lock();
4407 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
4408 reset_flags |= (1 << __I40E_REINIT_REQUESTED);
4409 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
4410 }
4411 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
4412 reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
4413 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4414 }
4415 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
4416 reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
4417 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
4418 }
4419 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
4420 reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
4421 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
4422 }
4423
4424 /* If there's a recovery already waiting, it takes
4425 * precedence before starting a new reset sequence.
4426 */
4427 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
4428 i40e_handle_reset_warning(pf);
4429		goto unlock;
4430 }
4431
4432 /* If we're already down or resetting, just bail */
4433 if (reset_flags &&
4434 !test_bit(__I40E_DOWN, &pf->state) &&
4435 !test_bit(__I40E_CONFIG_BUSY, &pf->state))
4436 i40e_do_reset(pf, reset_flags);
4437
4438unlock:
4439 rtnl_unlock();
4440}
4441
4442/**
4443 * i40e_handle_link_event - Handle link event
4444 * @pf: board private structure
4445 * @e: event info posted on ARQ
4446 **/
4447static void i40e_handle_link_event(struct i40e_pf *pf,
4448 struct i40e_arq_event_info *e)
4449{
4450 struct i40e_hw *hw = &pf->hw;
4451 struct i40e_aqc_get_link_status *status =
4452 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
4453 struct i40e_link_status *hw_link_info = &hw->phy.link_info;
4454
4455 /* save off old link status information */
4456 memcpy(&pf->hw.phy.link_info_old, hw_link_info,
4457 sizeof(pf->hw.phy.link_info_old));
4458
4459 /* update link status */
4460 hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
4461 hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
4462 hw_link_info->link_info = status->link_info;
4463 hw_link_info->an_info = status->an_info;
4464 hw_link_info->ext_info = status->ext_info;
4465 hw_link_info->lse_enable =
4466 le16_to_cpu(status->command_flags) &
4467 I40E_AQ_LSE_ENABLE;
4468
4469 /* process the event */
4470 i40e_link_event(pf);
4471
4472 /* Do a new status request to re-enable LSE reporting
4473 * and load new status information into the hw struct,
4474 * then see if the status changed while processing the
4475 * initial event.
4476 */
4477 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
4478 i40e_link_event(pf);
4479}
4480
4481/**
4482 * i40e_clean_adminq_subtask - Clean the AdminQ rings
4483 * @pf: board private structure
4484 **/
4485static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
4486{
4487 struct i40e_arq_event_info event;
4488 struct i40e_hw *hw = &pf->hw;
4489 u16 pending, i = 0;
4490 i40e_status ret;
4491 u16 opcode;
4492 u32 val;
4493
4494 if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
4495 return;
4496
4497 event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
4498 if (!event.msg_buf)
4499 return;
4500
4501 do {
4502		event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
4503 ret = i40e_clean_arq_element(hw, &event, &pending);
4504 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
4505 dev_info(&pf->pdev->dev, "No ARQ event found\n");
4506 break;
4507 } else if (ret) {
4508 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
4509 break;
4510 }
4511
4512 opcode = le16_to_cpu(event.desc.opcode);
4513 switch (opcode) {
4514
4515 case i40e_aqc_opc_get_link_status:
4516 i40e_handle_link_event(pf, &event);
4517 break;
4518 case i40e_aqc_opc_send_msg_to_pf:
4519 ret = i40e_vc_process_vf_msg(pf,
4520 le16_to_cpu(event.desc.retval),
4521 le32_to_cpu(event.desc.cookie_high),
4522 le32_to_cpu(event.desc.cookie_low),
4523 event.msg_buf,
4524 event.msg_size);
4525 break;
4526 case i40e_aqc_opc_lldp_update_mib:
4527 dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
4528 break;
4529 case i40e_aqc_opc_event_lan_overflow:
4530 dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
4531 i40e_handle_lan_overflow_event(pf, &event);
4532 break;
4533 default:
4534 dev_info(&pf->pdev->dev,
4535 "ARQ Error: Unknown event %d received\n",
4536 event.desc.opcode);
4537 break;
4538 }
4539 } while (pending && (i++ < pf->adminq_work_limit));
4540
4541 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
4542 /* re-enable Admin queue interrupt cause */
4543 val = rd32(hw, I40E_PFINT_ICR0_ENA);
4544 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
4545 wr32(hw, I40E_PFINT_ICR0_ENA, val);
4546 i40e_flush(hw);
4547
4548 kfree(event.msg_buf);
4549}
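/* Hedged outline of the drain loop above; dispatch() is a placeholder
 * for the opcode switch:
 *
 *	do {
 *		event.msg_size = I40E_MAX_AQ_BUF_SIZE;	// reinit each pass
 *		if (i40e_clean_arq_element(hw, &event, &pending))
 *			break;
 *		dispatch(le16_to_cpu(event.desc.opcode));
 *	} while (pending && (i++ < pf->adminq_work_limit));
 */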
4550
4551/**
4552 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
4553 * @veb: pointer to the VEB instance
4554 *
4555 * This is a recursive function that first builds the attached VSIs then
4556 * recurses in to build the next layer of VEB. We track the connections
4557 * through our own index numbers because the SEIDs from the HW could
4558 * change across the reset.
4559 **/
4560static int i40e_reconstitute_veb(struct i40e_veb *veb)
4561{
4562 struct i40e_vsi *ctl_vsi = NULL;
4563 struct i40e_pf *pf = veb->pf;
4564 int v, veb_idx;
4565 int ret;
4566
4567 /* build VSI that owns this VEB, temporarily attached to base VEB */
4568 for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
4569 if (pf->vsi[v] &&
4570 pf->vsi[v]->veb_idx == veb->idx &&
4571 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
4572 ctl_vsi = pf->vsi[v];
4573 break;
4574 }
4575 }
4576 if (!ctl_vsi) {
4577 dev_info(&pf->pdev->dev,
4578 "missing owner VSI for veb_idx %d\n", veb->idx);
4579 ret = -ENOENT;
4580 goto end_reconstitute;
4581 }
4582 if (ctl_vsi != pf->vsi[pf->lan_vsi])
4583 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
4584 ret = i40e_add_vsi(ctl_vsi);
4585 if (ret) {
4586 dev_info(&pf->pdev->dev,
4587 "rebuild of owner VSI failed: %d\n", ret);
4588 goto end_reconstitute;
4589 }
4590 i40e_vsi_reset_stats(ctl_vsi);
4591
4592 /* create the VEB in the switch and move the VSI onto the VEB */
4593 ret = i40e_add_veb(veb, ctl_vsi);
4594 if (ret)
4595 goto end_reconstitute;
4596
4597 /* create the remaining VSIs attached to this VEB */
4598 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4599 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
4600 continue;
4601
4602 if (pf->vsi[v]->veb_idx == veb->idx) {
4603 struct i40e_vsi *vsi = pf->vsi[v];
4604 vsi->uplink_seid = veb->seid;
4605 ret = i40e_add_vsi(vsi);
4606 if (ret) {
4607 dev_info(&pf->pdev->dev,
4608 "rebuild of vsi_idx %d failed: %d\n",
4609 v, ret);
4610 goto end_reconstitute;
4611 }
4612 i40e_vsi_reset_stats(vsi);
4613 }
4614 }
4615
4616 /* create any VEBs attached to this VEB - RECURSION */
4617 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
4618 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
4619 pf->veb[veb_idx]->uplink_seid = veb->seid;
4620 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
4621 if (ret)
4622 break;
4623 }
4624 }
4625
4626end_reconstitute:
4627 return ret;
4628}
4629
4630/**
4631 * i40e_get_capabilities - get info about the HW
4632 * @pf: the PF struct
4633 **/
4634static int i40e_get_capabilities(struct i40e_pf *pf)
4635{
4636 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
4637 u16 data_size;
4638 int buf_len;
4639 int err;
4640
4641 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
4642 do {
4643 cap_buf = kzalloc(buf_len, GFP_KERNEL);
4644 if (!cap_buf)
4645 return -ENOMEM;
4646
4647 /* this loads the data into the hw struct for us */
4648 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
4649 &data_size,
4650 i40e_aqc_opc_list_func_capabilities,
4651 NULL);
4652 /* data loaded, buffer no longer needed */
4653 kfree(cap_buf);
4654
4655 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
4656 /* retry with a larger buffer */
4657 buf_len = data_size;
4658 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
4659 dev_info(&pf->pdev->dev,
4660 "capability discovery failed: aq=%d\n",
4661 pf->hw.aq.asq_last_status);
4662 return -ENODEV;
4663 }
4664 } while (err);
4665
4666 if (pf->hw.revision_id == 0 && pf->hw.func_caps.npar_enable) {
4667 pf->hw.func_caps.num_msix_vectors += 1;
4668 pf->hw.func_caps.num_tx_qp =
4669 min_t(int, pf->hw.func_caps.num_tx_qp,
4670 I40E_MAX_NPAR_QPS);
4671 }
4672
4673 if (pf->hw.debug_mask & I40E_DEBUG_USER)
4674 dev_info(&pf->pdev->dev,
4675 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
4676 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
4677 pf->hw.func_caps.num_msix_vectors,
4678 pf->hw.func_caps.num_msix_vectors_vf,
4679 pf->hw.func_caps.fd_filters_guaranteed,
4680 pf->hw.func_caps.fd_filters_best_effort,
4681 pf->hw.func_caps.num_tx_qp,
4682 pf->hw.func_caps.num_vsis);
4683
4684#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
4685 + pf->hw.func_caps.num_vfs)
4686 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
4687 dev_info(&pf->pdev->dev,
4688 "got num_vsis %d, setting num_vsis to %d\n",
4689 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
4690 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
4691 }
4692
4693 return 0;
4694}
4695
4696/**
4697 * i40e_fdir_setup - initialize the Flow Director resources
4698 * @pf: board private structure
4699 **/
4700static void i40e_fdir_setup(struct i40e_pf *pf)
4701{
4702 struct i40e_vsi *vsi;
4703 bool new_vsi = false;
4704 int err, i;
4705
4706 if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
4707 I40E_FLAG_FDIR_ATR_ENABLED)))
4708 return;
4709
4710 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
4711
4712 /* find existing or make new FDIR VSI */
4713 vsi = NULL;
4714 for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
4715 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
4716 vsi = pf->vsi[i];
4717 if (!vsi) {
4718 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
4719 if (!vsi) {
4720 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
4721 pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
4722 return;
4723 }
4724 new_vsi = true;
4725 }
4726 WARN_ON(vsi->base_queue != I40E_FDIR_RING);
4727 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
4728
4729 err = i40e_vsi_setup_tx_resources(vsi);
4730 if (!err)
4731 err = i40e_vsi_setup_rx_resources(vsi);
4732 if (!err)
4733 err = i40e_vsi_configure(vsi);
4734 if (!err && new_vsi) {
4735 char int_name[IFNAMSIZ + 9];
4736 snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
4737 dev_driver_string(&pf->pdev->dev));
4738 err = i40e_vsi_request_irq(vsi, int_name);
4739 }
4740 if (!err)
4741 err = i40e_up_complete(vsi);
4742
4743 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4744}
4745
4746/**
4747 * i40e_fdir_teardown - release the Flow Director resources
4748 * @pf: board private structure
4749 **/
4750static void i40e_fdir_teardown(struct i40e_pf *pf)
4751{
4752 int i;
4753
4754 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
4755 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
4756 i40e_vsi_release(pf->vsi[i]);
4757 break;
4758 }
4759 }
4760}
4761
4762/**
4763 * i40e_prep_for_reset - prep for the core to reset
4764 * @pf: board private structure
4765 *
4766 * Close up the VFs and other things in prep for a PF reset.
4767 **/
4768static int i40e_prep_for_reset(struct i40e_pf *pf)
4769{
4770 struct i40e_hw *hw = &pf->hw;
4771 i40e_status ret;
4772 u32 v;
4773
4774 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
4775 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
4776		return 0;
4777
4778 dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
4779
4780 i40e_vc_notify_reset(pf);
4781
4782 /* quiesce the VSIs and their queues that are not already DOWN */
4783 i40e_pf_quiesce_all_vsi(pf);
4784
4785 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4786 if (pf->vsi[v])
4787 pf->vsi[v]->seid = 0;
4788 }
4789
4790 i40e_shutdown_adminq(&pf->hw);
4791
4792 /* call shutdown HMC */
4793 ret = i40e_shutdown_lan_hmc(hw);
4794 if (ret) {
4795 dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
4796 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
4797 }
4798 return ret;
4799}
4800
4801/**
4802 * i40e_reset_and_rebuild - reset and rebuild using a saved config
4803 * @pf: board private structure
4804 * @reinit: if the Main VSI needs to be re-initialized.
4805 **/
4806static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
4807{
4808 struct i40e_driver_version dv;
4809 struct i40e_hw *hw = &pf->hw;
4810 i40e_status ret;
4811 u32 v;
4812
4813 /* Now we wait for GRST to settle out.
4814 * We don't have to delete the VEBs or VSIs from the hw switch
4815 * because the reset will make them disappear.
4816 */
4817 ret = i40e_pf_reset(hw);
4818 if (ret)
4819 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
4820 pf->pfr_count++;
4821
4822 if (test_bit(__I40E_DOWN, &pf->state))
4823 goto end_core_reset;
4824 dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
4825
4826 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
4827 ret = i40e_init_adminq(&pf->hw);
4828 if (ret) {
4829 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
4830 goto end_core_reset;
4831 }
4832
4833 ret = i40e_get_capabilities(pf);
4834 if (ret) {
4835 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
4836 ret);
4837 goto end_core_reset;
4838 }
4839
4840 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
4841 hw->func_caps.num_rx_qp,
4842 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
4843 if (ret) {
4844 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
4845 goto end_core_reset;
4846 }
4847 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
4848 if (ret) {
4849 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
4850 goto end_core_reset;
4851 }
4852
4853 /* do basic switch setup */
4854	ret = i40e_setup_pf_switch(pf, reinit);
4855 if (ret)
4856 goto end_core_reset;
4857
4858 /* Rebuild the VSIs and VEBs that existed before reset.
4859 * They are still in our local switch element arrays, so only
4860 * need to rebuild the switch model in the HW.
4861 *
4862	 * If there were VEBs but the reconstitution failed, we'll try
4863	 * to recover minimal use by getting the basic PF VSI working.
4864 */
4865 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
4866 dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
4867 /* find the one VEB connected to the MAC, and find orphans */
4868 for (v = 0; v < I40E_MAX_VEB; v++) {
4869 if (!pf->veb[v])
4870 continue;
4871
4872 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
4873 pf->veb[v]->uplink_seid == 0) {
4874 ret = i40e_reconstitute_veb(pf->veb[v]);
4875
4876 if (!ret)
4877 continue;
4878
4879 /* If Main VEB failed, we're in deep doodoo,
4880 * so give up rebuilding the switch and set up
4881 * for minimal rebuild of PF VSI.
4882 * If orphan failed, we'll report the error
4883 * but try to keep going.
4884 */
4885 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
4886 dev_info(&pf->pdev->dev,
4887 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
4888 ret);
4889 pf->vsi[pf->lan_vsi]->uplink_seid
4890 = pf->mac_seid;
4891 break;
4892 } else if (pf->veb[v]->uplink_seid == 0) {
4893 dev_info(&pf->pdev->dev,
4894 "rebuild of orphan VEB failed: %d\n",
4895 ret);
4896 }
4897 }
4898 }
4899 }
4900
4901 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
4902 dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
4903 /* no VEB, so rebuild only the Main VSI */
4904 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
4905 if (ret) {
4906 dev_info(&pf->pdev->dev,
4907 "rebuild of Main VSI failed: %d\n", ret);
4908 goto end_core_reset;
4909 }
4910 }
4911
4912 /* reinit the misc interrupt */
4913 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4914 ret = i40e_setup_misc_vector(pf);
4915
4916 /* restart the VSIs that were rebuilt and running before the reset */
4917 i40e_pf_unquiesce_all_vsi(pf);
4918
4919 /* tell the firmware that we're starting */
4920 dv.major_version = DRV_VERSION_MAJOR;
4921 dv.minor_version = DRV_VERSION_MINOR;
4922 dv.build_version = DRV_VERSION_BUILD;
4923 dv.subbuild_version = 0;
4924 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
4925
4926 dev_info(&pf->pdev->dev, "PF reset done\n");
4927
4928end_core_reset:
4929 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
4930}
4931
4932/**
4933 * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
4934 * @pf: board private structure
4935 *
4936 * Close up the VFs and other things in prep for a Core Reset,
4937 * then get ready to rebuild the world.
4938 **/
4939static void i40e_handle_reset_warning(struct i40e_pf *pf)
4940{
4941 i40e_status ret;
4942
4943 ret = i40e_prep_for_reset(pf);
4944 if (!ret)
4945		i40e_reset_and_rebuild(pf, false);
4946}
4947
4948/**
4949 * i40e_handle_mdd_event
4950 * @pf: pointer to the pf structure
4951 *
4952 * Called from the MDD irq handler to identify possibly malicious vfs
4953 **/
4954static void i40e_handle_mdd_event(struct i40e_pf *pf)
4955{
4956 struct i40e_hw *hw = &pf->hw;
4957 bool mdd_detected = false;
4958 struct i40e_vf *vf;
4959 u32 reg;
4960 int i;
4961
4962 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
4963 return;
4964
4965 /* find what triggered the MDD event */
4966 reg = rd32(hw, I40E_GL_MDET_TX);
4967 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
4968 u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
4969 >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
4970		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK)
4971 >> I40E_GL_MDET_TX_EVENT_SHIFT;
4972 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
4973 >> I40E_GL_MDET_TX_QUEUE_SHIFT;
4974 dev_info(&pf->pdev->dev,
4975 "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
4976 event, queue, func);
4977 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
4978 mdd_detected = true;
4979 }
4980 reg = rd32(hw, I40E_GL_MDET_RX);
4981 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
4982 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
4983 >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
4985		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK)
4985 >> I40E_GL_MDET_RX_EVENT_SHIFT;
4986 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
4987 >> I40E_GL_MDET_RX_QUEUE_SHIFT;
4988 dev_info(&pf->pdev->dev,
4989 "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
4990 event, queue, func);
4991 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
4992 mdd_detected = true;
4993 }
4994
4995 /* see if one of the VFs needs its hand slapped */
4996 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
4997 vf = &(pf->vf[i]);
4998 reg = rd32(hw, I40E_VP_MDET_TX(i));
4999 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
5000 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
5001 vf->num_mdd_events++;
5002 dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
5003 }
5004
5005 reg = rd32(hw, I40E_VP_MDET_RX(i));
5006 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
5007 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
5008 vf->num_mdd_events++;
5009 dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
5010 }
5011
5012 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
5013 dev_info(&pf->pdev->dev,
5014 "Too many MDD events on VF %d, disabled\n", i);
5015 dev_info(&pf->pdev->dev,
5016 "Use PF Control I/F to re-enable the VF\n");
5017 set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
5018 }
5019 }
5020
5021 /* re-enable mdd interrupt cause */
5022 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
5023 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
5024 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
5025 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
5026 i40e_flush(hw);
5027}
5028
5029#ifdef CONFIG_I40E_VXLAN
5030/**
5031 * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
5032 * @pf: board private structure
5033 **/
5034static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
5035{
5036 const int vxlan_hdr_qwords = 4;
5037 struct i40e_hw *hw = &pf->hw;
5038 i40e_status ret;
5039 u8 filter_index;
5040 __be16 port;
5041 int i;
5042
5043 if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
5044 return;
5045
5046 pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
5047
5048 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
5049 if (pf->pending_vxlan_bitmap & (1 << i)) {
5050 pf->pending_vxlan_bitmap &= ~(1 << i);
5051 port = pf->vxlan_ports[i];
5052 ret = port ?
5053 i40e_aq_add_udp_tunnel(hw, ntohs(port),
5054 vxlan_hdr_qwords,
5055 I40E_AQC_TUNNEL_TYPE_VXLAN,
5056 &filter_index, NULL)
5057 : i40e_aq_del_udp_tunnel(hw, i, NULL);
5058
5059 if (ret) {
5060 dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
5061 port ? "adding" : "deleting",
5062					 ntohs(port), i);
5063
5064 pf->vxlan_ports[i] = 0;
5065 } else {
5066				dev_info(&pf->pdev->dev, "%s port %d via AQ command, filter index %d\n",
5067 port ? "Added" : "Deleted",
5068 ntohs(port), port ? i : filter_index);
5069 }
5070 }
5071 }
5072}
5073
5074#endif
5075/**
5076 * i40e_service_task - Run the driver's async subtasks
5077 * @work: pointer to work_struct containing our data
5078 **/
5079static void i40e_service_task(struct work_struct *work)
5080{
5081 struct i40e_pf *pf = container_of(work,
5082 struct i40e_pf,
5083 service_task);
5084 unsigned long start_time = jiffies;
5085
5086 i40e_reset_subtask(pf);
5087 i40e_handle_mdd_event(pf);
5088 i40e_vc_process_vflr_event(pf);
5089 i40e_watchdog_subtask(pf);
5090 i40e_fdir_reinit_subtask(pf);
5091 i40e_check_hang_subtask(pf);
5092 i40e_sync_filters_subtask(pf);
5093#ifdef CONFIG_I40E_VXLAN
5094 i40e_sync_vxlan_filters_subtask(pf);
5095#endif
5096 i40e_clean_adminq_subtask(pf);
5097
5098 i40e_service_event_complete(pf);
5099
5100 /* If the tasks have taken longer than one timer cycle or there
5101 * is more work to be done, reschedule the service task now
5102 * rather than wait for the timer to tick again.
5103 */
5104 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
5105 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
5106 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
5107 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
5108 i40e_service_event_schedule(pf);
5109}
5110
5111/**
5112 * i40e_service_timer - timer callback
5113 * @data: pointer to PF struct
5114 **/
5115static void i40e_service_timer(unsigned long data)
5116{
5117 struct i40e_pf *pf = (struct i40e_pf *)data;
5118
5119 mod_timer(&pf->service_timer,
5120 round_jiffies(jiffies + pf->service_timer_period));
5121 i40e_service_event_schedule(pf);
5122}
5123
5124/**
5125 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
5126 * @vsi: the VSI being configured
5127 **/
5128static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
5129{
5130 struct i40e_pf *pf = vsi->back;
5131
5132 switch (vsi->type) {
5133 case I40E_VSI_MAIN:
5134 vsi->alloc_queue_pairs = pf->num_lan_qps;
5135 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5136 I40E_REQ_DESCRIPTOR_MULTIPLE);
5137 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5138 vsi->num_q_vectors = pf->num_lan_msix;
5139 else
5140 vsi->num_q_vectors = 1;
5141
5142 break;
5143
5144 case I40E_VSI_FDIR:
5145 vsi->alloc_queue_pairs = 1;
5146 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
5147 I40E_REQ_DESCRIPTOR_MULTIPLE);
5148 vsi->num_q_vectors = 1;
5149 break;
5150
5151 case I40E_VSI_VMDQ2:
5152 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
5153 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5154 I40E_REQ_DESCRIPTOR_MULTIPLE);
5155 vsi->num_q_vectors = pf->num_vmdq_msix;
5156 break;
5157
5158 case I40E_VSI_SRIOV:
5159 vsi->alloc_queue_pairs = pf->num_vf_qps;
5160 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
5161 I40E_REQ_DESCRIPTOR_MULTIPLE);
5162 break;
5163
5164 default:
5165 WARN_ON(1);
5166 return -ENODATA;
5167 }
5168
5169 return 0;
5170}
5171
5172/**
5173 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
5174 * @vsi: VSI pointer
5175 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
5176 *
5177 * On error: returns error code (negative)
5178 * On success: returns 0
5179 **/
5180static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
5181{
5182 int size;
5183 int ret = 0;
5184
5185	/* allocate memory for both Tx and Rx ring pointers */
5186 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
5187 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
5188 if (!vsi->tx_rings)
5189 return -ENOMEM;
5190 vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
5191
5192 if (alloc_qvectors) {
5193 /* allocate memory for q_vector pointers */
5194		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
5195 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
5196 if (!vsi->q_vectors) {
5197 ret = -ENOMEM;
5198 goto err_vectors;
5199 }
5200 }
5201 return ret;
5202
5203err_vectors:
5204 kfree(vsi->tx_rings);
5205 return ret;
5206}
5207
5208/**
5209 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
5210 * @pf: board private structure
5211 * @type: type of VSI
5212 *
5213 * On error: returns error code (negative)
5214 * On success: returns vsi index in PF (positive)
5215 **/
5216static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
5217{
5218 int ret = -ENODEV;
5219 struct i40e_vsi *vsi;
5220 int vsi_idx;
5221 int i;
5222
5223 /* Need to protect the allocation of the VSIs at the PF level */
5224 mutex_lock(&pf->switch_mutex);
5225
5226 /* VSI list may be fragmented if VSI creation/destruction has
5227 * been happening. We can afford to do a quick scan to look
5228 * for any free VSIs in the list.
5229 *
5230 * find next empty vsi slot, looping back around if necessary
5231 */
5232 i = pf->next_vsi;
5233 while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
5234 i++;
5235 if (i >= pf->hw.func_caps.num_vsis) {
5236 i = 0;
5237 while (i < pf->next_vsi && pf->vsi[i])
5238 i++;
5239 }
5240
5241 if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
5242 vsi_idx = i; /* Found one! */
5243 } else {
5244 ret = -ENODEV;
5245		goto unlock_pf; /* out of VSI slots! */
5246 }
5247 pf->next_vsi = ++i;
5248
5249 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
5250 if (!vsi) {
5251 ret = -ENOMEM;
5252		goto unlock_pf;
5253 }
5254 vsi->type = type;
5255 vsi->back = pf;
5256 set_bit(__I40E_DOWN, &vsi->state);
5257 vsi->flags = 0;
5258 vsi->idx = vsi_idx;
5259 vsi->rx_itr_setting = pf->rx_itr_default;
5260 vsi->tx_itr_setting = pf->tx_itr_default;
5261 vsi->netdev_registered = false;
5262 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
5263 INIT_LIST_HEAD(&vsi->mac_filter_list);
5264
9f65e15b
AD
5265 ret = i40e_set_num_rings_in_vsi(vsi);
5266 if (ret)
5267 goto err_rings;
5268
5269	ret = i40e_vsi_alloc_arrays(vsi, true);
5270	if (ret)
5271		goto err_rings;
5272
5273 /* Setup default MSIX irq handler for VSI */
5274 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
5275
5276 pf->vsi[vsi_idx] = vsi;
5277 ret = vsi_idx;
5278 goto unlock_pf;
5279
5280err_rings:
5281 pf->next_vsi = i - 1;
5282 kfree(vsi);
5283unlock_pf:
5284 mutex_unlock(&pf->switch_mutex);
5285 return ret;
5286}
5287
5288/**
5289 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
5290 * @vsi: VSI pointer
5291 * @free_qvectors: a bool to specify if q_vectors need to be freed.
5292 *
5293 * Frees the ring and q_vector pointer containers; this function
5294 * returns nothing.
5295 **/
5296static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
5297{
5298 /* free the ring and vector containers */
5299 if (free_qvectors) {
5300 kfree(vsi->q_vectors);
5301 vsi->q_vectors = NULL;
5302 }
5303 kfree(vsi->tx_rings);
5304 vsi->tx_rings = NULL;
5305 vsi->rx_rings = NULL;
5306}
5307
5308/**
5309 * i40e_vsi_clear - Deallocate the VSI provided
5310 * @vsi: the VSI being un-configured
5311 **/
5312static int i40e_vsi_clear(struct i40e_vsi *vsi)
5313{
5314 struct i40e_pf *pf;
5315
5316 if (!vsi)
5317 return 0;
5318
5319 if (!vsi->back)
5320 goto free_vsi;
5321 pf = vsi->back;
5322
5323 mutex_lock(&pf->switch_mutex);
5324 if (!pf->vsi[vsi->idx]) {
5325 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
5326 vsi->idx, vsi->idx, vsi, vsi->type);
5327 goto unlock_vsi;
5328 }
5329
5330 if (pf->vsi[vsi->idx] != vsi) {
5331 dev_err(&pf->pdev->dev,
5332 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
5333 pf->vsi[vsi->idx]->idx,
5334 pf->vsi[vsi->idx],
5335 pf->vsi[vsi->idx]->type,
5336 vsi->idx, vsi, vsi->type);
5337 goto unlock_vsi;
5338 }
5339
5340 /* updates the pf for this cleared vsi */
5341 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
5342 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
5343
5344	i40e_vsi_free_arrays(vsi, true);
5345
5346 pf->vsi[vsi->idx] = NULL;
5347 if (vsi->idx < pf->next_vsi)
5348 pf->next_vsi = vsi->idx;
5349
5350unlock_vsi:
5351 mutex_unlock(&pf->switch_mutex);
5352free_vsi:
5353 kfree(vsi);
5354
5355 return 0;
5356}
5357
5358/**
5359 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
5360 * @vsi: the VSI being cleaned
5361 **/
5362static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
5363{
5364 int i;
5365
5366	if (vsi->tx_rings[0]) {
5367		for (i = 0; i < vsi->num_queue_pairs; i++) {
5368 kfree_rcu(vsi->tx_rings[i], rcu);
5369 vsi->tx_rings[i] = NULL;
5370 vsi->rx_rings[i] = NULL;
5371 }
5372	}
5373}
5374
5375/**
5376 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
5377 * @vsi: the VSI being configured
5378 **/
5379static int i40e_alloc_rings(struct i40e_vsi *vsi)
5380{
5381 struct i40e_pf *pf = vsi->back;
5382 int i;
5383
5384	/* Set basic values in the rings to be used later during open() */
5385	for (i = 0; i < vsi->num_queue_pairs; i++) {
5386 struct i40e_ring *tx_ring;
5387 struct i40e_ring *rx_ring;
5388
5389		/* allocate space for both Tx and Rx in one shot */
5390 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
5391 if (!tx_ring)
5392 goto err_out;
5393
5394 tx_ring->queue_index = i;
5395 tx_ring->reg_idx = vsi->base_queue + i;
5396 tx_ring->ring_active = false;
5397 tx_ring->vsi = vsi;
5398 tx_ring->netdev = vsi->netdev;
5399 tx_ring->dev = &pf->pdev->dev;
5400 tx_ring->count = vsi->num_desc;
5401 tx_ring->size = 0;
5402 tx_ring->dcb_tc = 0;
5403		vsi->tx_rings[i] = tx_ring;
5404
5405		rx_ring = &tx_ring[1];
5406 rx_ring->queue_index = i;
5407 rx_ring->reg_idx = vsi->base_queue + i;
5408 rx_ring->ring_active = false;
5409 rx_ring->vsi = vsi;
5410 rx_ring->netdev = vsi->netdev;
5411 rx_ring->dev = &pf->pdev->dev;
5412 rx_ring->count = vsi->num_desc;
5413 rx_ring->size = 0;
5414 rx_ring->dcb_tc = 0;
5415 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
5416 set_ring_16byte_desc_enabled(rx_ring);
5417 else
5418 clear_ring_16byte_desc_enabled(rx_ring);
5419		vsi->rx_rings[i] = rx_ring;
5420 }
5421
5422 return 0;
5423
5424err_out:
5425 i40e_vsi_clear_rings(vsi);
5426 return -ENOMEM;
5427}
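/* Note on the pairing trick above: one allocation of two ring structs
 * places the Rx ring immediately behind its Tx partner,
 *
 *	tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
 *	rx_ring = &tx_ring[1];	// second element of the same block
 *
 * which is why i40e_vsi_clear_rings() above frees only tx_rings[i].
 */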
5428
5429/**
5430 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
5431 * @pf: board private structure
5432 * @vectors: the number of MSI-X vectors to request
5433 *
5434 * Returns the number of vectors reserved, or error
5435 **/
5436static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
5437{
5438 int err = 0;
5439
5440 pf->num_msix_entries = 0;
5441 while (vectors >= I40E_MIN_MSIX) {
5442 err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
5443 if (err == 0) {
5444 /* good to go */
5445 pf->num_msix_entries = vectors;
5446 break;
5447 } else if (err < 0) {
5448 /* total failure */
5449 dev_info(&pf->pdev->dev,
5450 "MSI-X vector reservation failed: %d\n", err);
5451 vectors = 0;
5452 break;
5453 } else {
5454 /* err > 0 is the hint for retry */
5455 dev_info(&pf->pdev->dev,
5456 "MSI-X vectors wanted %d, retrying with %d\n",
5457 vectors, err);
5458 vectors = err;
5459 }
5460 }
5461
5462 if (vectors > 0 && vectors < I40E_MIN_MSIX) {
5463 dev_info(&pf->pdev->dev,
5464 "Couldn't get enough vectors, only %d available\n",
5465 vectors);
5466 vectors = 0;
5467 }
5468
5469 return vectors;
5470}
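/* Hedged recap of the legacy pci_enable_msix() protocol the loop above
 * relies on: 0 means success, < 0 a hard failure, and > 0 the number
 * of vectors the system could actually support:
 *
 *	err = pci_enable_msix(pf->pdev, pf->msix_entries, want);
 *	if (err > 0)
 *		want = err;	// retry with the suggested count
 */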
5471
5472/**
5473 * i40e_init_msix - Setup the MSIX capability
5474 * @pf: board private structure
5475 *
5476 * Work with the OS to set up the MSIX vectors needed.
5477 *
5478 * Returns 0 on success, negative on failure
5479 **/
5480static int i40e_init_msix(struct i40e_pf *pf)
5481{
5482 i40e_status err = 0;
5483 struct i40e_hw *hw = &pf->hw;
5484 int v_budget, i;
5485 int vec;
5486
5487 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
5488 return -ENODEV;
5489
5490 /* The number of vectors we'll request will be comprised of:
5491 * - Add 1 for "other" cause for Admin Queue events, etc.
5492 * - The number of LAN queue pairs
5493 * - Queues being used for RSS.
5494 * We don't need as many as max_rss_size vectors.
5495	 *   Use rss_size instead in the calculation since that
5496	 *   is governed by the number of CPUs in the system.
5497 * - assumes symmetric Tx/Rx pairing
5498 * - The number of VMDq pairs
5499 * Once we count this up, try the request.
5500 *
5501 * If we can't get what we want, we'll simplify to nearly nothing
5502 * and try again. If that still fails, we punt.
5503 */
f8ff1464 5504 pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
41c445ff
JB
5505 pf->num_vmdq_msix = pf->num_vmdq_qps;
5506 v_budget = 1 + pf->num_lan_msix;
5507 v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
5508 if (pf->flags & I40E_FLAG_FDIR_ENABLED)
5509 v_budget++;
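 /* illustrative arithmetic (not from the source): with 8 LAN
  * vectors, 8 VMDq VSIs at 2 vectors each, and FDIR enabled,
  * v_budget = 1 + 8 + (8 * 2) + 1 = 26 before the cap below.
  */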
5510
5511 /* Scale down if necessary, and the rings will share vectors */
5512 v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
5513
5514 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
5515 GFP_KERNEL);
5516 if (!pf->msix_entries)
5517 return -ENOMEM;
5518
5519 for (i = 0; i < v_budget; i++)
5520 pf->msix_entries[i].entry = i;
5521 vec = i40e_reserve_msix_vectors(pf, v_budget);
5522 if (vec < I40E_MIN_MSIX) {
5523 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
5524 kfree(pf->msix_entries);
5525 pf->msix_entries = NULL;
5526 return -ENODEV;
5527
5528 } else if (vec == I40E_MIN_MSIX) {
5529 /* Adjust for minimal MSIX use */
5530 dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
5531 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
5532 pf->num_vmdq_vsis = 0;
5533 pf->num_vmdq_qps = 0;
5534 pf->num_vmdq_msix = 0;
5535 pf->num_lan_qps = 1;
5536 pf->num_lan_msix = 1;
5537
5538 } else if (vec != v_budget) {
5539 /* Scale vector usage down */
5540 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
5541 vec--; /* reserve the misc vector */
5542
5543 /* partition out the remaining vectors */
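 /* worked example (illustrative): with vec == 5 after reserving
  * the misc vector, the default case gives num_lan_msix =
  * min(5 / 2, num_lan_qps) = 2 and up to 5 - 2 = 3 single-vector
  * VMDq VSIs.
  */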
5544 switch (vec) {
5545 case 2:
5546 pf->num_vmdq_vsis = 1;
5547 pf->num_lan_msix = 1;
5548 break;
5549 case 3:
5550 pf->num_vmdq_vsis = 1;
5551 pf->num_lan_msix = 2;
5552 break;
5553 default:
5554 pf->num_lan_msix = min_t(int, (vec / 2),
5555 pf->num_lan_qps);
5556 pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
5557 I40E_DEFAULT_NUM_VMDQ_VSI);
5558 break;
5559 }
5560 }
5561
5562 return err;
5563}
5564
493fb300
AD
5565/**
5566 * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
5567 * @vsi: the VSI being configured
5568 * @v_idx: index of the vector in the vsi struct
5569 *
5570 * We allocate one q_vector. If allocation fails we return -ENOMEM.
5571 **/
5572static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
5573{
5574 struct i40e_q_vector *q_vector;
5575
5576 /* allocate q_vector */
5577 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
5578 if (!q_vector)
5579 return -ENOMEM;
5580
5581 q_vector->vsi = vsi;
5582 q_vector->v_idx = v_idx;
5583 cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
5584 if (vsi->netdev)
5585 netif_napi_add(vsi->netdev, &q_vector->napi,
5586 i40e_napi_poll, vsi->work_limit);
5587
cd0b6fa6
AD
5588 q_vector->rx.latency_range = I40E_LOW_LATENCY;
5589 q_vector->tx.latency_range = I40E_LOW_LATENCY;
5590
493fb300
AD
5591 /* tie q_vector and vsi together */
5592 vsi->q_vectors[v_idx] = q_vector;
5593
5594 return 0;
5595}
5596
41c445ff
JB
5597/**
5598 * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
5599 * @vsi: the VSI being configured
5600 *
5601 * We allocate one q_vector per queue interrupt. If allocation fails we
5602 * return -ENOMEM.
5603 **/
5604static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
5605{
5606 struct i40e_pf *pf = vsi->back;
5607 int v_idx, num_q_vectors;
493fb300 5608 int err;
41c445ff
JB
5609
5610 /* if not MSIX, give the one vector only to the LAN VSI */
5611 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
5612 num_q_vectors = vsi->num_q_vectors;
5613 else if (vsi == pf->vsi[pf->lan_vsi])
5614 num_q_vectors = 1;
5615 else
5616 return -EINVAL;
5617
41c445ff 5618 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
493fb300
AD
5619 err = i40e_alloc_q_vector(vsi, v_idx);
5620 if (err)
5621 goto err_out;
41c445ff
JB
5622 }
5623
5624 return 0;
493fb300
AD
5625
5626err_out:
5627 while (v_idx--)
5628 i40e_free_q_vector(vsi, v_idx);
5629
5630 return err;
41c445ff
JB
5631}
5632
5633/**
5634 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
5635 * @pf: board private structure to initialize
5636 **/
5637static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
5638{
5639 int err = 0;
5640
5641 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
5642 err = i40e_init_msix(pf);
5643 if (err) {
958a3e3b
SN
5644 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
5645 I40E_FLAG_RSS_ENABLED |
41c445ff
JB
5646 I40E_FLAG_MQ_ENABLED |
5647 I40E_FLAG_DCB_ENABLED |
5648 I40E_FLAG_SRIOV_ENABLED |
5649 I40E_FLAG_FDIR_ENABLED |
5650 I40E_FLAG_FDIR_ATR_ENABLED |
5651 I40E_FLAG_VMDQ_ENABLED);
5652
5653 /* rework the queue expectations without MSIX */
5654 i40e_determine_queue_usage(pf);
5655 }
5656 }
5657
5658 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
5659 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
958a3e3b 5660 dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
41c445ff
JB
5661 err = pci_enable_msi(pf->pdev);
5662 if (err) {
958a3e3b 5663 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
41c445ff
JB
5664 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
5665 }
5666 }
5667
958a3e3b
SN
5668 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
5669 dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
5670
41c445ff
JB
5671 /* track first vector for misc interrupts */
5672 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
5673}
5674
5675/**
5676 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
5677 * @pf: board private structure
5678 *
5679 * This sets up the handler for MSIX 0, which is used to manage the
5680 * non-queue interrupts, e.g. AdminQ and errors. This is not used
5681 * when in MSI or Legacy interrupt mode.
5682 **/
5683static int i40e_setup_misc_vector(struct i40e_pf *pf)
5684{
5685 struct i40e_hw *hw = &pf->hw;
5686 int err = 0;
5687
5688 /* Only request the irq if this is the first time through, and
5689 * not when we're rebuilding after a Reset
5690 */
5691 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
5692 err = request_irq(pf->msix_entries[0].vector,
5693 i40e_intr, 0, pf->misc_int_name, pf);
5694 if (err) {
5695 dev_info(&pf->pdev->dev,
5696 "request_irq for msix_misc failed: %d\n", err);
5697 return -EFAULT;
5698 }
5699 }
5700
5701 i40e_enable_misc_int_causes(hw);
5702
5703 /* associate no queues to the misc vector */
5704 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
5705 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
5706
5707 i40e_flush(hw);
5708
5709 i40e_irq_dynamic_enable_icr0(pf);
5710
5711 return err;
5712}
5713
5714/**
5715 * i40e_config_rss - Prepare for RSS if used
5716 * @pf: board private structure
5717 **/
5718static int i40e_config_rss(struct i40e_pf *pf)
5719{
41c445ff
JB
5720 /* Set of random keys generated using kernel random number generator */
5721 static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
5722 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
5723 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
5724 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
4617e8c0
ASJ
5725 struct i40e_hw *hw = &pf->hw;
5726 u32 lut = 0;
5727 int i, j;
5728 u64 hena;
41c445ff
JB
5729
5730 /* Fill out hash function seed */
5731 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
5732 wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
5733
5734 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
5735 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
5736 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
12dc4fe3 5737 hena |= I40E_DEFAULT_RSS_HENA;
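 /* HENA is a 64-bit enable mask split across two 32-bit
  * registers: read both halves, OR in the driver defaults,
  * then write both halves back.
  */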
41c445ff
JB
5738 wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
5739 wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
5740
 5741 /* Populate the LUT with the max number of queues in round-robin fashion */
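 /* example (assuming rss_size = 4): j cycles 0..3 and each HLUT
  * register packs four byte-wide entries, so the first write
  * below is HLUT(0) = 0x00010203.
  */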
5742 for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
5743
5744 /* The assumption is that lan qp count will be the highest
5745 * qp count for any PF VSI that needs RSS.
5746 * If multiple VSIs need RSS support, all the qp counts
5747 * for those VSIs should be a power of 2 for RSS to work.
 5748 * If the LAN VSI is the only consumer of RSS, this requirement
 5749 * does not apply.
5750 */
5751 if (j == pf->rss_size)
5752 j = 0;
5753 /* lut = 4-byte sliding window of 4 lut entries */
5754 lut = (lut << 8) | (j &
5755 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
5756 /* On i = 3, we have 4 entries in lut; write to the register */
5757 if ((i & 3) == 3)
5758 wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
5759 }
5760 i40e_flush(hw);
5761
5762 return 0;
5763}
5764
f8ff1464
ASJ
5765/**
5766 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
5767 * @pf: board private structure
5768 * @queue_count: the requested queue count for rss.
5769 *
 5770 * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
 5771 * count, which may differ from the requested queue count.
5772 **/
5773int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
5774{
5775 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
5776 return 0;
5777
5778 queue_count = min_t(int, queue_count, pf->rss_size_max);
5779 queue_count = rounddown_pow_of_two(queue_count);
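 /* e.g. a request for 6 queues on a part capped at 16 stays at 6,
  * then rounds down to 4, the nearest power of two.
  */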
5780
5781 if (queue_count != pf->rss_size) {
5782 if (pf->queues_left < (queue_count - pf->rss_size)) {
5783 dev_info(&pf->pdev->dev,
5784 "Not enough queues to do RSS on %d queues: remaining queues %d\n",
5785 queue_count, pf->queues_left);
5786 return pf->rss_size;
5787 }
5788 i40e_prep_for_reset(pf);
5789
5790 pf->num_lan_qps += (queue_count - pf->rss_size);
5791 pf->queues_left -= (queue_count - pf->rss_size);
5792 pf->rss_size = queue_count;
5793
5794 i40e_reset_and_rebuild(pf, true);
5795 i40e_config_rss(pf);
5796 }
5797 dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
5798 return pf->rss_size;
5799}
5800
41c445ff
JB
5801/**
5802 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
5803 * @pf: board private structure to initialize
5804 *
5805 * i40e_sw_init initializes the Adapter private data structure.
5806 * Fields are initialized based on PCI device information and
5807 * OS network device settings (MTU size).
5808 **/
5809static int i40e_sw_init(struct i40e_pf *pf)
5810{
5811 int err = 0;
5812 int size;
5813
5814 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
5815 (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
2759997b 5816 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
41c445ff
JB
5817 if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
5818 if (I40E_DEBUG_USER & debug)
5819 pf->hw.debug_mask = debug;
5820 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
5821 I40E_DEFAULT_MSG_ENABLE);
5822 }
5823
5824 /* Set default capability flags */
5825 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
5826 I40E_FLAG_MSI_ENABLED |
5827 I40E_FLAG_MSIX_ENABLED |
5828 I40E_FLAG_RX_PS_ENABLED |
5829 I40E_FLAG_MQ_ENABLED |
5830 I40E_FLAG_RX_1BUF_ENABLED;
5831
7134f9ce
JB
5832 /* Depending on PF configurations, it is possible that the RSS
5833 * maximum might end up larger than the available queues
5834 */
41c445ff 5835 pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
7134f9ce
JB
5836 pf->rss_size_max = min_t(int, pf->rss_size_max,
5837 pf->hw.func_caps.num_tx_qp);
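 /* e.g. a 9-bit LUT entry width would give a 512-entry ceiling,
  * clamped here to the Tx queue pairs this function owns.
  */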
41c445ff
JB
5838 if (pf->hw.func_caps.rss) {
5839 pf->flags |= I40E_FLAG_RSS_ENABLED;
bf051a3b 5840 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
41c445ff
JB
5841 } else {
5842 pf->rss_size = 1;
5843 }
5844
5845 if (pf->hw.func_caps.dcb)
5846 pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
5847 else
5848 pf->num_tc_qps = 0;
5849
5850 if (pf->hw.func_caps.fd) {
5851 /* FW/NVM is not yet fixed in this regard */
5852 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
5853 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
5854 pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
5855 dev_info(&pf->pdev->dev,
5856 "Flow Director ATR mode Enabled\n");
5857 pf->flags |= I40E_FLAG_FDIR_ENABLED;
5858 dev_info(&pf->pdev->dev,
5859 "Flow Director Side Band mode Enabled\n");
5860 pf->fdir_pf_filter_count =
5861 pf->hw.func_caps.fd_filters_guaranteed;
5862 }
5863 } else {
5864 pf->fdir_pf_filter_count = 0;
5865 }
5866
5867 if (pf->hw.func_caps.vmdq) {
5868 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
5869 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
5870 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
5871 }
5872
5873 /* MFP mode enabled */
5874 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
5875 pf->flags |= I40E_FLAG_MFP_ENABLED;
5876 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
5877 }
5878
5879#ifdef CONFIG_PCI_IOV
5880 if (pf->hw.func_caps.num_vfs) {
5881 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
5882 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
5883 pf->num_req_vfs = min_t(int,
5884 pf->hw.func_caps.num_vfs,
5885 I40E_MAX_VF_COUNT);
4a38d09c
ASJ
5886 dev_info(&pf->pdev->dev,
5887 "Number of VFs being requested for PF[%d] = %d\n",
5888 pf->hw.pf_id, pf->num_req_vfs);
41c445ff
JB
5889 }
5890#endif /* CONFIG_PCI_IOV */
5891 pf->eeprom_version = 0xDEAD;
5892 pf->lan_veb = I40E_NO_VEB;
5893 pf->lan_vsi = I40E_NO_VSI;
5894
5895 /* set up queue assignment tracking */
5896 size = sizeof(struct i40e_lump_tracking)
5897 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
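 /* one u16 tracking slot per queue pair, appended after the
  * struct header in flexible-array style */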
5898 pf->qp_pile = kzalloc(size, GFP_KERNEL);
5899 if (!pf->qp_pile) {
5900 err = -ENOMEM;
5901 goto sw_init_done;
5902 }
5903 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
5904 pf->qp_pile->search_hint = 0;
5905
5906 /* set up vector assignment tracking */
5907 size = sizeof(struct i40e_lump_tracking)
5908 + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
5909 pf->irq_pile = kzalloc(size, GFP_KERNEL);
5910 if (!pf->irq_pile) {
5911 kfree(pf->qp_pile);
5912 err = -ENOMEM;
5913 goto sw_init_done;
5914 }
5915 pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
5916 pf->irq_pile->search_hint = 0;
5917
5918 mutex_init(&pf->switch_mutex);
5919
5920sw_init_done:
5921 return err;
5922}
5923
5924/**
5925 * i40e_set_features - set the netdev feature flags
5926 * @netdev: ptr to the netdev being adjusted
5927 * @features: the feature set that the stack is suggesting
5928 **/
5929static int i40e_set_features(struct net_device *netdev,
5930 netdev_features_t features)
5931{
5932 struct i40e_netdev_priv *np = netdev_priv(netdev);
5933 struct i40e_vsi *vsi = np->vsi;
5934
5935 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5936 i40e_vlan_stripping_enable(vsi);
5937 else
5938 i40e_vlan_stripping_disable(vsi);
5939
5940 return 0;
5941}
5942
a1c9a9d9
JK
5943#ifdef CONFIG_I40E_VXLAN
5944/**
 5945 * i40e_get_vxlan_port_idx - Look up a UDP port possibly offloaded for Rx
5946 * @pf: board private structure
5947 * @port: The UDP port to look up
5948 *
5949 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
5950 **/
5951static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
5952{
5953 u8 i;
5954
5955 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
5956 if (pf->vxlan_ports[i] == port)
5957 return i;
5958 }
5959
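 /* falling out of the loop leaves i == I40E_MAX_PF_UDP_OFFLOAD_PORTS,
  * the not-found sentinel; callers also probe with port 0 to find
  * a free slot */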
5960 return i;
5961}
5962
5963/**
5964 * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
5965 * @netdev: This physical port's netdev
5966 * @sa_family: Socket Family that VXLAN is notifying us about
5967 * @port: New UDP port number that VXLAN started listening to
5968 **/
5969static void i40e_add_vxlan_port(struct net_device *netdev,
5970 sa_family_t sa_family, __be16 port)
5971{
5972 struct i40e_netdev_priv *np = netdev_priv(netdev);
5973 struct i40e_vsi *vsi = np->vsi;
5974 struct i40e_pf *pf = vsi->back;
5975 u8 next_idx;
5976 u8 idx;
5977
5978 if (sa_family == AF_INET6)
5979 return;
5980
5981 idx = i40e_get_vxlan_port_idx(pf, port);
5982
5983 /* Check if port already exists */
5984 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
5985 netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
5986 return;
5987 }
5988
5989 /* Now check if there is space to add the new port */
5990 next_idx = i40e_get_vxlan_port_idx(pf, 0);
5991
5992 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
5993 netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
5994 ntohs(port));
5995 return;
5996 }
5997
5998 /* New port: add it and mark its index in the bitmap */
5999 pf->vxlan_ports[next_idx] = port;
6000 pf->pending_vxlan_bitmap |= (1 << next_idx);
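 /* the AQ update itself is deferred: the sync flag below lets the
  * service task push the pending ports to the hardware (presumably
  * via the VXLAN filter sync subtask).
  */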
6001
6002 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
6003}
6004
6005/**
6006 * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
6007 * @netdev: This physical port's netdev
6008 * @sa_family: Socket Family that VXLAN is notifying us about
6009 * @port: UDP port number that VXLAN stopped listening to
6010 **/
6011static void i40e_del_vxlan_port(struct net_device *netdev,
6012 sa_family_t sa_family, __be16 port)
6013{
6014 struct i40e_netdev_priv *np = netdev_priv(netdev);
6015 struct i40e_vsi *vsi = np->vsi;
6016 struct i40e_pf *pf = vsi->back;
6017 u8 idx;
6018
6019 if (sa_family == AF_INET6)
6020 return;
6021
6022 idx = i40e_get_vxlan_port_idx(pf, port);
6023
6024 /* Check if port already exists */
6025 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
6026 /* if port exists, set it to 0 (mark for deletion)
6027 * and make it pending
6028 */
6029 pf->vxlan_ports[idx] = 0;
6030
6031 pf->pending_vxlan_bitmap |= (1 << idx);
6032
6033 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
6034 } else {
6035 netdev_warn(netdev, "Port %d was not found, not deleting\n",
6036 ntohs(port));
6037 }
6038}
6039
6040#endif
41c445ff
JB
6041static const struct net_device_ops i40e_netdev_ops = {
6042 .ndo_open = i40e_open,
6043 .ndo_stop = i40e_close,
6044 .ndo_start_xmit = i40e_lan_xmit_frame,
6045 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
6046 .ndo_set_rx_mode = i40e_set_rx_mode,
6047 .ndo_validate_addr = eth_validate_addr,
6048 .ndo_set_mac_address = i40e_set_mac,
6049 .ndo_change_mtu = i40e_change_mtu,
6050 .ndo_tx_timeout = i40e_tx_timeout,
6051 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
6052 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
6053#ifdef CONFIG_NET_POLL_CONTROLLER
6054 .ndo_poll_controller = i40e_netpoll,
6055#endif
6056 .ndo_setup_tc = i40e_setup_tc,
6057 .ndo_set_features = i40e_set_features,
6058 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
6059 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
6060 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
6061 .ndo_get_vf_config = i40e_ndo_get_vf_config,
a1c9a9d9
JK
6062#ifdef CONFIG_I40E_VXLAN
6063 .ndo_add_vxlan_port = i40e_add_vxlan_port,
6064 .ndo_del_vxlan_port = i40e_del_vxlan_port,
6065#endif
41c445ff
JB
6066};
6067
6068/**
6069 * i40e_config_netdev - Setup the netdev flags
6070 * @vsi: the VSI being configured
6071 *
6072 * Returns 0 on success, negative value on failure
6073 **/
6074static int i40e_config_netdev(struct i40e_vsi *vsi)
6075{
6076 struct i40e_pf *pf = vsi->back;
6077 struct i40e_hw *hw = &pf->hw;
6078 struct i40e_netdev_priv *np;
6079 struct net_device *netdev;
6080 u8 mac_addr[ETH_ALEN];
6081 int etherdev_size;
6082
6083 etherdev_size = sizeof(struct i40e_netdev_priv);
f8ff1464 6084 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
41c445ff
JB
6085 if (!netdev)
6086 return -ENOMEM;
6087
6088 vsi->netdev = netdev;
6089 np = netdev_priv(netdev);
6090 np->vsi = vsi;
6091
6092 netdev->hw_enc_features = NETIF_F_IP_CSUM |
6093 NETIF_F_GSO_UDP_TUNNEL |
6094 NETIF_F_TSO |
6095 NETIF_F_SG;
6096
6097 netdev->features = NETIF_F_SG |
6098 NETIF_F_IP_CSUM |
6099 NETIF_F_SCTP_CSUM |
6100 NETIF_F_HIGHDMA |
6101 NETIF_F_GSO_UDP_TUNNEL |
6102 NETIF_F_HW_VLAN_CTAG_TX |
6103 NETIF_F_HW_VLAN_CTAG_RX |
6104 NETIF_F_HW_VLAN_CTAG_FILTER |
6105 NETIF_F_IPV6_CSUM |
6106 NETIF_F_TSO |
6107 NETIF_F_TSO6 |
6108 NETIF_F_RXCSUM |
6109 NETIF_F_RXHASH |
6110 0;
6111
6112 /* copy netdev features into list of user selectable features */
6113 netdev->hw_features |= netdev->features;
6114
6115 if (vsi->type == I40E_VSI_MAIN) {
6116 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
6117 memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
6118 } else {
6119 /* relate the VSI_VMDQ name to the VSI_MAIN name */
6120 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
6121 pf->vsi[pf->lan_vsi]->netdev->name);
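 /* this leaves a "%d" template in the name, e.g. "eth0v%d", which
  * the core expands to eth0v0, eth0v1, ... at register_netdev()
  * time.
  */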
6122 random_ether_addr(mac_addr);
6123 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
6124 }
6125
6126 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
6127 memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
6128 /* vlan gets same features (except vlan offload)
6129 * after any tweaks for specific VSI types
6130 */
6131 netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
6132 NETIF_F_HW_VLAN_CTAG_RX |
6133 NETIF_F_HW_VLAN_CTAG_FILTER);
6134 netdev->priv_flags |= IFF_UNICAST_FLT;
6135 netdev->priv_flags |= IFF_SUPP_NOFCS;
6136 /* Setup netdev TC information */
6137 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
6138
6139 netdev->netdev_ops = &i40e_netdev_ops;
6140 netdev->watchdog_timeo = 5 * HZ;
6141 i40e_set_ethtool_ops(netdev);
6142
6143 return 0;
6144}
6145
6146/**
6147 * i40e_vsi_delete - Delete a VSI from the switch
6148 * @vsi: the VSI being removed
6149 *
6150 * Returns 0 on success, negative value on failure
6151 **/
6152static void i40e_vsi_delete(struct i40e_vsi *vsi)
6153{
 6154 /* removing the default VSI is not allowed */
6155 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
6156 return;
6157
6158 /* there is no HW VSI for FDIR */
6159 if (vsi->type == I40E_VSI_FDIR)
6160 return;
6161
6162 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
6163 return;
6164}
6165
6166/**
6167 * i40e_add_vsi - Add a VSI to the switch
6168 * @vsi: the VSI being configured
6169 *
6170 * This initializes a VSI context depending on the VSI type to be added and
6171 * passes it down to the add_vsi aq command.
6172 **/
6173static int i40e_add_vsi(struct i40e_vsi *vsi)
6174{
6175 int ret = -ENODEV;
6176 struct i40e_mac_filter *f, *ftmp;
6177 struct i40e_pf *pf = vsi->back;
6178 struct i40e_hw *hw = &pf->hw;
6179 struct i40e_vsi_context ctxt;
6180 u8 enabled_tc = 0x1; /* TC0 enabled */
6181 int f_count = 0;
6182
6183 memset(&ctxt, 0, sizeof(ctxt));
6184 switch (vsi->type) {
6185 case I40E_VSI_MAIN:
6186 /* The PF's main VSI is already setup as part of the
6187 * device initialization, so we'll not bother with
6188 * the add_vsi call, but we will retrieve the current
6189 * VSI context.
6190 */
6191 ctxt.seid = pf->main_vsi_seid;
6192 ctxt.pf_num = pf->hw.pf_id;
6193 ctxt.vf_num = 0;
6194 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6195 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6196 if (ret) {
6197 dev_info(&pf->pdev->dev,
6198 "couldn't get pf vsi config, err %d, aq_err %d\n",
6199 ret, pf->hw.aq.asq_last_status);
6200 return -ENOENT;
6201 }
6202 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
6203 vsi->info.valid_sections = 0;
6204
6205 vsi->seid = ctxt.seid;
6206 vsi->id = ctxt.vsi_number;
6207
6208 enabled_tc = i40e_pf_get_tc_map(pf);
6209
6210 /* MFP mode setup queue map and update VSI */
6211 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
6212 memset(&ctxt, 0, sizeof(ctxt));
6213 ctxt.seid = pf->main_vsi_seid;
6214 ctxt.pf_num = pf->hw.pf_id;
6215 ctxt.vf_num = 0;
6216 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
6217 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
6218 if (ret) {
6219 dev_info(&pf->pdev->dev,
6220 "update vsi failed, aq_err=%d\n",
6221 pf->hw.aq.asq_last_status);
6222 ret = -ENOENT;
6223 goto err;
6224 }
6225 /* update the local VSI info queue map */
6226 i40e_vsi_update_queue_map(vsi, &ctxt);
6227 vsi->info.valid_sections = 0;
6228 } else {
6229 /* Default/Main VSI is only enabled for TC0
6230 * reconfigure it to enable all TCs that are
6231 * available on the port in SFP mode.
6232 */
6233 ret = i40e_vsi_config_tc(vsi, enabled_tc);
6234 if (ret) {
6235 dev_info(&pf->pdev->dev,
6236 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
6237 enabled_tc, ret,
6238 pf->hw.aq.asq_last_status);
6239 ret = -ENOENT;
6240 }
6241 }
6242 break;
6243
6244 case I40E_VSI_FDIR:
6245 /* no queue mapping or actual HW VSI needed */
6246 vsi->info.valid_sections = 0;
6247 vsi->seid = 0;
6248 vsi->id = 0;
6249 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6250 return 0;
6252
6253 case I40E_VSI_VMDQ2:
6254 ctxt.pf_num = hw->pf_id;
6255 ctxt.vf_num = 0;
6256 ctxt.uplink_seid = vsi->uplink_seid;
6257 ctxt.connection_type = 0x1; /* regular data port */
6258 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
6259
6260 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6261
6262 /* This VSI is connected to VEB so the switch_id
6263 * should be set to zero by default.
6264 */
6265 ctxt.info.switch_id = 0;
6266 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
6267 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6268
6269 /* Setup the VSI tx/rx queue map for TC0 only for now */
6270 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6271 break;
6272
6273 case I40E_VSI_SRIOV:
6274 ctxt.pf_num = hw->pf_id;
6275 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
6276 ctxt.uplink_seid = vsi->uplink_seid;
6277 ctxt.connection_type = 0x1; /* regular data port */
6278 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
6279
6280 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6281
6282 /* This VSI is connected to VEB so the switch_id
6283 * should be set to zero by default.
6284 */
6285 ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6286
6287 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
6288 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
6289 /* Setup the VSI tx/rx queue map for TC0 only for now */
6290 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
6291 break;
6292
6293 default:
6294 return -ENODEV;
6295 }
6296
6297 if (vsi->type != I40E_VSI_MAIN) {
6298 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
6299 if (ret) {
6300 dev_info(&vsi->back->pdev->dev,
6301 "add vsi failed, aq_err=%d\n",
6302 vsi->back->hw.aq.asq_last_status);
6303 ret = -ENOENT;
6304 goto err;
6305 }
6306 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
6307 vsi->info.valid_sections = 0;
6308 vsi->seid = ctxt.seid;
6309 vsi->id = ctxt.vsi_number;
6310 }
6311
6312 /* If macvlan filters already exist, force them to get loaded */
6313 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
6314 f->changed = true;
6315 f_count++;
6316 }
6317 if (f_count) {
6318 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
6319 pf->flags |= I40E_FLAG_FILTER_SYNC;
6320 }
6321
6322 /* Update VSI BW information */
6323 ret = i40e_vsi_get_bw_info(vsi);
6324 if (ret) {
6325 dev_info(&pf->pdev->dev,
6326 "couldn't get vsi bw info, err %d, aq_err %d\n",
6327 ret, pf->hw.aq.asq_last_status);
6328 /* VSI is already added so not tearing that up */
6329 ret = 0;
6330 }
6331
6332err:
6333 return ret;
6334}
6335
6336/**
6337 * i40e_vsi_release - Delete a VSI and free its resources
6338 * @vsi: the VSI being removed
6339 *
6340 * Returns 0 on success or < 0 on error
6341 **/
6342int i40e_vsi_release(struct i40e_vsi *vsi)
6343{
6344 struct i40e_mac_filter *f, *ftmp;
6345 struct i40e_veb *veb = NULL;
6346 struct i40e_pf *pf;
6347 u16 uplink_seid;
6348 int i, n;
6349
6350 pf = vsi->back;
6351
6352 /* release of a VEB-owner or last VSI is not allowed */
6353 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
6354 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
6355 vsi->seid, vsi->uplink_seid);
6356 return -ENODEV;
6357 }
6358 if (vsi == pf->vsi[pf->lan_vsi] &&
6359 !test_bit(__I40E_DOWN, &pf->state)) {
6360 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
6361 return -ENODEV;
6362 }
6363
6364 uplink_seid = vsi->uplink_seid;
6365 if (vsi->type != I40E_VSI_SRIOV) {
6366 if (vsi->netdev_registered) {
6367 vsi->netdev_registered = false;
6368 if (vsi->netdev) {
6369 /* results in a call to i40e_close() */
6370 unregister_netdev(vsi->netdev);
6371 free_netdev(vsi->netdev);
6372 vsi->netdev = NULL;
6373 }
6374 } else {
6375 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
6376 i40e_down(vsi);
6377 i40e_vsi_free_irq(vsi);
6378 i40e_vsi_free_tx_resources(vsi);
6379 i40e_vsi_free_rx_resources(vsi);
6380 }
6381 i40e_vsi_disable_irq(vsi);
6382 }
6383
6384 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
6385 i40e_del_filter(vsi, f->macaddr, f->vlan,
6386 f->is_vf, f->is_netdev);
6387 i40e_sync_vsi_filters(vsi);
6388
6389 i40e_vsi_delete(vsi);
6390 i40e_vsi_free_q_vectors(vsi);
6391 i40e_vsi_clear_rings(vsi);
6392 i40e_vsi_clear(vsi);
6393
6394 /* If this was the last thing on the VEB, except for the
6395 * controlling VSI, remove the VEB, which puts the controlling
6396 * VSI onto the next level down in the switch.
6397 *
6398 * Well, okay, there's one more exception here: don't remove
6399 * the orphan VEBs yet. We'll wait for an explicit remove request
6400 * from up the network stack.
6401 */
6402 for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6403 if (pf->vsi[i] &&
6404 pf->vsi[i]->uplink_seid == uplink_seid &&
6405 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
6406 n++; /* count the VSIs */
6407 }
6408 }
6409 for (i = 0; i < I40E_MAX_VEB; i++) {
6410 if (!pf->veb[i])
6411 continue;
6412 if (pf->veb[i]->uplink_seid == uplink_seid)
6413 n++; /* count the VEBs */
6414 if (pf->veb[i]->seid == uplink_seid)
6415 veb = pf->veb[i];
6416 }
6417 if (n == 0 && veb && veb->uplink_seid != 0)
6418 i40e_veb_release(veb);
6419
6420 return 0;
6421}
6422
6423/**
6424 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
6425 * @vsi: ptr to the VSI
6426 *
6427 * This should only be called after i40e_vsi_mem_alloc() which allocates the
6428 * corresponding SW VSI structure and initializes num_queue_pairs for the
6429 * newly allocated VSI.
6430 *
6431 * Returns 0 on success or negative on failure
6432 **/
6433static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
6434{
6435 int ret = -ENOENT;
6436 struct i40e_pf *pf = vsi->back;
6437
493fb300 6438 if (vsi->q_vectors[0]) {
41c445ff
JB
6439 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
6440 vsi->seid);
6441 return -EEXIST;
6442 }
6443
6444 if (vsi->base_vector) {
6445 dev_info(&pf->pdev->dev,
6446 "VSI %d has non-zero base vector %d\n",
6447 vsi->seid, vsi->base_vector);
6448 return -EEXIST;
6449 }
6450
6451 ret = i40e_alloc_q_vectors(vsi);
6452 if (ret) {
6453 dev_info(&pf->pdev->dev,
6454 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
6455 vsi->num_q_vectors, vsi->seid, ret);
6456 vsi->num_q_vectors = 0;
6457 goto vector_setup_out;
6458 }
6459
958a3e3b
SN
6460 if (vsi->num_q_vectors)
6461 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
6462 vsi->num_q_vectors, vsi->idx);
41c445ff
JB
6463 if (vsi->base_vector < 0) {
6464 dev_info(&pf->pdev->dev,
6465 "failed to get q tracking for VSI %d, err=%d\n",
6466 vsi->seid, vsi->base_vector);
6467 i40e_vsi_free_q_vectors(vsi);
6468 ret = -ENOENT;
6469 goto vector_setup_out;
6470 }
6471
6472vector_setup_out:
6473 return ret;
6474}
6475
bc7d338f
ASJ
6476/**
 6477 * i40e_vsi_reinit_setup - return a VSI's resources to the pile and reallocate
6478 * @vsi: pointer to the vsi.
6479 *
6480 * This re-allocates a vsi's queue resources.
6481 *
6482 * Returns pointer to the successfully allocated and configured VSI sw struct
6483 * on success, otherwise returns NULL on failure.
6484 **/
6485static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
6486{
6487 struct i40e_pf *pf = vsi->back;
6488 u8 enabled_tc;
6489 int ret;
6490
6491 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
6492 i40e_vsi_clear_rings(vsi);
6493
6494 i40e_vsi_free_arrays(vsi, false);
6495 i40e_set_num_rings_in_vsi(vsi);
6496 ret = i40e_vsi_alloc_arrays(vsi, false);
6497 if (ret)
6498 goto err_vsi;
6499
6500 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
6501 if (ret < 0) {
6502 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
6503 vsi->seid, ret);
6504 goto err_vsi;
6505 }
6506 vsi->base_queue = ret;
6507
6508 /* Update the FW view of the VSI. Force a reset of TC and queue
6509 * layout configurations.
6510 */
6511 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
6512 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
6513 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
6514 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
6515
6516 /* assign it some queues */
6517 ret = i40e_alloc_rings(vsi);
6518 if (ret)
6519 goto err_rings;
6520
6521 /* map all of the rings to the q_vectors */
6522 i40e_vsi_map_rings_to_vectors(vsi);
6523 return vsi;
6524
6525err_rings:
6526 i40e_vsi_free_q_vectors(vsi);
6527 if (vsi->netdev_registered) {
6528 vsi->netdev_registered = false;
6529 unregister_netdev(vsi->netdev);
6530 free_netdev(vsi->netdev);
6531 vsi->netdev = NULL;
6532 }
6533 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
6534err_vsi:
6535 i40e_vsi_clear(vsi);
6536 return NULL;
6537}
6538
41c445ff
JB
6539/**
6540 * i40e_vsi_setup - Set up a VSI by a given type
6541 * @pf: board private structure
6542 * @type: VSI type
6543 * @uplink_seid: the switch element to link to
6544 * @param1: usage depends upon VSI type. For VF types, indicates VF id
6545 *
 6546 * This allocates the sw VSI structure and its queue resources, then adds the
 6547 * VSI to the identified VEB.
6548 *
 6549 * Returns pointer to the successfully allocated and configured VSI sw struct on
6550 * success, otherwise returns NULL on failure.
6551 **/
6552struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
6553 u16 uplink_seid, u32 param1)
6554{
6555 struct i40e_vsi *vsi = NULL;
6556 struct i40e_veb *veb = NULL;
6557 int ret, i;
6558 int v_idx;
6559
6560 /* The requested uplink_seid must be either
6561 * - the PF's port seid
6562 * no VEB is needed because this is the PF
6563 * or this is a Flow Director special case VSI
6564 * - seid of an existing VEB
6565 * - seid of a VSI that owns an existing VEB
6566 * - seid of a VSI that doesn't own a VEB
6567 * a new VEB is created and the VSI becomes the owner
6568 * - seid of the PF VSI, which is what creates the first VEB
6569 * this is a special case of the previous
6570 *
6571 * Find which uplink_seid we were given and create a new VEB if needed
6572 */
6573 for (i = 0; i < I40E_MAX_VEB; i++) {
6574 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
6575 veb = pf->veb[i];
6576 break;
6577 }
6578 }
6579
6580 if (!veb && uplink_seid != pf->mac_seid) {
6581
6582 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6583 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
6584 vsi = pf->vsi[i];
6585 break;
6586 }
6587 }
6588 if (!vsi) {
6589 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
6590 uplink_seid);
6591 return NULL;
6592 }
6593
6594 if (vsi->uplink_seid == pf->mac_seid)
6595 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
6596 vsi->tc_config.enabled_tc);
6597 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
6598 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
6599 vsi->tc_config.enabled_tc);
6600
6601 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
6602 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
6603 veb = pf->veb[i];
6604 }
6605 if (!veb) {
6606 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
6607 return NULL;
6608 }
6609
6610 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
6611 uplink_seid = veb->seid;
6612 }
6613
6614 /* get vsi sw struct */
6615 v_idx = i40e_vsi_mem_alloc(pf, type);
6616 if (v_idx < 0)
6617 goto err_alloc;
6618 vsi = pf->vsi[v_idx];
6619 vsi->type = type;
6620 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
6621
6622 if (type == I40E_VSI_MAIN)
6623 pf->lan_vsi = v_idx;
6624 else if (type == I40E_VSI_SRIOV)
6625 vsi->vf_id = param1;
6626 /* assign it some queues */
6627 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
6628 if (ret < 0) {
6629 dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
6630 vsi->seid, ret);
6631 goto err_vsi;
6632 }
6633 vsi->base_queue = ret;
6634
6635 /* get a VSI from the hardware */
6636 vsi->uplink_seid = uplink_seid;
6637 ret = i40e_add_vsi(vsi);
6638 if (ret)
6639 goto err_vsi;
6640
6641 switch (vsi->type) {
6642 /* setup the netdev if needed */
6643 case I40E_VSI_MAIN:
6644 case I40E_VSI_VMDQ2:
6645 ret = i40e_config_netdev(vsi);
6646 if (ret)
6647 goto err_netdev;
6648 ret = register_netdev(vsi->netdev);
6649 if (ret)
6650 goto err_netdev;
6651 vsi->netdev_registered = true;
6652 netif_carrier_off(vsi->netdev);
6653 /* fall through */
6654
6655 case I40E_VSI_FDIR:
6656 /* set up vectors and rings if needed */
6657 ret = i40e_vsi_setup_vectors(vsi);
6658 if (ret)
6659 goto err_msix;
6660
6661 ret = i40e_alloc_rings(vsi);
6662 if (ret)
6663 goto err_rings;
6664
6665 /* map all of the rings to the q_vectors */
6666 i40e_vsi_map_rings_to_vectors(vsi);
6667
6668 i40e_vsi_reset_stats(vsi);
6669 break;
6670
6671 default:
6672 /* no netdev or rings for the other VSI types */
6673 break;
6674 }
6675
6676 return vsi;
6677
6678err_rings:
6679 i40e_vsi_free_q_vectors(vsi);
6680err_msix:
6681 if (vsi->netdev_registered) {
6682 vsi->netdev_registered = false;
6683 unregister_netdev(vsi->netdev);
6684 free_netdev(vsi->netdev);
6685 vsi->netdev = NULL;
6686 }
6687err_netdev:
6688 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
6689err_vsi:
6690 i40e_vsi_clear(vsi);
6691err_alloc:
6692 return NULL;
6693}
6694
6695/**
6696 * i40e_veb_get_bw_info - Query VEB BW information
6697 * @veb: the veb to query
6698 *
6699 * Query the Tx scheduler BW configuration data for given VEB
6700 **/
6701static int i40e_veb_get_bw_info(struct i40e_veb *veb)
6702{
6703 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
6704 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
6705 struct i40e_pf *pf = veb->pf;
6706 struct i40e_hw *hw = &pf->hw;
6707 u32 tc_bw_max;
6708 int ret = 0;
6709 int i;
6710
6711 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
6712 &bw_data, NULL);
6713 if (ret) {
6714 dev_info(&pf->pdev->dev,
6715 "query veb bw config failed, aq_err=%d\n",
6716 hw->aq.asq_last_status);
6717 goto out;
6718 }
6719
6720 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
6721 &ets_data, NULL);
6722 if (ret) {
6723 dev_info(&pf->pdev->dev,
6724 "query veb bw ets config failed, aq_err=%d\n",
6725 hw->aq.asq_last_status);
6726 goto out;
6727 }
6728
6729 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
6730 veb->bw_max_quanta = ets_data.tc_bw_max;
6731 veb->is_abs_credits = bw_data.absolute_credits_enable;
6732 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
6733 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
6734 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6735 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
6736 veb->bw_tc_limit_credits[i] =
6737 le16_to_cpu(bw_data.tc_bw_limits[i]);
6738 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
6739 }
6740
6741out:
6742 return ret;
6743}
6744
6745/**
6746 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
6747 * @pf: board private structure
6748 *
6749 * On error: returns error code (negative)
 6750 * On success: returns veb index in PF (positive)
6751 **/
6752static int i40e_veb_mem_alloc(struct i40e_pf *pf)
6753{
6754 int ret = -ENOENT;
6755 struct i40e_veb *veb;
6756 int i;
6757
6758 /* Need to protect the allocation of switch elements at the PF level */
6759 mutex_lock(&pf->switch_mutex);
6760
6761 /* VEB list may be fragmented if VEB creation/destruction has
6762 * been happening. We can afford to do a quick scan to look
6763 * for any free slots in the list.
6764 *
6765 * find next empty veb slot, looping back around if necessary
6766 */
6767 i = 0;
6768 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
6769 i++;
6770 if (i >= I40E_MAX_VEB) {
6771 ret = -ENOMEM;
6772 goto err_alloc_veb; /* out of VEB slots! */
6773 }
6774
6775 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
6776 if (!veb) {
6777 ret = -ENOMEM;
6778 goto err_alloc_veb;
6779 }
6780 veb->pf = pf;
6781 veb->idx = i;
6782 veb->enabled_tc = 1;
6783
6784 pf->veb[i] = veb;
6785 ret = i;
6786err_alloc_veb:
6787 mutex_unlock(&pf->switch_mutex);
6788 return ret;
6789}
6790
6791/**
6792 * i40e_switch_branch_release - Delete a branch of the switch tree
6793 * @branch: where to start deleting
6794 *
6795 * This uses recursion to find the tips of the branch to be
6796 * removed, deleting until we get back to and can delete this VEB.
6797 **/
6798static void i40e_switch_branch_release(struct i40e_veb *branch)
6799{
6800 struct i40e_pf *pf = branch->pf;
6801 u16 branch_seid = branch->seid;
6802 u16 veb_idx = branch->idx;
6803 int i;
6804
6805 /* release any VEBs on this VEB - RECURSION */
6806 for (i = 0; i < I40E_MAX_VEB; i++) {
6807 if (!pf->veb[i])
6808 continue;
6809 if (pf->veb[i]->uplink_seid == branch->seid)
6810 i40e_switch_branch_release(pf->veb[i]);
6811 }
6812
6813 /* Release the VSIs on this VEB, but not the owner VSI.
6814 *
6815 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
6816 * the VEB itself, so don't use (*branch) after this loop.
6817 */
6818 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6819 if (!pf->vsi[i])
6820 continue;
6821 if (pf->vsi[i]->uplink_seid == branch_seid &&
6822 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
6823 i40e_vsi_release(pf->vsi[i]);
6824 }
6825 }
6826
6827 /* There's one corner case where the VEB might not have been
6828 * removed, so double check it here and remove it if needed.
6829 * This case happens if the veb was created from the debugfs
6830 * commands and no VSIs were added to it.
6831 */
6832 if (pf->veb[veb_idx])
6833 i40e_veb_release(pf->veb[veb_idx]);
6834}
6835
6836/**
6837 * i40e_veb_clear - remove veb struct
6838 * @veb: the veb to remove
6839 **/
6840static void i40e_veb_clear(struct i40e_veb *veb)
6841{
6842 if (!veb)
6843 return;
6844
6845 if (veb->pf) {
6846 struct i40e_pf *pf = veb->pf;
6847
6848 mutex_lock(&pf->switch_mutex);
6849 if (pf->veb[veb->idx] == veb)
6850 pf->veb[veb->idx] = NULL;
6851 mutex_unlock(&pf->switch_mutex);
6852 }
6853
6854 kfree(veb);
6855}
6856
6857/**
6858 * i40e_veb_release - Delete a VEB and free its resources
6859 * @veb: the VEB being removed
6860 **/
6861void i40e_veb_release(struct i40e_veb *veb)
6862{
6863 struct i40e_vsi *vsi = NULL;
6864 struct i40e_pf *pf;
6865 int i, n = 0;
6866
6867 pf = veb->pf;
6868
6869 /* find the remaining VSI and check for extras */
6870 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
6871 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
6872 n++;
6873 vsi = pf->vsi[i];
6874 }
6875 }
6876 if (n != 1) {
6877 dev_info(&pf->pdev->dev,
6878 "can't remove VEB %d with %d VSIs left\n",
6879 veb->seid, n);
6880 return;
6881 }
6882
6883 /* move the remaining VSI to uplink veb */
6884 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
6885 if (veb->uplink_seid) {
6886 vsi->uplink_seid = veb->uplink_seid;
6887 if (veb->uplink_seid == pf->mac_seid)
6888 vsi->veb_idx = I40E_NO_VEB;
6889 else
6890 vsi->veb_idx = veb->veb_idx;
6891 } else {
6892 /* floating VEB */
6893 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6894 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
6895 }
6896
6897 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
6898 i40e_veb_clear(veb);
6899
6900 return;
6901}
6902
6903/**
6904 * i40e_add_veb - create the VEB in the switch
6905 * @veb: the VEB to be instantiated
6906 * @vsi: the controlling VSI
6907 **/
6908static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
6909{
6910 bool is_default = (vsi->idx == vsi->back->lan_vsi);
e1c51b95 6911 bool is_cloud = false;
41c445ff
JB
6912 int ret;
6913
6914 /* get a VEB from the hardware */
6915 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
e1c51b95
KS
6916 veb->enabled_tc, is_default,
6917 is_cloud, &veb->seid, NULL);
41c445ff
JB
6918 if (ret) {
6919 dev_info(&veb->pf->pdev->dev,
6920 "couldn't add VEB, err %d, aq_err %d\n",
6921 ret, veb->pf->hw.aq.asq_last_status);
6922 return -EPERM;
6923 }
6924
6925 /* get statistics counter */
6926 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
6927 &veb->stats_idx, NULL, NULL, NULL);
6928 if (ret) {
6929 dev_info(&veb->pf->pdev->dev,
6930 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
6931 ret, veb->pf->hw.aq.asq_last_status);
6932 return -EPERM;
6933 }
6934 ret = i40e_veb_get_bw_info(veb);
6935 if (ret) {
6936 dev_info(&veb->pf->pdev->dev,
6937 "couldn't get VEB bw info, err %d, aq_err %d\n",
6938 ret, veb->pf->hw.aq.asq_last_status);
6939 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
6940 return -ENOENT;
6941 }
6942
6943 vsi->uplink_seid = veb->seid;
6944 vsi->veb_idx = veb->idx;
6945 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
6946
6947 return 0;
6948}
6949
6950/**
6951 * i40e_veb_setup - Set up a VEB
6952 * @pf: board private structure
6953 * @flags: VEB setup flags
6954 * @uplink_seid: the switch element to link to
6955 * @vsi_seid: the initial VSI seid
6956 * @enabled_tc: Enabled TC bit-map
6957 *
6958 * This allocates the sw VEB structure and links it into the switch
6959 * It is possible and legal for this to be a duplicate of an already
6960 * existing VEB. It is also possible for both uplink and vsi seids
6961 * to be zero, in order to create a floating VEB.
6962 *
6963 * Returns pointer to the successfully allocated VEB sw struct on
6964 * success, otherwise returns NULL on failure.
6965 **/
6966struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
6967 u16 uplink_seid, u16 vsi_seid,
6968 u8 enabled_tc)
6969{
6970 struct i40e_veb *veb, *uplink_veb = NULL;
6971 int vsi_idx, veb_idx;
6972 int ret;
6973
6974 /* if one seid is 0, the other must be 0 to create a floating relay */
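 /* the compound test reads: at least one seid is zero AND not both
  * are zero, i.e. it rejects exactly the one-but-not-both case.
  */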
6975 if ((uplink_seid == 0 || vsi_seid == 0) &&
6976 (uplink_seid + vsi_seid != 0)) {
6977 dev_info(&pf->pdev->dev,
6978 "one, not both seid's are 0: uplink=%d vsi=%d\n",
6979 uplink_seid, vsi_seid);
6980 return NULL;
6981 }
6982
6983 /* make sure there is such a vsi and uplink */
6984 for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
6985 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
6986 break;
6987 if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
6988 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
6989 vsi_seid);
6990 return NULL;
6991 }
6992
6993 if (uplink_seid && uplink_seid != pf->mac_seid) {
6994 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6995 if (pf->veb[veb_idx] &&
6996 pf->veb[veb_idx]->seid == uplink_seid) {
6997 uplink_veb = pf->veb[veb_idx];
6998 break;
6999 }
7000 }
7001 if (!uplink_veb) {
7002 dev_info(&pf->pdev->dev,
7003 "uplink seid %d not found\n", uplink_seid);
7004 return NULL;
7005 }
7006 }
7007
7008 /* get veb sw struct */
7009 veb_idx = i40e_veb_mem_alloc(pf);
7010 if (veb_idx < 0)
7011 goto err_alloc;
7012 veb = pf->veb[veb_idx];
7013 veb->flags = flags;
7014 veb->uplink_seid = uplink_seid;
7015 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
7016 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
7017
7018 /* create the VEB in the switch */
7019 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
7020 if (ret)
7021 goto err_veb;
7022
7023 return veb;
7024
7025err_veb:
7026 i40e_veb_clear(veb);
7027err_alloc:
7028 return NULL;
7029}
7030
7031/**
7032 * i40e_setup_pf_switch_element - set pf vars based on switch type
7033 * @pf: board private structure
7034 * @ele: element we are building info from
7035 * @num_reported: total number of elements
7036 * @printconfig: should we print the contents
7037 *
 7038 * helper function to extract a few useful SEID values.
7039 **/
7040static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
7041 struct i40e_aqc_switch_config_element_resp *ele,
7042 u16 num_reported, bool printconfig)
7043{
7044 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
7045 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
7046 u8 element_type = ele->element_type;
7047 u16 seid = le16_to_cpu(ele->seid);
7048
7049 if (printconfig)
7050 dev_info(&pf->pdev->dev,
7051 "type=%d seid=%d uplink=%d downlink=%d\n",
7052 element_type, seid, uplink_seid, downlink_seid);
7053
7054 switch (element_type) {
7055 case I40E_SWITCH_ELEMENT_TYPE_MAC:
7056 pf->mac_seid = seid;
7057 break;
7058 case I40E_SWITCH_ELEMENT_TYPE_VEB:
7059 /* Main VEB? */
7060 if (uplink_seid != pf->mac_seid)
7061 break;
7062 if (pf->lan_veb == I40E_NO_VEB) {
7063 int v;
7064
7065 /* find existing or else empty VEB */
7066 for (v = 0; v < I40E_MAX_VEB; v++) {
7067 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
7068 pf->lan_veb = v;
7069 break;
7070 }
7071 }
7072 if (pf->lan_veb == I40E_NO_VEB) {
7073 v = i40e_veb_mem_alloc(pf);
7074 if (v < 0)
7075 break;
7076 pf->lan_veb = v;
7077 }
7078 }
7079
7080 pf->veb[pf->lan_veb]->seid = seid;
7081 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
7082 pf->veb[pf->lan_veb]->pf = pf;
7083 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
7084 break;
7085 case I40E_SWITCH_ELEMENT_TYPE_VSI:
7086 if (num_reported != 1)
7087 break;
7088 /* This is immediately after a reset so we can assume this is
7089 * the PF's VSI
7090 */
7091 pf->mac_seid = uplink_seid;
7092 pf->pf_seid = downlink_seid;
7093 pf->main_vsi_seid = seid;
7094 if (printconfig)
7095 dev_info(&pf->pdev->dev,
7096 "pf_seid=%d main_vsi_seid=%d\n",
7097 pf->pf_seid, pf->main_vsi_seid);
7098 break;
7099 case I40E_SWITCH_ELEMENT_TYPE_PF:
7100 case I40E_SWITCH_ELEMENT_TYPE_VF:
7101 case I40E_SWITCH_ELEMENT_TYPE_EMP:
7102 case I40E_SWITCH_ELEMENT_TYPE_BMC:
7103 case I40E_SWITCH_ELEMENT_TYPE_PE:
7104 case I40E_SWITCH_ELEMENT_TYPE_PA:
7105 /* ignore these for now */
7106 break;
7107 default:
7108 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
7109 element_type, seid);
7110 break;
7111 }
7112}
7113
7114/**
7115 * i40e_fetch_switch_configuration - Get switch config from firmware
7116 * @pf: board private structure
7117 * @printconfig: should we print the contents
7118 *
7119 * Get the current switch configuration from the device and
7120 * extract a few useful SEID values.
7121 **/
7122int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
7123{
7124 struct i40e_aqc_get_switch_config_resp *sw_config;
7125 u16 next_seid = 0;
7126 int ret = 0;
7127 u8 *aq_buf;
7128 int i;
7129
7130 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
7131 if (!aq_buf)
7132 return -ENOMEM;
7133
7134 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
7135 do {
7136 u16 num_reported, num_total;
7137
7138 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
7139 I40E_AQ_LARGE_BUF,
7140 &next_seid, NULL);
7141 if (ret) {
7142 dev_info(&pf->pdev->dev,
7143 "get switch config failed %d aq_err=%x\n",
7144 ret, pf->hw.aq.asq_last_status);
7145 kfree(aq_buf);
7146 return -ENOENT;
7147 }
7148
7149 num_reported = le16_to_cpu(sw_config->header.num_reported);
7150 num_total = le16_to_cpu(sw_config->header.num_total);
7151
7152 if (printconfig)
7153 dev_info(&pf->pdev->dev,
7154 "header: %d reported %d total\n",
7155 num_reported, num_total);
7156
7157 if (num_reported) {
7158 int sz = sizeof(*sw_config) * num_reported;
7159
7160 kfree(pf->sw_config);
7161 pf->sw_config = kzalloc(sz, GFP_KERNEL);
7162 if (pf->sw_config)
7163 memcpy(pf->sw_config, sw_config, sz);
7164 }
7165
7166 for (i = 0; i < num_reported; i++) {
7167 struct i40e_aqc_switch_config_element_resp *ele =
7168 &sw_config->element[i];
7169
7170 i40e_setup_pf_switch_element(pf, ele, num_reported,
7171 printconfig);
7172 }
7173 } while (next_seid != 0);
7174
7175 kfree(aq_buf);
7176 return ret;
7177}
7178
7179/**
7180 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
7181 * @pf: board private structure
bc7d338f 7182 * @reinit: if the Main VSI needs to be re-initialized.
41c445ff
JB
7183 *
7184 * Returns 0 on success, negative value on failure
7185 **/
bc7d338f 7186static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
41c445ff 7187{
895106a5 7188 u32 rxfc = 0, txfc = 0, rxfc_reg;
41c445ff
JB
7189 int ret;
7190
7191 /* find out what's out there already */
7192 ret = i40e_fetch_switch_configuration(pf, false);
7193 if (ret) {
7194 dev_info(&pf->pdev->dev,
7195 "couldn't fetch switch config, err %d, aq_err %d\n",
7196 ret, pf->hw.aq.asq_last_status);
7197 return ret;
7198 }
7199 i40e_pf_reset_stats(pf);
7200
7201 /* fdir VSI must happen first to be sure it gets queue 0, but only
7202 * if there is enough room for the fdir VSI
7203 */
7204 if (pf->num_lan_qps > 1)
7205 i40e_fdir_setup(pf);
7206
7207 /* first time setup */
bc7d338f 7208 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
41c445ff
JB
7209 struct i40e_vsi *vsi = NULL;
7210 u16 uplink_seid;
7211
7212 /* Set up the PF VSI associated with the PF's main VSI
7213 * that is already in the HW switch
7214 */
7215 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
7216 uplink_seid = pf->veb[pf->lan_veb]->seid;
7217 else
7218 uplink_seid = pf->mac_seid;
bc7d338f
ASJ
7219 if (pf->lan_vsi == I40E_NO_VSI)
7220 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
7221 else if (reinit)
7222 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
41c445ff
JB
7223 if (!vsi) {
7224 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
7225 i40e_fdir_teardown(pf);
7226 return -EAGAIN;
7227 }
7228 /* accommodate kcompat by copying the main VSI queue count
7229 * into the pf, since this newer code pushes the pf queue
7230 * info down a level into a VSI
7231 */
ac6c5e3d
SN
7232 pf->num_rx_queues = vsi->num_queue_pairs;
7233 pf->num_tx_queues = vsi->num_queue_pairs;
41c445ff
JB
7234 } else {
7235 /* force a reset of TC and queue layout configurations */
7236 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
7237 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
7238 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
7239 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
7240 }
7241 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
7242
7243 /* Setup static PF queue filter control settings */
7244 ret = i40e_setup_pf_filter_control(pf);
7245 if (ret) {
7246 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
7247 ret);
7248 /* Failure here should not stop continuing other steps */
7249 }
7250
7251 /* enable RSS in the HW, even for only one queue, as the stack can use
7252 * the hash
7253 */
7254 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
7255 i40e_config_rss(pf);
7256
7257 /* fill in link information and enable LSE reporting */
7258 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
7259 i40e_link_event(pf);
7260
d52c20b7 7261 /* Initialize user-specific link properties */
41c445ff
JB
7262 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
7263 I40E_AQ_AN_COMPLETED) ? true : false);
d52c20b7
JB
7264 /* requested_mode is set in probe or by ethtool */
7265 if (!pf->fc_autoneg_status)
7266 goto no_autoneg;
7267
7268 if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
7269 (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
41c445ff
JB
7270 pf->hw.fc.current_mode = I40E_FC_FULL;
7271 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
7272 pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
7273 else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
7274 pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
7275 else
d52c20b7
JB
7276 pf->hw.fc.current_mode = I40E_FC_NONE;
7277
7278 /* sync the flow control settings with the auto-neg values */
7279 switch (pf->hw.fc.current_mode) {
7280 case I40E_FC_FULL:
7281 txfc = 1;
7282 rxfc = 1;
7283 break;
7284 case I40E_FC_TX_PAUSE:
7285 txfc = 1;
7286 rxfc = 0;
7287 break;
7288 case I40E_FC_RX_PAUSE:
7289 txfc = 0;
7290 rxfc = 1;
7291 break;
7292 case I40E_FC_NONE:
7293 case I40E_FC_DEFAULT:
7294 txfc = 0;
7295 rxfc = 0;
7296 break;
7297 case I40E_FC_PFC:
7298 /* TBD */
7299 break;
7300 /* no default case, we have to handle all possibilities here */
7301 }
7302
7303 wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
7304
7305 rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
7306 ~I40E_PRTDCB_MFLCN_RFCE_MASK;
7307 rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
7308
7309 wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
41c445ff 7310
d52c20b7
JB
7311 goto fc_complete;
7312
7313no_autoneg:
7314 /* disable L2 flow control, user can turn it on if they wish */
7315 wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
7316 wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
7317 ~I40E_PRTDCB_MFLCN_RFCE_MASK);
7318
7319fc_complete:
41c445ff
JB
7320 return ret;
7321}
7322
7323/**
7324 * i40e_set_rss_size - helper to set rss_size
7325 * @pf: board private structure
7326 * @queues_left: how many queues
7327 */
7328static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
7329{
7330 int num_tc0;
7331
7332 num_tc0 = min_t(int, queues_left, pf->rss_size_max);
bf051a3b 7333 num_tc0 = min_t(int, num_tc0, num_online_cpus());
41c445ff
JB
7334 num_tc0 = rounddown_pow_of_two(num_tc0);
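 /* e.g. 10 queues left on an 8-CPU box: min(10, rss_size_max),
  * then min(.., 8), then rounddown_pow_of_two() -> 8 for TC0.
  */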
7335
7336 return num_tc0;
7337}

/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
        int accum_tc_size;
        int queues_left;

        pf->num_lan_qps = 0;
        pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
        accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;

        /* Find the max queues to be put into basic use.  We'll always be
         * using TC0, whether or not DCB is running, and TC0 will get the
         * big RSS set.
         */
        queues_left = pf->hw.func_caps.num_tx_qp;

        if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
              (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
            !(pf->flags & (I40E_FLAG_RSS_ENABLED |
                           I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
            (queues_left == 1)) {

                /* one qp for PF, no queues for anything else */
                queues_left = 0;
                pf->rss_size = pf->num_lan_qps = 1;

                /* make sure all the fancies are disabled */
                pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
                               I40E_FLAG_MQ_ENABLED |
                               I40E_FLAG_FDIR_ENABLED |
                               I40E_FLAG_FDIR_ATR_ENABLED |
                               I40E_FLAG_DCB_ENABLED |
                               I40E_FLAG_SRIOV_ENABLED |
                               I40E_FLAG_VMDQ_ENABLED);

        } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
                   !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
                   !(pf->flags & I40E_FLAG_DCB_ENABLED)) {

                pf->rss_size = i40e_set_rss_size(pf, queues_left);

                queues_left -= pf->rss_size;
                pf->num_lan_qps = pf->rss_size_max;

        } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
                   !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
                   (pf->flags & I40E_FLAG_DCB_ENABLED)) {

                /* save num_tc_qps queues for TCs 1 thru 7 and the rest
                 * are set up for RSS in TC0
                 */
                queues_left -= accum_tc_size;

                pf->rss_size = i40e_set_rss_size(pf, queues_left);

                queues_left -= pf->rss_size;
                if (queues_left < 0) {
                        dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
                        return;
                }

                pf->num_lan_qps = pf->rss_size_max + accum_tc_size;

        } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
                   (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
                   !(pf->flags & I40E_FLAG_DCB_ENABLED)) {

                queues_left -= 1; /* save 1 queue for FD */

                pf->rss_size = i40e_set_rss_size(pf, queues_left);

                queues_left -= pf->rss_size;
                if (queues_left < 0) {
                        dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
                        return;
                }

                pf->num_lan_qps = pf->rss_size_max;

        } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
                   (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
                   (pf->flags & I40E_FLAG_DCB_ENABLED)) {

                /* save 1 queue for TCs 1 thru 7,
                 * 1 queue for flow director,
                 * and the rest are set up for RSS in TC0
                 */
                queues_left -= 1;
                queues_left -= accum_tc_size;

                pf->rss_size = i40e_set_rss_size(pf, queues_left);
                queues_left -= pf->rss_size;
                if (queues_left < 0) {
                        dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
                        return;
                }

                pf->num_lan_qps = pf->rss_size_max + accum_tc_size;

        } else {
                dev_info(&pf->pdev->dev,
                         "Invalid configuration, flags=0x%08llx\n", pf->flags);
                return;
        }

        if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
            pf->num_vf_qps && pf->num_req_vfs && queues_left) {
                pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
                                                               pf->num_vf_qps));
                queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
        }

        if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
            pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
                pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
                                          (queues_left / pf->num_vmdq_qps));
                queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
        }

        pf->queues_left = queues_left;
}
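
/* Worked example (hypothetical capabilities, for illustration only): with
 * func_caps.num_tx_qp = 128, num_tc_qps = 8, 16 online CPUs, and RSS + DCB
 * enabled but no Flow Director, accum_tc_size = (8 - 1) * 8 = 56, leaving
 * 128 - 56 = 72 queues for TC0. i40e_set_rss_size() clamps 72 against the
 * RSS cap and the CPU count and rounds down to a power of two, e.g.
 * rss_size = 16, so queues_left = 72 - 16 = 56 stays available for the
 * SR-IOV and VMDq carve-outs at the end of the function.
 */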

/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a pf's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables the Flow Director,
 * ethertype and macvlan type filters for the pf.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
        struct i40e_filter_control_settings *settings = &pf->filter_settings;

        settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

        /* Flow Director is enabled */
        if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
                settings->enable_fdir = true;

        /* Ethtype and MACVLAN filters enabled for PF */
        settings->enable_ethtype = true;
        settings->enable_macvlan = true;

        if (i40e_set_filter_control(&pf->hw, settings))
                return -ENOENT;

        return 0;
}

/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a pf identified by a pci_dev structure.
 * The OS initialization, configuring of the pf private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct i40e_driver_version dv;
        struct i40e_pf *pf;
        struct i40e_hw *hw;
        static u16 pfs_found;
        u16 link_status;
        int err = 0;
        u32 len;

        err = pci_enable_device_mem(pdev);
        if (err)
                return err;

        /* set up for high or low dma */
        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                /* coherent mask for the same size will always succeed if
                 * dma_set_mask does
                 */
                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
        } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        } else {
                /* err is still 0 from pci_enable_device_mem() here, so set
                 * a real error code before reporting the failure
                 */
                err = -EIO;
                dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
                goto err_dma;
        }

        /* set up pci connections */
        err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
                                           IORESOURCE_MEM), i40e_driver_name);
        if (err) {
                dev_info(&pdev->dev,
                         "pci_request_selected_regions failed %d\n", err);
                goto err_pci_reg;
        }

        pci_enable_pcie_error_reporting(pdev);
        pci_set_master(pdev);

        /* Now that we have a PCI connection, we need to do the
         * low level device setup.  This is primarily setting up
         * the Admin Queue structures and then querying for the
         * device's current profile information.
         */
        pf = kzalloc(sizeof(*pf), GFP_KERNEL);
        if (!pf) {
                err = -ENOMEM;
                goto err_pf_alloc;
        }
        pf->next_vsi = 0;
        pf->pdev = pdev;
        set_bit(__I40E_DOWN, &pf->state);

        hw = &pf->hw;
        hw->back = pf;
        hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
                              pci_resource_len(pdev, 0));
        if (!hw->hw_addr) {
                err = -EIO;
                dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
                         (unsigned int)pci_resource_start(pdev, 0),
                         (unsigned int)pci_resource_len(pdev, 0), err);
                goto err_ioremap;
        }
        hw->vendor_id = pdev->vendor;
        hw->device_id = pdev->device;
        pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
        hw->subsystem_device_id = pdev->subsystem_device;
        hw->bus.device = PCI_SLOT(pdev->devfn);
        hw->bus.func = PCI_FUNC(pdev->devfn);
        pf->instance = pfs_found;

        /* do a special CORER for clearing PXE mode once at init */
        if (hw->revision_id == 0 &&
            (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
                wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
                i40e_flush(hw);
                msleep(200);
                pf->corer_count++;

                i40e_clear_pxe_mode(hw);
        }

        /* Reset here to make sure all is clean and to define PF 'n' */
        err = i40e_pf_reset(hw);
        if (err) {
                dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
                goto err_pf_reset;
        }
        pf->pfr_count++;

        hw->aq.num_arq_entries = I40E_AQ_LEN;
        hw->aq.num_asq_entries = I40E_AQ_LEN;
        hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
        hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
        pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
        snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
                 "%s-pf%d:misc",
                 dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);

        err = i40e_init_shared_code(hw);
        if (err) {
                dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
                goto err_pf_reset;
        }

        /* set up a default setting for link flow control */
        pf->hw.fc.requested_mode = I40E_FC_NONE;

        err = i40e_init_adminq(hw);
        dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
        if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
                 >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) {
                dev_info(&pdev->dev,
                         "warning: NVM version not supported, supported version: %02x.%02x\n",
                         I40E_CURRENT_NVM_VERSION_HI,
                         I40E_CURRENT_NVM_VERSION_LO);
        }
        if (err) {
                dev_info(&pdev->dev,
                         "init_adminq failed: %d expecting API %02x.%02x\n",
                         err,
                         I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
                goto err_pf_reset;
        }

        err = i40e_get_capabilities(pf);
        if (err)
                goto err_adminq_setup;

        err = i40e_sw_init(pf);
        if (err) {
                dev_info(&pdev->dev, "sw_init failed: %d\n", err);
                goto err_sw_init;
        }

        err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
                                hw->func_caps.num_rx_qp,
                                pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
        if (err) {
                dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
                goto err_init_lan_hmc;
        }

        err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
        if (err) {
                dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
                err = -ENOENT;
                goto err_configure_lan_hmc;
        }

        i40e_get_mac_addr(hw, hw->mac.addr);
        if (!is_valid_ether_addr(hw->mac.addr)) {
                dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
                err = -EIO;
                goto err_mac_addr;
        }
        dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
        memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);

        pci_set_drvdata(pdev, pf);
        pci_save_state(pdev);

        /* set up periodic task facility */
        setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
        pf->service_timer_period = HZ;

        INIT_WORK(&pf->service_task, i40e_service_task);
        clear_bit(__I40E_SERVICE_SCHED, &pf->state);
        pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
        pf->link_check_timeout = jiffies;

        /* WoL defaults to disabled */
        pf->wol_en = false;
        device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);

        /* set up the main switch operations */
        i40e_determine_queue_usage(pf);
        i40e_init_interrupt_scheme(pf);

        /* Set up the *vsi struct based on the number of VSIs in the HW,
         * and set up our local tracking of the MAIN PF vsi.
         */
        len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
        pf->vsi = kzalloc(len, GFP_KERNEL);
        if (!pf->vsi) {
                err = -ENOMEM;
                goto err_switch_setup;
        }

        err = i40e_setup_pf_switch(pf, false);
        if (err) {
                dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
                goto err_vsis;
        }

        /* The main driver is (mostly) up and happy. We need to set this state
         * before setting up the misc vector or we get a race and the vector
         * ends up disabled forever.
         */
        clear_bit(__I40E_DOWN, &pf->state);

        /* In case of MSIX we are going to setup the misc vector right here
         * to handle admin queue events etc. In case of legacy and MSI
         * the misc functionality and queue processing is combined in
         * the same vector and that gets setup at open.
         */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
                err = i40e_setup_misc_vector(pf);
                if (err) {
                        dev_info(&pdev->dev,
                                 "setup of misc vector failed: %d\n", err);
                        goto err_vsis;
                }
        }

        /* prep for VF support */
        if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
            (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
                u32 val;

                /* disable link interrupts for VFs */
                val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
                val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
                wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
                i40e_flush(hw);
        }

        pfs_found++;

        i40e_dbg_pf_init(pf);

        /* tell the firmware that we're starting */
        dv.major_version = DRV_VERSION_MAJOR;
        dv.minor_version = DRV_VERSION_MINOR;
        dv.build_version = DRV_VERSION_BUILD;
        dv.subbuild_version = 0;
        i40e_aq_send_driver_version(&pf->hw, &dv, NULL);

        /* since everything's happy, start the service_task timer */
        mod_timer(&pf->service_timer,
                  round_jiffies(jiffies + pf->service_timer_period));

        /* Get the negotiated link width and speed from PCI config space */
        pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);

        i40e_set_pci_config_data(hw, link_status);

        dev_info(&pdev->dev, "PCI Express: %s %s\n",
                 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
                  hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
                  hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
                  "Unknown"),
                 (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
                  hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
                  hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
                  hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
                  "Unknown"));

        if (hw->bus.width < i40e_bus_width_pcie_x8 ||
            hw->bus.speed < i40e_bus_speed_8000) {
                dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
                dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
        }
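
        /* Back-of-the-envelope behind the warning above (illustrative):
         * a Gen3 x8 link carries 8.0 GT/s * 8 lanes * 128/130 encoding
         * ~= 63 Gb/s, comfortably above a 40GbE port, while Gen2 x8
         * (5.0 GT/s * 8 * 8/10 = 32 Gb/s) or Gen3 x4 (~31.5 Gb/s) can
         * bottleneck the XL710's fastest configurations.
         */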

        return 0;

        /* Unwind what we've done if something failed in the setup */
err_vsis:
        set_bit(__I40E_DOWN, &pf->state);
        i40e_clear_interrupt_scheme(pf);
        kfree(pf->vsi);
err_switch_setup:
        i40e_reset_interrupt_capability(pf);
        del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
        (void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
        kfree(pf->qp_pile);
        kfree(pf->irq_pile);
err_sw_init:
err_adminq_setup:
        (void)i40e_shutdown_adminq(hw);
err_pf_reset:
        iounmap(hw->hw_addr);
err_ioremap:
        kfree(pf);
err_pf_alloc:
        pci_disable_pcie_error_reporting(pdev);
        pci_release_selected_regions(pdev,
                                     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
        pci_disable_device(pdev);
        return err;
}

/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
        struct i40e_pf *pf = pci_get_drvdata(pdev);
        i40e_status ret_code;
        u32 reg;
        int i;

        i40e_dbg_pf_exit(pf);

        if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
                i40e_free_vfs(pf);
                pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
        }

        /* no more scheduling of any task */
        set_bit(__I40E_DOWN, &pf->state);
        del_timer_sync(&pf->service_timer);
        cancel_work_sync(&pf->service_task);

        i40e_fdir_teardown(pf);

        /* If there is a switch structure or any orphans, remove them.
         * This will leave only the PF's VSI remaining.
         */
        for (i = 0; i < I40E_MAX_VEB; i++) {
                if (!pf->veb[i])
                        continue;

                if (pf->veb[i]->uplink_seid == pf->mac_seid ||
                    pf->veb[i]->uplink_seid == 0)
                        i40e_switch_branch_release(pf->veb[i]);
        }

        /* Now we can shutdown the PF's VSI, just before we kill
         * adminq and hmc.
         */
        if (pf->vsi[pf->lan_vsi])
                i40e_vsi_release(pf->vsi[pf->lan_vsi]);

        i40e_stop_misc_vector(pf);
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
                synchronize_irq(pf->msix_entries[0].vector);
                free_irq(pf->msix_entries[0].vector, pf);
        }

        /* shutdown and destroy the HMC */
        ret_code = i40e_shutdown_lan_hmc(&pf->hw);
        if (ret_code)
                dev_warn(&pdev->dev,
                         "Failed to destroy the HMC resources: %d\n", ret_code);

        /* shutdown the adminq */
        i40e_aq_queue_shutdown(&pf->hw, true);
        ret_code = i40e_shutdown_adminq(&pf->hw);
        if (ret_code)
                dev_warn(&pdev->dev,
                         "Failed to destroy the Admin Queue resources: %d\n",
                         ret_code);

        /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
        i40e_clear_interrupt_scheme(pf);
        for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
                if (pf->vsi[i]) {
                        i40e_vsi_clear_rings(pf->vsi[i]);
                        i40e_vsi_clear(pf->vsi[i]);
                        pf->vsi[i] = NULL;
                }
        }

        for (i = 0; i < I40E_MAX_VEB; i++) {
                kfree(pf->veb[i]);
                pf->veb[i] = NULL;
        }

        kfree(pf->qp_pile);
        kfree(pf->irq_pile);
        kfree(pf->sw_config);
        kfree(pf->vsi);

        /* force a PF reset to clean anything leftover */
        reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
        wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
        i40e_flush(&pf->hw);

        iounmap(pf->hw.hw_addr);
        kfree(pf);
        pci_release_selected_regions(pdev,
                                     pci_select_bars(pdev, IORESOURCE_MEM));

        pci_disable_pcie_error_reporting(pdev);
        pci_disable_device(pdev);
}

/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the channel state that was detected
 *
 * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
                                                enum pci_channel_state error)
{
        struct i40e_pf *pf = pci_get_drvdata(pdev);

        dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

        /* shutdown all operations */
        if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
                rtnl_lock();
                i40e_prep_for_reset(pf);
                rtnl_unlock();
        }

        /* Request a slot reset */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
        struct i40e_pf *pf = pci_get_drvdata(pdev);
        pci_ers_result_t result;
        int err;
        u32 reg;

        dev_info(&pdev->dev, "%s\n", __func__);
        if (pci_enable_device_mem(pdev)) {
                dev_info(&pdev->dev,
                         "Cannot re-enable PCI device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_set_master(pdev);
                pci_restore_state(pdev);
                pci_save_state(pdev);
                pci_wake_from_d3(pdev, false);

                reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
                if (reg == 0)
                        result = PCI_ERS_RESULT_RECOVERED;
                else
                        result = PCI_ERS_RESULT_DISCONNECT;
        }

        err = pci_cleanup_aer_uncorrect_error_status(pdev);
        if (err) {
                dev_info(&pdev->dev,
                         "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
                         err);
                /* non-fatal, continue */
        }

        return result;
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
        struct i40e_pf *pf = pci_get_drvdata(pdev);

        dev_info(&pdev->dev, "%s\n", __func__);
        if (test_bit(__I40E_SUSPENDED, &pf->state))
                return;

        rtnl_lock();
        i40e_handle_reset_warning(pf);
        /* release the lock; the original second rtnl_lock() would deadlock */
        rtnl_unlock();
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
        struct i40e_pf *pf = pci_get_drvdata(pdev);
        struct i40e_hw *hw = &pf->hw;

        set_bit(__I40E_SUSPENDED, &pf->state);
        set_bit(__I40E_DOWN, &pf->state);
        rtnl_lock();
        i40e_prep_for_reset(pf);
        rtnl_unlock();

        wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
        wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, pf->wol_en);
                pci_set_power_state(pdev, PCI_D3hot);
        }
}

#ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 * @state: the PM state we are being asked to transition to
 **/
static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct i40e_pf *pf = pci_get_drvdata(pdev);
        struct i40e_hw *hw = &pf->hw;

        set_bit(__I40E_SUSPENDED, &pf->state);
        set_bit(__I40E_DOWN, &pf->state);
        rtnl_lock();
        i40e_prep_for_reset(pf);
        rtnl_unlock();

        wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
        wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

        pci_wake_from_d3(pdev, pf->wol_en);
        pci_set_power_state(pdev, PCI_D3hot);

        return 0;
}

/**
 * i40e_resume - PCI callback for waking up from D3
 * @pdev: PCI device information struct
 **/
static int i40e_resume(struct pci_dev *pdev)
{
        struct i40e_pf *pf = pci_get_drvdata(pdev);
        int err;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        /* pci_restore_state() clears dev->state_saved, so
         * call pci_save_state() again to restore it.
         */
        pci_save_state(pdev);

        err = pci_enable_device_mem(pdev);
        if (err) {
                dev_err(&pdev->dev,
                        "%s: Cannot enable PCI device from suspend\n",
                        __func__);
                return err;
        }
        pci_set_master(pdev);

        /* no wakeup events while running */
        pci_wake_from_d3(pdev, false);

        /* handling the reset will rebuild the device state */
        if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
                clear_bit(__I40E_DOWN, &pf->state);
                rtnl_lock();
                i40e_reset_and_rebuild(pf, false);
                rtnl_unlock();
        }

        return 0;
}

#endif

static const struct pci_error_handlers i40e_err_handler = {
        .error_detected = i40e_pci_error_detected,
        .slot_reset = i40e_pci_error_slot_reset,
        .resume = i40e_pci_error_resume,
};

static struct pci_driver i40e_driver = {
        .name = i40e_driver_name,
        .id_table = i40e_pci_tbl,
        .probe = i40e_probe,
        .remove = i40e_remove,
#ifdef CONFIG_PM
        .suspend = i40e_suspend,
        .resume = i40e_resume,
#endif
        .shutdown = i40e_shutdown,
        .err_handler = &i40e_err_handler,
        .sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
        pr_info("%s: %s - version %s\n", i40e_driver_name,
                i40e_driver_string, i40e_driver_version_str);
        pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
        i40e_dbg_init();
        return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
        pci_unregister_driver(&i40e_driver);
        i40e_dbg_exit();
}
module_exit(i40e_exit_module);