/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>
/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
	"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 1
#define DRV_VERSION_BUILD 14
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
				     struct i40e_cloud_filter *filter,
				     bool add);
static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
					     struct i40e_cloud_filter *filter,
					     bool add);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);
/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
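
/* Driver-private workqueue used to run the PF service task; work is queued
 * onto it from i40e_service_event_schedule() below.
 */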
static struct workqueue_struct *i40e_wq;
/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}
/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;

	return 0;
}
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;
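
	/* Typically the piles passed in here are pf->qp_pile (queue pairs)
	 * and pf->irq_pile (MSI-X vectors); each caller tags its range with
	 * its own id so i40e_put_lump() can later release exactly that lump.
	 */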
	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf - the pf structure to search for the vsi
 * @id - id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}
/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes   = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes   += bytes;
}
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = READ_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;
		i40e_get_netdev_stats_struct_tx(tx_ring, stats);

		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes   = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;

		if (i40e_enabled_xdp_vsi(vsi))
			i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;
}
/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}
/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
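
	/* Worked example of the roll-over handling below (illustrative only):
	 * with a 48-bit counter, if the saved offset is 0xFFFFFFFFFF00 and
	 * the hardware counter has wrapped to 0x000000000100, new_data is
	 * below *offset, so the delta is (new_data + 2^48) - *offset = 0x200,
	 * then masked back to 48 bits.
	 */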
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);
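
	/* Unlike i40e_stat_update32() above, the read here is destructive:
	 * the write below clears the register, so the delta is accumulated
	 * directly into the stat instead of being tracked against an offset.
	 */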
	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}
/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}
/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}
/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}
/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ALL. We could check the table each
	 * time, incurring search cost repeatedly. However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filters which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}
/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 */
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */
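
	/* For example (illustrative only): with no PVID and one active VLAN
	 * filter, an existing MAC filter carrying VLAN=-1 is re-added as
	 * VLAN=0 so it only matches untagged traffic; once the last VLAN
	 * filter is removed, that VLAN=0 filter is converted back to VLAN=-1
	 * so the MAC again matches both tagged and untagged traffic.
	 */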
	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		/* If we're in overflow promisc mode, set the state directly
		 * to failed, so we don't bother to try sending the filter
		 * to the hardware.
		 */
		if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
			f->state = I40E_FILTER_FAILED;
		else
			f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}
/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}
/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}
/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
	     "Missing mac_filter_hash_lock\n");
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	i40e_add_mac_filter(vsi, addr->sa_data);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}
/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}
/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured,
 * @ctxt: VSI context structure
 * @enabled_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
					   struct i40e_vsi_context *ctxt,
					   u8 enabled_tc)
{
	u16 qcount = 0, max_qcount, qmap, sections = 0;
	int i, override_q, pow, num_qps, ret;
	u8 netdev_tc = 0, offset = 0;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(num_qps);
	if (!is_power_of_2(num_qps))
		pow++;
	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue offset/count for all TCs for given VSI */
	max_qcount = vsi->mqprio_qopt.qopt.count[0];
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount = vsi->mqprio_qopt.qopt.count[i];
			if (qcount > max_qcount)
				max_qcount = qcount;
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;
		}
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset + qcount;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);

	/* Reconfigure RSS for main VSI with max queue count */
	vsi->rss_size = max_qcount;
	ret = i40e_vsi_config_rss(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to reconfig rss for num_queues (%u)\n",
			 max_qcount);
		return ret;
	}
	vsi->reconfig_rss = true;
	dev_dbg(&vsi->back->pdev->dev,
		"Reconfigured rss with num_queues (%u)\n", max_qcount);

	/* Find queue count available for channel VSIs and starting offset
	 * for channel VSIs.
	 */
	override_q = vsi->mqprio_qopt.qopt.count[0];
	if (override_q && override_q < vsi->num_queue_pairs) {
		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
		vsi->next_base_queue = override_q;
	}
	return 0;
}
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 num_tc_qps = 0;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u8 offset = 0;
	u16 qcount;
	u16 qmap;
	int i;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in non-DCB, non-MQPRIO case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	qcount = vsi->alloc_queue_pairs;

	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}

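/* The tc_mapping words built above pack a TC's first queue and its queue
 * count (as a power of two) into one 16-bit field. For example, a TC at
 * queue offset 8 with qcount 8 yields pow = 3 and
 *   qmap = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *          (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 */
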
/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (i40e_add_mac_filter(vsi, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	i40e_del_mac_filter(vsi, addr);

	return 0;
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

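/* i40e_addr_sync/i40e_addr_unsync only stage changes in the MAC filter hash
 * here; the actual AdminQ programming happens later in
 * i40e_sync_vsi_filters(), which the service task runs once
 * I40E_FLAG_FILTER_SYNC is set.
 */
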
/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to VSI struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
 *
 * MAC filter entries from this list were slated for deletion.
 **/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;

	hlist_for_each_entry_safe(f, h, from, hlist) {
		u64 key = i40e_addr_to_hkey(f->macaddr);

		/* Move the element back into MAC filter list */
		hlist_del(&f->hlist);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);
	}
}

/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
 *
 * MAC filter entries from this list were slated for addition.
 **/
static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;

	hlist_for_each_entry_safe(new, h, from, hlist) {
		/* We can simply free the wrapper structure */
		hlist_del(&new->hlist);
		kfree(new);
	}
}

/**
 * i40e_next_filter - Get the next non-broadcast filter from a list
 * @next: pointer to filter in list
 *
 * Returns the next non-broadcast filter in the list. Required so that we
 * ignore broadcast filters within the list, since these are not handled via
 * the normal firmware update path.
 **/
static
struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
{
	hlist_for_each_entry_continue(next, hlist) {
		if (!is_broadcast_ether_addr(next->f->macaddr))
			return next;
	}

	return NULL;
}

/**
 * i40e_update_filter_state - Update filter state based on return data
 * from firmware
 * @count: Number of filters added
 * @add_list: return data from fw
 * @add_head: pointer to first filter in current batch
 *
 * MAC filter entries from list were slated to be added to device. Returns
 * number of successful filters. Note that 0 does NOT mean success!
 **/
static int
i40e_update_filter_state(int count,
			 struct i40e_aqc_add_macvlan_element_data *add_list,
			 struct i40e_new_mac_filter *add_head)
{
	int retval = 0;
	int i;

	for (i = 0; i < count; i++) {
		/* Always check status of each filter. We don't need to check
		 * the firmware return status because we pre-set the filter
		 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
		 * request to the adminq. Thus, if it no longer matches then
		 * we know the filter is active.
		 */
		if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
			add_head->state = I40E_FILTER_FAILED;
		} else {
			add_head->state = I40E_FILTER_ACTIVE;
			retval++;
		}

		add_head = i40e_next_filter(add_head);
		if (!add_head)
			break;
	}

	return retval;
}

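/* Example: if firmware accepted 3 of 5 requested filters, the loop above
 * marks the corresponding i40e_new_mac_filter entries ACTIVE or FAILED and
 * returns 3; the caller (i40e_aqc_add_filters) compares that count against
 * num_add to decide whether to force promiscuous mode.
 */
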
/**
 * i40e_aqc_del_filters - Request firmware to delete a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @num_del: the number of filters to delete
 * @retval: Set to -EIO on failure to delete
 *
 * Send a request to firmware via AdminQ to delete a set of filters. Uses
 * *retval instead of a return value so that success does not force ret_val to
 * be set to 0. This ensures that a sequence of calls to this function
 * preserve the previous value of *retval on successful delete.
 **/
static
void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_remove_macvlan_element_data *list,
			  int num_del, int *retval)
{
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;
	int aq_err;

	aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
	aq_err = hw->aq.asq_last_status;

	/* Explicitly ignore and do not report when firmware returns ENOENT */
	if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
		*retval = -EIO;
		dev_info(&vsi->back->pdev->dev,
			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
			 vsi_name, i40e_stat_str(hw, aq_ret),
			 i40e_aq_str(hw, aq_err));
	}
}

/**
 * i40e_aqc_add_filters - Request firmware to add a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @add_head: Position in the add hlist
 * @num_add: the number of filters to add
 * @promisc_changed: set to true on exit if promiscuous mode was forced on
 *
 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
 * promisc_changed to true if the firmware has run out of space for more
 * filters.
 **/
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_add_macvlan_element_data *list,
			  struct i40e_new_mac_filter *add_head,
			  int num_add, bool *promisc_changed)
{
	struct i40e_hw *hw = &vsi->back->hw;
	int aq_err, fcnt;

	i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
	aq_err = hw->aq.asq_last_status;
	fcnt = i40e_update_filter_state(num_add, list, add_head);

	if (fcnt != num_add) {
		*promisc_changed = true;
		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
			 i40e_aq_str(hw, aq_err),
			 vsi_name);
	}
}

/**
 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
 * @vsi: pointer to the VSI
 * @vsi_name: name to display in messages
 * @f: filter data
 *
 * This function sets or clears the promiscuous broadcast flags for VLAN
 * filters in order to properly receive broadcast frames. Assumes that only
 * broadcast filters are passed.
 *
 * Returns status indicating success or failure.
 **/
static i40e_status
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_mac_filter *f)
{
	bool enable = f->state == I40E_FILTER_NEW;
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;

	if (f->vlan == I40E_VLAN_ANY) {
		aq_ret = i40e_aq_set_vsi_broadcast(hw,
						   vsi->seid,
						   enable,
						   NULL);
	} else {
		aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
							    vsi->seid,
							    enable,
							    f->vlan,
							    NULL);
	}

	if (aq_ret)
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s setting broadcast promiscuous mode on %s\n",
			 i40e_aq_str(hw, hw->aq.asq_last_status),
			 vsi_name);

	return aq_ret;
}

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct hlist_head tmp_add_list, tmp_del_list;
	struct i40e_mac_filter *f;
	struct i40e_new_mac_filter *new, *add_head = NULL;
	struct i40e_hw *hw = &vsi->back->hw;
	unsigned int failed_filters = 0;
	unsigned int vlan_filters = 0;
	bool promisc_changed = false;
	char vsi_name[16] = "PF";
	int filter_list_len = 0;
	i40e_status aq_ret = 0;
	u32 changed_flags = 0;
	struct hlist_node *h;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	int retval = 0;
	u16 cmd_flags;
	int list_size;
	int bkt;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	INIT_HLIST_HEAD(&tmp_add_list);
	INIT_HLIST_HEAD(&tmp_del_list);

	if (vsi->type == I40E_VSI_SRIOV)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
	else if (vsi->type != I40E_VSI_MAIN)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		/* Create a list of filters to delete. */
		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
			if (f->state == I40E_FILTER_REMOVE) {
				/* Move the element into temporary del_list */
				hash_del(&f->hlist);
				hlist_add_head(&f->hlist, &tmp_del_list);

				/* Avoid counting removed filters */
				continue;
			}
			if (f->state == I40E_FILTER_NEW) {
				/* Create a temporary i40e_new_mac_filter */
				new = kzalloc(sizeof(*new), GFP_ATOMIC);
				if (!new)
					goto err_no_memory_locked;

				/* Store pointer to the real filter */
				new->f = f;
				new->state = f->state;

				/* Add it to the hash list */
				hlist_add_head(&new->hlist, &tmp_add_list);
			}

			/* Count the number of active (current and new) VLAN
			 * filters we have now. Does not count filters which
			 * are marked for deletion.
			 */
			if (f->vlan > 0)
				vlan_filters++;
		}

		retval = i40e_correct_mac_vlan_filters(vsi,
						       &tmp_add_list,
						       &tmp_del_list,
						       vlan_filters);
		if (retval)
			goto err_no_memory_locked;

		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Now process 'del_list' outside the lock */
	if (!hlist_empty(&tmp_del_list)) {
		filter_list_len = hw->aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		list_size = filter_list_len *
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kzalloc(list_size, GFP_ATOMIC);
		if (!del_list)
			goto err_no_memory;

		hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
			cmd_flags = 0;

			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag and release filter list.
			 */
			if (is_broadcast_ether_addr(f->macaddr)) {
				i40e_aqc_broadcast_filter(vsi, vsi_name, f);

				hlist_del(&f->hlist);
				kfree(f);
				continue;
			}

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				del_list[num_del].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				del_list[num_del].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				i40e_aqc_del_filters(vsi, vsi_name, del_list,
						     num_del, &retval);
				memset(del_list, 0, list_size);
				num_del = 0;
			}
			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			hlist_del(&f->hlist);
			kfree(f);
		}

		if (num_del) {
			i40e_aqc_del_filters(vsi, vsi_name, del_list,
					     num_del, &retval);
		}

		kfree(del_list);
		del_list = NULL;
	}

	if (!hlist_empty(&tmp_add_list)) {
		/* Do all the adds now. */
		filter_list_len = hw->aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		list_size = filter_list_len *
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(list_size, GFP_ATOMIC);
		if (!add_list)
			goto err_no_memory;

		num_add = 0;
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
				     vsi->state)) {
				new->state = I40E_FILTER_FAILED;
				continue;
			}

			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag instead of adding a MAC filter.
			 */
			if (is_broadcast_ether_addr(new->f->macaddr)) {
				if (i40e_aqc_broadcast_filter(vsi, vsi_name,
							      new->f))
					new->state = I40E_FILTER_FAILED;
				else
					new->state = I40E_FILTER_ACTIVE;
				continue;
			}

			/* add to add array */
			if (num_add == 0)
				add_head = new;
			cmd_flags = 0;
			ether_addr_copy(add_list[num_add].mac_addr,
					new->f->macaddr);
			if (new->f->vlan == I40E_VLAN_ANY) {
				add_list[num_add].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				add_list[num_add].vlan_tag =
					cpu_to_le16((u16)(new->f->vlan));
			}
			add_list[num_add].queue_number = 0;
			/* set invalid match method for later detection */
			add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				i40e_aqc_add_filters(vsi, vsi_name, add_list,
						     add_head, num_add,
						     &promisc_changed);
				memset(add_list, 0, list_size);
				num_add = 0;
			}
		}
		if (num_add) {
			i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
					     num_add, &promisc_changed);
		}
		/* Now move all of the filters from the temp add list back to
		 * the VSI's list.
		 */
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			/* Only update the state if we're still NEW */
			if (new->f->state == I40E_FILTER_NEW)
				new->f->state = new->state;
			hlist_del(&new->hlist);
			kfree(new);
		}
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		kfree(add_list);
		add_list = NULL;
	}

	/* Determine the number of active and failed filters. */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	vsi->active_filters = 0;
	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->state == I40E_FILTER_ACTIVE)
			vsi->active_filters++;
		else if (f->state == I40E_FILTER_FAILED)
			failed_filters++;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* If promiscuous mode has changed, we need to calculate a new
	 * threshold for when we are safe to exit
	 */
	if (promisc_changed)
		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;

	/* Check if we are able to exit overflow promiscuous mode. We can
	 * safely exit if we didn't just enter, we no longer have any failed
	 * filters, and we have reduced filters below the threshold value.
	 */
	if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
	    !promisc_changed && !failed_filters &&
	    (vsi->active_filters < vsi->promisc_threshold)) {
		dev_info(&pf->pdev->dev,
			 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
			 vsi_name);
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		promisc_changed = true;
		vsi->promisc_threshold = 0;
	}

	/* if the VF is not trusted do not do promisc */
	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		goto out;
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed on %s, err %s aq_err %s\n",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	if ((changed_flags & IFF_PROMISC) || promisc_changed) {
		bool cur_promisc;

		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_VSI_OVERFLOW_PROMISC,
					vsi->state));
		if ((vsi->type == I40E_VSI_MAIN) &&
		    (pf->lan_veb != I40E_NO_VEB) &&
		    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
			/* set defport ON for Main VSI instead of true promisc
			 * this way we will get all unicast/multicast and VLAN
			 * promisc behavior but will not get VF or VMDq traffic
			 * replicated on the Main VSI.
			 */
			if (pf->cur_promisc != cur_promisc) {
				pf->cur_promisc = cur_promisc;
				if (cur_promisc)
					aq_ret =
					      i40e_aq_set_default_vsi(hw,
								      vsi->seid,
								      NULL);
				else
					aq_ret =
					    i40e_aq_clear_default_vsi(hw,
								      vsi->seid,
								      NULL);
				if (aq_ret) {
					retval =
					i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
					dev_info(&pf->pdev->dev,
						 "Set default VSI failed on %s, err %s, aq_err %s\n",
						 vsi_name,
						 i40e_stat_str(hw, aq_ret),
						 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
				}
			}
		} else {
			aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL,
							  true);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set unicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
			aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set multicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
		}
		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
						   vsi->seid,
						   cur_promisc, NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     pf->hw.aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set brdcast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw,
					     hw->aq.asq_last_status));
		}
	}
out:
	/* if something went wrong then set the changed flag so we try again */
	if (retval)
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return retval;

err_no_memory:
	/* Restore elements on the temporary add and delete lists */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
err_no_memory_locked:
	i40e_undo_del_filter_entries(vsi, &tmp_del_list);
	i40e_undo_add_filter_entries(vsi, &tmp_add_list);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return -ENOMEM;
}

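/* Overflow-promiscuous recovery heuristic used above: once the add path
 * overflows, promisc_threshold is set to 3/4 of the filters that did fit;
 * the VSI only leaves overflow promiscuous mode after the active filter
 * count drops back below that threshold and no failed filters remain.
 */
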
/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
			int ret = i40e_sync_vsi_filters(pf->vsi[v]);

			if (ret) {
				/* come back and try again later */
				pf->flags |= I40E_FLAG_FILTER_SYNC;
				break;
			}
		}
	}
}

/**
 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: the vsi
 **/
static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
{
	if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
		return I40E_RXBUFFER_2048;
	else
		return I40E_RXBUFFER_3072;
}

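/* A minimal sketch of how this limit is applied in i40e_change_mtu() below:
 * the frame size includes Ethernet, FCS and VLAN overhead, e.g. new_mtu of
 * 1500 gives 1500 + 14 + 4 + 4 = 1522 bytes, which must fit in a single Rx
 * buffer (2048 or 3072 bytes) since the XDP path here assumes one buffer
 * per frame.
 */
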
/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	if (i40e_enabled_xdp_vsi(vsi)) {
		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

		if (frame_size > i40e_max_xdp_frame_size(vsi))
			return -EINVAL;
	}

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);
	pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
		      I40E_FLAG_CLIENT_L2_CHANGE);
	return 0;
}

/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return i40e_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return i40e_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;  /* already enabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}

/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}

/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/
static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);
}

/**
 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only , -1 = any)
 *
 * This is a helper function for adding a new MAC/VLAN filter with the
 * specified VLAN for each existing MAC address already in the hash table.
 * This function does *not* perform any accounting to update filters based on
 * VLAN mode.
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 **/
int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	struct hlist_node *h;
	int bkt;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add_f = i40e_add_filter(vsi, f->macaddr, vid);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	return 0;
}

/**
 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be added
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
{
	int err;

	if (vsi->info.pvid)
		return -EINVAL;

	/* The network stack will attempt to add VID=0, with the intention to
	 * receive priority tagged packets with a VLAN of 0. Our HW receives
	 * these packets by default when configured to receive untagged
	 * packets, so we don't need to add a filter for this case.
	 * Additionally, HW interprets adding a VID=0 filter as meaning to
	 * receive *only* tagged traffic and stops receiving untagged traffic.
	 * Thus, we do not want to actually add a filter for VID=0
	 */
	if (!vid)
		return 0;

	/* Locked once because all functions invoked below iterate the list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	err = i40e_add_vlan_all_mac(vsi, vid);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (err)
		return err;

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}

/**
 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
 *
 * This function should be used to remove all VLAN filters which match the
 * given VID. It does not schedule the service event and does not take the
 * mac_filter_hash_lock so it may be combined with other operations under
 * a single invocation of the mac_filter_hash_lock.
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 **/
void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->vlan == vid)
			__i40e_del_filter(vsi, f);
	}
}

/**
 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be removed
 **/
void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
{
	if (!vid || vsi->info.pvid)
		return;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_rm_vlan_all_mac(vsi, vid);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
}

/**
 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 **/
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	int ret = 0;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	ret = i40e_vsi_add_vlan(vsi, vid);
	if (!ret)
		set_bit(vid, vsi->active_vlans);

	return ret;
}

/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 **/
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* return code is ignored as there is nothing a user
	 * can do about failure to remove and a log message was
	 * already printed from the other function
	 */
	i40e_vsi_kill_vlan(vsi, vid);

	clear_bit(vid, vsi->active_vlans);

	return 0;
}

/**
 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
 * @vsi: the vsi being brought back up
 **/
static void i40e_restore_vlan(struct i40e_vsi *vsi)
{
	u16 vid;

	if (!vsi->netdev)
		return;

	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);

	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
				     vid);
}

/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 **/
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
				    I40E_AQ_VSI_PVLAN_EMOD_STR;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add pvid failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		return -ENOENT;
	}

	return 0;
}

/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just use the vlan_rx_register() service to put it back to normal
 **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	i40e_vlan_stripping_disable(vsi);

	vsi->info.pvid = 0;
}

/**
 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);

	if (!i40e_enabled_xdp_vsi(vsi))
		return err;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);

	return err;
}

/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free VSI's transmit software resources
 **/
static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
				i40e_free_tx_resources(vsi->tx_rings[i]);
	}

	if (vsi->xdp_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
				i40e_free_tx_resources(vsi->xdp_rings[i]);
	}
}

/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);

	return err;
}

/**
 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/
static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
}

/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 **/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	int cpu;

	if (!ring->q_vector || !ring->netdev || ring->ch)
		return;

	/* We only initialize XPS once, so as not to overwrite user settings */
	if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
		return;

	cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
	netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
			    ring->queue_index);
}

/**
 * i40e_configure_tx_ring - Configure a transmit ring context and rest
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 **/
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* configure XPS */
	i40e_config_xps_tx_ring(ring);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
		tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = ring->dma +
			      (ring->count * sizeof(struct i40e_tx_desc));

	/* As part of VSI creation/update, FW allocates certain
	 * Tx arbitration queue sets for each TC enabled for
	 * the VSI. The FW returns the handles to these queue
	 * sets as part of the response buffer to Add VSI,
	 * Update VSI, etc. AQ commands. It is expected that
	 * these queue set handles be associated with the Tx
	 * queues by the driver as part of the TX queue context
	 * initialization. This has to be done regardless of
	 * DCB as by default everything is mapped to TC0.
	 */
	if (ring->ch)
		tx_ctx.rdylist =
			le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
	else
		tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);

	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	if (ring->ch) {
		if (ring->ch->type == I40E_VSI_VMDQ2)
			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
		else
			return -EINVAL;

		qtx_ctl |= (ring->ch->vsi_number <<
			    I40E_QTX_CTL_VFVM_INDX_SHIFT) &
			    I40E_QTX_CTL_VFVM_INDX_MASK;
	} else {
		if (vsi->type == I40E_VSI_VMDQ2) {
			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
			qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
				    I40E_QTX_CTL_VFVM_INDX_MASK;
		} else {
			qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
		}
	}

	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	/* cache tail off for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}

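/* The QTX_CTL programming above associates the queue with its owner: PF
 * queues use I40E_QTX_CTL_PF_QUEUE, while VMDq2/channel queues use
 * I40E_QTX_CTL_VM_QUEUE plus the VSI/VM index in the VFVM_INDX field.
 * In every case the PF index is OR'd in via the PF_INDX field.
 */
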
/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	ring->rx_buf_len = vsi->rx_buf_len;

	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));

	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	/* use 32 byte descriptors */
	rx_ctx.dsize = 1;

	/* descriptor type is always zero
	 * rx_ctx.dtype = 0;
	 */
	rx_ctx.hsplit_0 = 0;

	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	/* this controls whether VLAN is stripped from inner headers */
	rx_ctx.showiv = 0;
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* configure Rx buffer alignment */
	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
		clear_ring_build_skb_enabled(ring);
	else
		set_ring_build_skb_enabled(ring);

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));

	return 0;
}

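/* Note: rx_ctx.dbuff is expressed in units of BIT(I40E_RXQ_CTX_DBUFF_SHIFT)
 * bytes, hence the DIV_ROUND_UP above, and rxmax is capped at
 * chain_len * rx_buf_len so a chained receive never exceeds what the buffer
 * chain can actually hold.
 */
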
/**
 * i40e_vsi_configure_tx - Configure the VSI for Tx
 * @vsi: VSI structure describing this set of rings and resources
 *
 * Configure the Tx VSI for operation.
 **/
static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->tx_rings[i]);

	if (!i40e_enabled_xdp_vsi(vsi))
		return err;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->xdp_rings[i]);

	return err;
}

/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Configure the Rx VSI for operation.
 **/
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
		vsi->max_frame = I40E_MAX_RXBUFFER;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
#if (PAGE_SIZE < 8192)
	} else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = I40E_MAX_RXBUFFER;
		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
						       I40E_RXBUFFER_2048;
	}

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}

/**
 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
 * @vsi: ptr to the VSI
 **/
static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring, *rx_ring;
	u16 qoffset, qcount;
	int i, n;

	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Reset the TC information */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = 0;
			tx_ring->dcb_tc = 0;
		}
		return;
	}

	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
			continue;

		qoffset = vsi->tc_config.tc_info[n].qoffset;
		qcount = vsi->tc_config.tc_info[n].qcount;
		for (i = qoffset; i < (qoffset + qcount); i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = n;
			tx_ring->dcb_tc = n;
		}
	}
}

/**
 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
 * @vsi: ptr to the VSI
 **/
static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
{
	if (vsi->netdev)
		i40e_set_rx_mode(vsi->netdev);
}

/**
 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
 * @vsi: Pointer to the targeted VSI
 *
 * This function replays the hlist on the hw where all the SB Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
{
	struct i40e_fdir_filter *filter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* Reset FDir counters as we're replaying all existing filters */
	pf->fd_tcp4_filter_cnt = 0;
	pf->fd_udp4_filter_cnt = 0;
	pf->fd_sctp4_filter_cnt = 0;
	pf->fd_ip4_filter_cnt = 0;

	hlist_for_each_entry_safe(filter, node,
				  &pf->fdir_filter_list, fdir_node) {
		i40e_add_del_fdir(vsi, filter, true);
	}
}

/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);
	err = i40e_vsi_configure_tx(vsi);
	if (!err)
		err = i40e_vsi_configure_rx(vsi);

	return err;
}

/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	bool has_xdp = i40e_enabled_xdp_vsi(vsi);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[i];

		q_vector->itr_countdown = ITR_COUNTDOWN_START;
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);
		wr32(hw, I40E_PFINT_RATEN(vector - 1),
		     i40e_intrl_usec_to_reg(vsi->int_rate_limit));

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
			u32 val;

			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_TX <<
			       I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			if (has_xdp) {
				val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
				      (I40E_TX_ITR <<
				       I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
				      (vector <<
				       I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
				      (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
				      (I40E_QUEUE_TYPE_TX <<
				       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

				wr32(hw, I40E_QINT_TQCTL(nextqp), val);
			}

			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_RX <<
			       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST <<
					I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}

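/* Each vector's queues are chained through the NEXTQ fields programmed
 * above: PFINT_LNKLSTN points at the first Rx queue, the Rx queue points at
 * its (XDP) Tx queue, and the last Tx queue either points at the next Rx
 * queue of the same vector or terminates the chain with
 * I40E_QUEUE_END_OF_LIST.
 */
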
/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: board private structure
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;

	if (pf->flags & I40E_FLAG_PTP)
		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	     I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

/**
 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
	u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* set the ITR configuration */
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
	q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
	q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
	q_vector->tx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);

	i40e_enable_misc_int_causes(pf);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the queue int */
	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK		       |
	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	      (nextqp	   << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

	wr32(hw, I40E_QINT_RQCTL(0), val);

	if (i40e_enabled_xdp_vsi(vsi)) {
		val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		      |
		      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
		      (I40E_QUEUE_TYPE_TX
		       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

		wr32(hw, I40E_QINT_TQCTL(nextqp), val);
	}

	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		       |
	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

	wr32(hw, I40E_QINT_TQCTL(0), val);
	i40e_flush(hw);
}

/**
 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
 * @pf: board private structure
 **/
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;

	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	i40e_flush(hw);
}

/**
 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
 * @pf: board private structure
 **/
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
	      I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);

	wr32(hw, I40E_PFINT_DYN_CTL0, val);
	i40e_flush(hw);
}

/**
 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * i40e_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct i40e_q_vector *q_vector =
		container_of(notify, struct i40e_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * i40e_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void i40e_irq_affinity_release(struct kref *ref) {}

/**
 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
 * @vsi: the VSI being configured
 * @basename: name for the vector
 *
 * Allocates MSI-X vectors and requests interrupts from the kernel.
 **/
static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct i40e_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;
	int cpu;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  vsi->irq_handler,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSIX request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}

		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
		q_vector->affinity_notify.release = i40e_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread affinity hints out across online CPUs.
		 *
		 * get_cpu_mask returns a static constant mask with
		 * a permanent lifetime so it's ok to pass to
		 * irq_set_affinity_hint without making a copy.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	vsi->irqs_ready = true;
	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	/* disable interrupt causation from each queue */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u32 val;

		val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
		val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);

		val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
		val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);

		if (!i40e_enabled_xdp_vsi(vsi))
			continue;
		wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
	}

	/* disable each interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}

/**
 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_irq_dynamic_enable(vsi, i);
	} else {
		i40e_irq_dynamic_enable_icr0(pf);
	}

	i40e_flush(&pf->hw);
	return 0;
}

/**
 * i40e_free_misc_vector - Free the vector that handles non-queue events
 * @pf: board private structure
 **/
static void i40e_free_misc_vector(struct i40e_pf *pf)
{
	/* Disable ICR 0 */
	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
	i40e_flush(&pf->hw);

	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
		clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
	}
}

/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts. This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
	}

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
		struct i40e_q_vector *q_vector = vsi->q_vectors[0];

		/* We do not have a way to disarm Queue causes while leaving
		 * interrupt enabled for all other causes, ideally
		 * interrupt should be disabled while we are in NAPI but
		 * this is not a performance path and napi_schedule()
		 * can deal with rescheduling.
		 */
		if (!test_bit(__I40E_DOWN, pf->state))
			napi_schedule_irqoff(&q_vector->napi);
	}

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
	}

	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
		}
	}

	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
			 rd32(hw, I40E_PFHMC_ERRORINFO),
			 rd32(hw, I40E_PFHMC_ERRORDATA));
	}

	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
		}
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
			dev_info(&pf->pdev->dev, "device will be reset\n");
			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
			i40e_service_event_schedule(pf);
		}
		ena_mask &= ~icr0_remaining;
	}
	ret = IRQ_HANDLED;

enable_intr:
	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	if (!test_bit(__I40E_DOWN, pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return ret;
}

/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	struct i40e_vsi *vsi = tx_ring->vsi;
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;
		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}
		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buf->raw_buf);

		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move us past the eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);

	return budget > 0;
}

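/* Each flow director programming operation consumes two Tx descriptors (the
 * filter programming descriptor followed by its data descriptor), which is
 * why the cleanup loop above steps past two ring entries per completed
 * filter before decrementing the budget.
 */
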
/**
 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;
	struct i40e_vsi *vsi;

	if (!q_vector->tx.ring)
		return IRQ_HANDLED;

	vsi = q_vector->tx.ring->vsi;
	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);

	return IRQ_HANDLED;
}

/**
 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
 * @vsi: the VSI being configured
 * @v_idx: vector index
 * @qp_idx: queue pair index
 **/
static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;

	/* Place XDP Tx ring in the same q_vector ring list as regular Tx */
	if (i40e_enabled_xdp_vsi(vsi)) {
		struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];

		xdp_ring->q_vector = q_vector;
		xdp_ring->next = q_vector->tx.ring;
		q_vector->tx.ring = xdp_ring;
		q_vector->tx.count++;
	}

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
}

/**
 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per queue pair, but on a constrained vector budget, we
 * group the queue pairs as "efficiently" as possible.
 **/
static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
	int qp_remaining = vsi->num_queue_pairs;
	int q_vectors = vsi->num_q_vectors;
	int num_ringpairs;
	int v_start = 0;
	int qp_idx = 0;

	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
	 * group them so there are multiple queues per vector.
	 * It is also important to go through all the vectors available to be
	 * sure that if we don't use all the vectors, that the remaining vectors
	 * are cleared. This is especially important when decreasing the
	 * number of queues in use.
	 */
	for (; v_start < q_vectors; v_start++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];

		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);

		q_vector->num_ringpairs = num_ringpairs;

		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;

		while (num_ringpairs--) {
			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
			qp_idx++;
			qp_remaining--;
		}
	}
}

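/* With fewer vectors than queue pairs the DIV_ROUND_UP() above front-loads
 * the remainder: e.g. 10 queue pairs on 4 vectors are grouped 3/3/2/2.
 */
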
/**
 * i40e_vsi_request_irq - Request IRQ from the OS
 * @vsi: the VSI being configured
 * @basename: name for the vector
 **/
static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		err = i40e_vsi_request_irq_msix(vsi, basename);
	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
		err = request_irq(pf->pdev->irq, i40e_intr, 0,
				  pf->int_name, pf);
	else
		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
				  pf->int_name, pf);

	if (err)
		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/
static void i40e_netpoll(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		i40e_intr(pf->pdev->irq, netdev);
	}
}
#endif

#define I40E_QTX_ENA_WAIT_COUNT 50

/**
 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Tx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 **/
static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 tx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}

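/* The QENA handshake used by the queue control helpers: software toggles the
 * QENA_REQ bit to request a state change and hardware reflects the state it
 * has actually reached in QENA_STAT, so the wait routines poll STAT until it
 * matches the requested state or the retry limit expires.
 */
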
/**
 * i40e_control_tx_q - Start or stop a particular Tx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue. Note that any delay
 * required after the operation is expected to be handled by the caller of
 * this function.
 **/
static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	u32 tx_reg;
	int i;

	/* warn the TX unit of coming changes */
	i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
	if (!enable)
		usleep_range(10, 20);

	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
		if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* Skip if the queue is already in the requested state */
	if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
		return;

	/* turn on/off the queue */
	if (enable) {
		wr32(hw, I40E_QTX_HEAD(pf_q), 0);
		tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
	} else {
		tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	}

	wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
}

/**
 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
 * @seid: VSI SEID
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @is_xdp: true if the queue is used for XDP
 * @enable: start or stop the queue
 **/
static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
				  bool is_xdp, bool enable)
{
	int ret;

	i40e_control_tx_q(pf, pf_q, enable);

	/* wait for the change to finish */
	ret = i40e_pf_txq_wait(pf, pf_q, enable);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VSI seid %d %sTx ring %d %sable timeout\n",
			 seid, (is_xdp ? "XDP " : ""), pf_q,
			 (enable ? "en" : "dis"));
	}

	return ret;
}

/**
 * i40e_vsi_control_tx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret = 0;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
					     false /*is xdp*/, enable);
		if (ret)
			break;

		if (!i40e_enabled_xdp_vsi(vsi))
			continue;

		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     pf_q + vsi->alloc_queue_pairs,
					     true /*is xdp*/, enable);
		if (ret)
			break;
	}

	return ret;
}

/**
 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 **/
static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 rx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}

/**
 * i40e_control_rx_q - Start or stop a particular Rx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue. Note that any delay
 * required after the operation is expected to be handled by the caller of
 * this function.
 **/
static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	u32 rx_reg;
	int i;

	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
		if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* Skip if the queue is already in the requested state */
	if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
		return;

	/* turn on/off the queue */
	if (enable)
		rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
	else
		rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;

	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
}

/**
 * i40e_vsi_control_rx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret = 0;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		i40e_control_rx_q(pf, pf_q, enable);

		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	/* Due to HW errata, on Rx disable only, the register can indicate done
	 * before it really is. Needs 50ms to be sure
	 */
	if (!enable)
		mdelay(50);

	return ret;
}

/**
 * i40e_vsi_start_rings - Start a VSI's rings
 * @vsi: the VSI being configured
 **/
int i40e_vsi_start_rings(struct i40e_vsi *vsi)
{
	int ret = 0;

	/* do rx first for enable and last for disable */
	ret = i40e_vsi_control_rx(vsi, true);
	if (ret)
		return ret;
	ret = i40e_vsi_control_tx(vsi, true);

	return ret;
}

/**
 * i40e_vsi_stop_rings - Stop a VSI's rings
 * @vsi: the VSI being configured
 **/
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
	/* When port TX is suspended, don't wait */
	if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
		return i40e_vsi_stop_rings_no_wait(vsi);

	/* do rx first for enable and last for disable
	 * Ignore return value, we need to shutdown whatever we can
	 */
	i40e_vsi_control_tx(vsi, false);
	i40e_vsi_control_rx(vsi, false);
}

/**
 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
 * @vsi: the VSI being shutdown
 *
 * This function stops all the rings for a VSI but does not delay to verify
 * that rings have been disabled. It is expected that the caller is shutting
 * down multiple VSIs at once and will delay together for all the VSIs after
 * initiating the shutdown. This is particularly useful for shutting down lots
 * of VFs together. Otherwise, a large delay can be incurred while configuring
 * each VSI in serial.
 **/
void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		i40e_control_tx_q(pf, pf_q, false);
		i40e_control_rx_q(pf, pf_q, false);
	}
}

/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			int irq_num;
			u16 vector;

			vector = i + base;
			irq_num = pf->msix_entries[vector].vector;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity notifier in the IRQ descriptor */
			irq_set_affinity_notifier(irq_num, NULL);
			/* remove our suggested affinity mask for this IRQ */
			irq_set_affinity_hint(irq_num, NULL);
			synchronize_irq(irq_num);
			free_irq(irq_num, vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx.  To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);

				qp = next;
			}
		}
	} else {
		free_irq(pf->pdev->irq, pf);

		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}

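/* Each MSI-X vector owns a hardware linked list of queues (Rx first, then
 * Tx). The teardown above terminates the list by writing the end-of-list
 * marker into FIRSTQ_INDX and then walks RQCTL/TQCTL for every queue that
 * was on the list, clearing the cause-enable and MSIX index fields.
 */
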
/**
 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *ring;

	if (!q_vector)
		return;

	/* disassociate q_vector from rings */
	i40e_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;

	i40e_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI w/ an associated netdev is set up w/ NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	vsi->q_vectors[v_idx] = NULL;

	kfree_rcu(q_vector, rcu);
}

/**
 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI being un-configured
 *
 * This frees the memory allocated to the q_vectors and
 * deletes references to the NAPI struct.
 **/
static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
{
	int v_idx;

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
		i40e_free_q_vector(vsi, v_idx);
}

/**
 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
 * @pf: board private structure
 **/
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		pci_disable_msix(pf->pdev);
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		kfree(pf->irq_pile);
		pf->irq_pile = NULL;
	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
		pci_disable_msi(pf->pdev);
	}
	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
}

/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
	int i;

	i40e_free_misc_vector(pf);

	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
		      I40E_IWARP_IRQ_PILE_ID);

	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT - 1);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
}

/**
 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_enable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_enable(&q_vector->napi);
	}
}

/**
 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_disable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_disable(&q_vector->napi);
	}
}

/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 **/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
	vsi->current_netdev_flags = 0;
	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		pf->flags |= I40E_FLAG_CLIENT_RESET;
}

/**
 * i40e_quiesce_vsi - Pause a given VSI
 * @vsi: the VSI being paused
 **/
static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
	else
		i40e_vsi_close(vsi);
}

/**
 * i40e_unquiesce_vsi - Resume a given VSI
 * @vsi: the VSI being resumed
 **/
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{
	if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
		return;

	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
	else
		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
}

/**
 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_quiesce_vsi(pf->vsi[v]);
	}
}

/**
 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_unquiesce_vsi(pf->vsi[v]);
	}
}

/**
 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
 * @vsi: the VSI being configured
 *
 * Wait until all queues on a given VSI have been disabled.
 **/
int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* Check and wait for the Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}

		if (!i40e_enabled_xdp_vsi(vsi))
			goto wait_rx;

		/* Check and wait for the XDP Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
				       false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d XDP Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
wait_rx:
		/* Check and wait for the Rx queue */
		ret = i40e_pf_rxq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
	}

	return 0;
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
 * @pf: the PF
 *
 * This function waits for the queues to be in disabled state for all the
 * VSIs that are managed by this PF.
 **/
static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
	int v, ret = 0;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v]) {
			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
			if (ret)
				break;
		}
	}

	return ret;
}
#endif

/**
 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
 * @q_idx: TX queue number
 * @vsi: Pointer to VSI struct
 *
 * This function checks specified queue for given VSI. Detects hung condition.
 * We proactively detect hung TX queues by checking if interrupts are disabled
 * but there are pending descriptors.  If it appears hung, attempt to recover
 * by triggering a SW interrupt.
 **/
static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct i40e_pf *pf;
	u32 val, tx_pending;
	int i;

	pf = vsi->back;

	/* now that we have an index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (q_idx == vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (!tx_ring)
		return;

	/* Read interrupt register */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		val = rd32(&pf->hw,
			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					       tx_ring->vsi->base_vector - 1));
	else
		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

	tx_pending = i40e_get_tx_pending(tx_ring);

	/* Interrupts are disabled and TX pending is non-zero,
	 * trigger the SW interrupt (don't wait). Worst case
	 * there will be one extra interrupt which may result
	 * into not cleaning any queues because queues are cleaned.
	 */
	if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
		i40e_force_wb(vsi, tx_ring->q_vector);
}

/**
 * i40e_detect_recover_hung - Function to detect and recover hung_queues
 * @pf:  pointer to PF struct
 *
 * LAN VSI has netdev and netdev has TX queues. This function is to check
 * each of those TX queues if they are hung, trigger recovery by issuing
 * SW interrupt.
 **/
static void i40e_detect_recover_hung(struct i40e_pf *pf)
{
	struct net_device *netdev;
	struct i40e_vsi *vsi;
	unsigned int i;

	/* Only for LAN VSI */
	vsi = pf->vsi[pf->lan_vsi];

	if (!vsi)
		return;

	/* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
		return;

	/* Make sure type is MAIN VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	/* Bail out if netif_carrier is not OK */
	if (!netif_carrier_ok(netdev))
		return;

	/* Go thru' TX queues for netdev */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;

		q = netdev_get_tx_queue(netdev, i);
		if (q)
			i40e_detect_recover_hung_queue(i, vsi);
	}
}

/**
 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
 * @pf: pointer to PF
 *
 * Get TC map for ISCSI PF type that will include iSCSI TC
 * and LAN TC.
 **/
static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
{
	struct i40e_dcb_app_priority_table app;
	struct i40e_hw *hw = &pf->hw;
	u8 enabled_tc = 1; /* TC0 is always enabled */
	u8 tc, i;
	/* Get the iSCSI APP TLV */
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	for (i = 0; i < dcbcfg->numapps; i++) {
		app = dcbcfg->app[i];
		if (app.selector == I40E_APP_SEL_TCPIP &&
		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
			tc = dcbcfg->etscfg.prioritytable[app.priority];
			enabled_tc |= BIT(tc);
			break;
		}
	}

	return enabled_tc;
}

/**
 * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Return the number of TCs from given DCBx configuration
 **/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
	int i, tc_unused = 0;
	u8 num_tc = 0;
	u8 ret = 0;

	/* Scan the ETS Config Priority Table to find
	 * traffic class enabled for a given priority
	 * and create a bitmask of enabled TCs
	 */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);

	/* Now scan the bitmask to check for
	 * contiguous TCs starting with TC0
	 */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (num_tc & BIT(i)) {
			if (!tc_unused) {
				ret++;
			} else {
				pr_err("Non-contiguous TC - Disabling DCB\n");
				return 1;
			}
		} else {
			tc_unused = 1;
		}
	}

	/* There is always at least TC0 */
	if (!ret)
		ret = 1;

	return ret;
}

/**
 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Query the current DCB configuration and return the number of
 * traffic classes enabled from the given DCBX config
 **/
static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
	u8 enabled_tc = 1;
	u8 i;

	for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);

	return enabled_tc;
}

/**
 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
 * @pf: PF being queried
 *
 * Query the current MQPRIO configuration and return the number of
 * traffic classes enabled.
 **/
static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
	u8 enabled_tc = 1, i;

	for (i = 1; i < num_tc; i++)
		enabled_tc |= BIT(i);
	return enabled_tc;
}

/**
 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
 * @pf: PF being queried
 *
 * Return number of traffic classes enabled for the given PF
 **/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 i, enabled_tc = 1;
	u8 num_tc = 0;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;

	/* If neither MQPRIO nor DCB is enabled, then always use single TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return 1;

	/* SFP mode will be enabled for all TCs on port */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_num_tc(dcbcfg);

	/* MFP mode return count of enabled TCs for this PF */
	if (pf->hw.func_caps.iscsi)
		enabled_tc = i40e_get_iscsi_tc_map(pf);
	else
		return 1; /* Only TC0 */

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			num_tc++;
	}
	return num_tc;
}

/**
 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
 **/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return i40e_mqprio_get_enabled_tc(pf);

	/* If neither MQPRIO nor DCB is enabled for this PF then just return
	 * default TC
	 */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return I40E_DEFAULT_TRAFFIC_CLASS;

	/* SFP mode we want PF to be enabled for all TCs */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);

	/* MFP enabled and iSCSI PF type */
	if (pf->hw.func_caps.iscsi)
		return i40e_get_iscsi_tc_map(pf);
	else
		return I40E_DEFAULT_TRAFFIC_CLASS;
}

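/* Example of the TC bitmap handling above: a priority table that maps user
 * priorities onto TCs 0, 1 and 2 produces the bitmask 0x7 and three usable
 * TCs, while a non-contiguous mask such as 0x5 makes i40e_dcb_get_num_tc()
 * fall back to a single TC and disable DCB.
 */
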
/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i * 4)) & 0x7);
	}

	return 0;
}

/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status ret;
	int i;

	if (vsi->back->flags & I40E_FLAG_TC_MQPRIO)
		return 0;
	if (!vsi->mqprio_qopt.qopt.hw) {
		ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
		if (ret)
			dev_info(&vsi->back->pdev->dev,
				 "Failed to reset tx rate for vsi->seid %u\n",
				 vsi->seid);
		return ret;
	}
	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
				       NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 vsi->back->hw.aq.asq_last_status);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}

/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x00001001; the driver
		 * will set the numtc for netdev as 2 that will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & BIT(i))
			netdev_set_tc_queue(netdev,
					vsi->tc_config.tc_info[i].netdev_tc,
					vsi->tc_config.tc_info[i].qcount,
					vsi->tc_config.tc_info[i].qoffset);
	}

	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return;

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}

/**
 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}

/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc &&
	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
		ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
		if (ret)
			goto out;
	} else {
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
	}

	/* On destroying the qdisc, reset vsi->rss_size, as number of enabled
	 * queues changed.
	 */
	if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
		vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
				      vsi->num_queue_pairs);
		ret = i40e_vsi_config_rss(vsi);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "Failed to reconfig rss for num_queues\n");
			return ret;
		}
		vsi->reconfig_rss = false;
	}
	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
		ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}

	/* Update the VSI after updating the VSI queue-mapping
	 * information
	 */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Update vsi tc config failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}

/**
 * i40e_get_link_speed - Returns link speed for the interface
 * @vsi: VSI to be configured
 **/
int i40e_get_link_speed(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		return 40000;
	case I40E_LINK_SPEED_25GB:
		return 25000;
	case I40E_LINK_SPEED_20GB:
		return 20000;
	case I40E_LINK_SPEED_10GB:
		return 10000;
	case I40E_LINK_SPEED_1GB:
		return 1000;
	default:
		return -EINVAL;
	}
}

/**
 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
 * @vsi: VSI to be configured
 * @seid: seid of the channel/VSI
 * @max_tx_rate: max TX rate to be configured as BW limit
 *
 * Helper function to set BW limit for a given VSI
 **/
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
{
	struct i40e_pf *pf = vsi->back;
	u64 credits = 0;
	int speed = 0;
	int ret = 0;

	speed = i40e_get_link_speed(vsi);
	if (max_tx_rate > speed) {
		dev_err(&pf->pdev->dev,
			"Invalid max tx rate %llu specified for VSI seid %d.",
			max_tx_rate, seid);
		return -EINVAL;
	}
	if (max_tx_rate && max_tx_rate < 50) {
		dev_warn(&pf->pdev->dev,
			 "Setting max tx rate to minimum usable value of 50Mbps.\n");
		max_tx_rate = 50;
	}

	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
	credits = max_tx_rate;
	do_div(credits, I40E_BW_CREDIT_DIVISOR);
	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
			max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	return ret;
}

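/* Tx rate limits are programmed in credits of I40E_BW_CREDIT_DIVISOR
 * (50 Mbps): e.g. a 175 Mbps request is truncated by do_div() to 3 credits,
 * so the limit takes effect in 50 Mbps steps.
 */
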
/**
 * i40e_remove_queue_channels - Remove queue channels for the TCs
 * @vsi: VSI to be configured
 *
 * Remove queue channels for the TCs
 **/
static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
{
	enum i40e_admin_queue_err last_aq_status;
	struct i40e_cloud_filter *cfilter;
	struct i40e_channel *ch, *ch_tmp;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;
	int ret, i;

	/* Reset rss size that was stored when reconfiguring rss for
	 * channel VSIs with non-power-of-2 queue count.
	 */
	vsi->current_rss_size = 0;

	/* perform cleanup for channels if they exist */
	if (list_empty(&vsi->ch_list))
		return;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		struct i40e_vsi *p_vsi;

		list_del(&ch->list);
		p_vsi = ch->parent_vsi;
		if (!p_vsi || !ch->initialized) {
			kfree(ch);
			continue;
		}
		/* Reset queue contexts */
		for (i = 0; i < ch->num_queue_pairs; i++) {
			struct i40e_ring *tx_ring, *rx_ring;
			u16 pf_q;

			pf_q = ch->base_queue + i;
			tx_ring = vsi->tx_rings[pf_q];
			tx_ring->ch = NULL;

			rx_ring = vsi->rx_rings[pf_q];
			rx_ring->ch = NULL;
		}

		/* Reset BW configured for this VSI via mqprio */
		ret = i40e_set_bw_limit(vsi, ch->seid, 0);
		if (ret)
			dev_info(&vsi->back->pdev->dev,
				 "Failed to reset tx rate for ch->seid %u\n",
				 ch->seid);

		/* delete cloud filters associated with this channel */
		hlist_for_each_entry_safe(cfilter, node,
					  &pf->cloud_filter_list, cloud_node) {
			if (cfilter->seid != ch->seid)
				continue;

			hash_del(&cfilter->cloud_node);
			if (cfilter->dst_port)
				ret = i40e_add_del_cloud_filter_big_buf(vsi,
									cfilter,
									false);
			else
				ret = i40e_add_del_cloud_filter(vsi, cfilter,
								false);
			last_aq_status = pf->hw.aq.asq_last_status;
			if (ret)
				dev_info(&pf->pdev->dev,
					 "Failed to delete cloud filter, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw, last_aq_status));
			kfree(cfilter);
		}

		/* delete VSI from FW */
		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
					     NULL);
		if (ret)
			dev_err(&vsi->back->pdev->dev,
				"unable to remove channel (%d) for parent VSI(%d)\n",
				ch->seid, p_vsi->seid);
		kfree(ch);
	}
	INIT_LIST_HEAD(&vsi->ch_list);
}

/**
 * i40e_is_any_channel - channel exist or not
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Returns true or false if channel(s) exist for associated VSI or not
 **/
static bool i40e_is_any_channel(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (ch->initialized)
			return true;
	}

	return false;
}

/**
 * i40e_get_max_queues_for_channel
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Helper function which returns max value among the queue counts set on the
 * channels/TCs created.
 **/
static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	int max = 0;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (!ch->initialized)
			continue;
		if (ch->num_queue_pairs > max)
			max = ch->num_queue_pairs;
	}

	return max;
}

/**
 * i40e_validate_num_queues - validate num_queues w.r.t channel
 * @pf: ptr to PF device
 * @num_queues: number of queues
 * @vsi: the parent VSI
 * @reconfig_rss: indicates should the RSS be reconfigured or not
 *
 * This function validates number of queues in the context of new channel
 * which is being established and determines if RSS should be reconfigured
 * or not for parent VSI.
 **/
static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
				    struct i40e_vsi *vsi, bool *reconfig_rss)
{
	int max_ch_queues;

	if (!reconfig_rss)
		return -EINVAL;

	*reconfig_rss = false;

	if (num_queues > I40E_MAX_QUEUES_PER_CH) {
		dev_err(&pf->pdev->dev,
			"Failed to create VMDq VSI. User requested num_queues (%d) > I40E_MAX_QUEUES_PER_VSI (%u)\n",
			num_queues, I40E_MAX_QUEUES_PER_CH);
		return -EINVAL;
	}

	if (vsi->current_rss_size) {
		if (num_queues > vsi->current_rss_size) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) > vsi's current_size(%d)\n",
				num_queues, vsi->current_rss_size);
			return -EINVAL;
		} else if ((num_queues < vsi->current_rss_size) &&
			   (!is_power_of_2(num_queues))) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
				num_queues, vsi->current_rss_size);
			return -EINVAL;
		}
	}

	if (!is_power_of_2(num_queues)) {
		/* Find the max num_queues configured for channel if channel
		 * exist.
		 * if channel exist, then enforce 'num_queues' to be more than
		 * max ever queues configured for channel.
		 */
		max_ch_queues = i40e_get_max_queues_for_channel(vsi);
		if (num_queues < max_ch_queues) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) < max queues configured for channel(%d)\n",
				num_queues, max_ch_queues);
			return -EINVAL;
		}
		*reconfig_rss = true;
	}

	return 0;
}

/**
 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
 * @vsi: the VSI being setup
 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
 *
 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
 **/
static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	struct i40e_hw *hw = &pf->hw;
	int local_rss_size;
	u8 *lut;
	int ret;

	if (rss_size > vsi->rss_size)
		return -EINVAL;

	local_rss_size = min_t(int, vsi->rss_size, rss_size);
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Ignoring user configured lut if there is one */
	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);

	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot set RSS lut, err %s aq_err %s\n",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		kfree(lut);
		return ret;
	}
	kfree(lut);

	/* Do the update w.r.t. storing rss_size */
	if (!vsi->orig_rss_size)
		vsi->orig_rss_size = vsi->rss_size;
	vsi->current_rss_size = local_rss_size;

	return 0;
}

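/* After a reconfig the RSS LUT only spreads hash results across the first
 * local_rss_size queues; the original rss_size is preserved in orig_rss_size
 * so it can be restored later.
 */
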
/**
 * i40e_channel_setup_queue_map - Setup a channel queue map
 * @pf: ptr to PF device
 * @ctxt: VSI context structure
 * @ch: ptr to channel structure
 *
 * Setup queue map for a specific channel
 **/
static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
					 struct i40e_vsi_context *ctxt,
					 struct i40e_channel *ch)
{
	u16 qcount, qmap, sections = 0;
	u8 offset = 0;
	int pow;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

	qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
	ch->num_queue_pairs = qcount;

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(qcount);
	if (!is_power_of_2(qcount))
		pow++;

	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);

	ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}

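/* TC0's qmap always advertises a power-of-two queue count: e.g. a channel
 * with qcount = 6 yields pow = ilog2(6) + 1 = 3, i.e. the map describes 8
 * queues starting at the channel's base queue.
 */
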
/**
 * i40e_add_channel - add a channel by adding VSI
 * @pf: ptr to PF device
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @ch: ptr to channel structure
 *
 * Add a channel (VSI) using add_vsi and queue_map
 **/
static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
			    struct i40e_channel *ch)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	u8 enabled_tc = 0x1; /* TC0 enabled */
	int ret;

	if (ch->type != I40E_VSI_VMDQ2) {
		dev_info(&pf->pdev->dev,
			 "add new vsi failed, ch->type %d\n", ch->type);
		return -EINVAL;
	}

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.pf_num = hw->pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = uplink_seid;
	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
	if (ch->type == I40E_VSI_VMDQ2)
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
		ctxt.info.valid_sections |=
		     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
		ctxt.info.switch_id =
		   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
	}

	/* Set queue map for a given VSI context */
	i40e_channel_setup_queue_map(pf, &ctxt, ch);

	/* Now time to create VSI */
	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "add new vsi failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw,
				     pf->hw.aq.asq_last_status));
		return -ENOENT;
	}

	/* Success, update channel */
	ch->enabled_tc = enabled_tc;
	ch->seid = ctxt.seid;
	ch->vsi_number = ctxt.vsi_number;
	ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);

	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * add vsi
	 */
	ch->info.mapping_flags = ctxt.info.mapping_flags;
	memcpy(&ch->info.queue_mapping,
	       &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
	memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
	       sizeof(ctxt.info.tc_mapping));

	return 0;
}

/**
 * i40e_channel_config_bw - configure BW for the channel
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @bw_share: BW shared credits per TC
 *
 * Configure BW allocation for the channel (VSI).
 **/
static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
				  u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status ret;
	int i;

	bw_data.tc_valid_bits = ch->enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
				       &bw_data, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
			 vsi->back->hw.aq.asq_last_status, ch->seid);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		ch->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}

/**
 * i40e_channel_config_tx_ring - config TX ring associated with new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Configure TX rings associated with channel (VSI) since queues are being
 * shared from parent VSI.
 **/
static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
				       struct i40e_vsi *vsi,
				       struct i40e_channel *ch)
{
	i40e_status ret;
	int i;
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (ch->enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	/* configure BW for new VSI */
	ret = i40e_channel_config_bw(vsi, ch, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for channel (seid %u)\n",
			 ch->enabled_tc, ch->seid);
		return ret;
	}

	for (i = 0; i < ch->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring, *rx_ring;
		u16 pf_q;

		pf_q = ch->base_queue + i;

		/* Get to TX ring ptr of main VSI, for re-setup TX queue
		 * context
		 */
		tx_ring = vsi->tx_rings[pf_q];
		tx_ring->ch = ch;

		/* Get the RX ring ptr */
		rx_ring = vsi->rx_rings[pf_q];
		rx_ring->ch = ch;
	}

	return 0;
}

/**
 * i40e_setup_hw_channel - setup new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @type: type of channel to be created (VMDq2/VF)
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and configures TX rings accordingly
 **/
static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
					struct i40e_vsi *vsi,
					struct i40e_channel *ch,
					u16 uplink_seid, u8 type)
{
	int ret;

	ch->initialized = false;
	ch->base_queue = vsi->next_base_queue;
	ch->type = type;

	/* Proceed with creation of channel (VMDq2) VSI */
	ret = i40e_add_channel(pf, uplink_seid, ch);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to add_channel using uplink_seid %u\n",
			 uplink_seid);
		return ret;
	}

	/* Mark the successful creation of channel */
	ch->initialized = true;

	/* Reconfigure TX queues using QTX_CTL register */
	ret = i40e_channel_config_tx_ring(pf, vsi, ch);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to configure TX rings for channel %u\n",
			 ch->seid);
		return ret;
	}

	/* update 'next_base_queue' */
	vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
	dev_dbg(&pf->pdev->dev,
		"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
		ch->seid, ch->vsi_number, ch->stat_counter_idx,
		ch->num_queue_pairs,
		vsi->next_base_queue);
	return ret;
}

/**
 * i40e_setup_channel - setup new channel using uplink element
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and uplink switching element (uplink_seid)
 **/
static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
			       struct i40e_channel *ch)
{
	u8 vsi_type;
	u16 seid;
	int ret;

	if (vsi->type == I40E_VSI_MAIN) {
		vsi_type = I40E_VSI_VMDQ2;
	} else {
		dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
			vsi->type);
		return false;
	}

	/* underlying switching element */
	seid = pf->vsi[pf->lan_vsi]->uplink_seid;

	/* create channel (VSI), configure TX rings */
	ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
	if (ret) {
		dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
		return false;
	}

	return ch->initialized ? true : false;
}

/**
 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
 * @vsi: ptr to VSI which has PF backing
 *
 * Sets up switch mode correctly if it needs to be changed and perform
 * what are allowed modes.
 **/
static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
{
	u8 mode;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
	if (ret)
		return ret;

	if (hw->dev_caps.switch_mode) {
		/* if switch mode is set, support mode2 (non-tunneled for
		 * cloud filter) for now
		 */
		u32 switch_mode = hw->dev_caps.switch_mode &
				  I40E_SWITCH_MODE_MASK;
		if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
			if (switch_mode == I40E_CLOUD_FILTER_MODE2)
				return 0;
			dev_err(&pf->pdev->dev,
				"Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
				hw->dev_caps.switch_mode);
			return -EINVAL;
		}
	}

	/* Set Bit 7 to be valid */
	mode = I40E_AQ_SET_SWITCH_BIT7_VALID;

	/* Set L4type to both TCP and UDP support */
	mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;

	/* Set cloud filter mode */
	mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;

	/* Prep mode field for set_switch_config */
	ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
					pf->last_sw_conf_valid_flags,
					mode, NULL);
	if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
		dev_err(&pf->pdev->dev,
			"couldn't set switch config bits, err %s aq_err %s\n",
			i40e_stat_str(hw, ret),
			i40e_aq_str(hw,
				    hw->aq.asq_last_status));

	return ret;
}

/**
 * i40e_create_queue_channel - function to create channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (it contains channel specific params)
 *
 * This function creates channel (VSI) using num_queues specified by user,
 * reconfigs RSS if needed.
 **/
int i40e_create_queue_channel(struct i40e_vsi *vsi,
			      struct i40e_channel *ch)
{
	struct i40e_pf *pf = vsi->back;
	bool reconfig_rss;
	int err;

	if (!ch->num_queue_pairs) {
		dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
			ch->num_queue_pairs);
		return -EINVAL;
	}

	/* validate user requested num_queues for channel */
	err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
				       &reconfig_rss);
	if (err) {
		dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
			 ch->num_queue_pairs);
		return -EINVAL;
	}

	/* By default we are in VEPA mode, if this is the first VF/VMDq
	 * VSI to be added switch to VEB mode.
	 */
	if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
	    (!i40e_is_any_channel(vsi))) {
		if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
			dev_dbg(&pf->pdev->dev,
				"Failed to create channel. Override queues (%u) not power of 2\n",
				vsi->tc_config.tc_info[0].qcount);
			return -EINVAL;
		}

		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;

			if (vsi->type == I40E_VSI_MAIN) {
				if (pf->flags & I40E_FLAG_TC_MQPRIO)
					i40e_do_reset(pf, I40E_PF_RESET_FLAG,
						      true);
				else
					i40e_do_reset_safe(pf,
							   I40E_PF_RESET_FLAG);
			}
		}
		/* now onwards for main VSI, number of queues will be value
		 * of TC0's queue count
		 */
	}

	/* By this time, vsi->cnt_q_avail shall be set to non-zero and
	 * it should be more than num_queues
	 */
	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
		dev_dbg(&pf->pdev->dev,
			"Error: cnt_q_avail (%u) less than num_queues %d\n",
			vsi->cnt_q_avail, ch->num_queue_pairs);
		return -EINVAL;
	}

	/* reconfig_rss only if vsi type is MAIN_VSI */
	if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
		err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "Error: unable to reconfig rss for num_queues (%u)\n",
				 ch->num_queue_pairs);
			return -EINVAL;
		}
	}

	if (!i40e_setup_channel(pf, vsi, ch)) {
		dev_info(&pf->pdev->dev, "Failed to setup channel\n");
		return -EINVAL;
	}

	dev_info(&pf->pdev->dev,
		 "Setup channel (id:%u) utilizing num_queues %d\n",
		 ch->seid, ch->num_queue_pairs);

	/* configure VSI for BW limit */
	if (ch->max_tx_rate) {
		u64 credits = ch->max_tx_rate;

		if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
			return -EINVAL;

		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&pf->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			ch->max_tx_rate, credits, ch->seid);
	}

	/* in case of VF, this will be main SRIOV VSI */
	ch->parent_vsi = vsi;

	/* and update main_vsi's count for queue_available to use */
	vsi->cnt_q_avail -= ch->num_queue_pairs;

	return 0;
}

/**
 * i40e_configure_queue_channels - Add queue channel for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures queue channel mapping to the given TCs
 **/
static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch;
	u64 max_rate = 0;
	int ret = 0, i;

	/* Create app vsi with the TCs. Main VSI with TC0 is already set up */
	vsi->tc_seid_map[0] = vsi->seid;
	for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			ch = kzalloc(sizeof(*ch), GFP_KERNEL);
			if (!ch) {
				ret = -ENOMEM;
				goto err_free;
			}

			INIT_LIST_HEAD(&ch->list);
			ch->num_queue_pairs =
				vsi->tc_config.tc_info[i].qcount;
			ch->base_queue =
				vsi->tc_config.tc_info[i].qoffset;

			/* Bandwidth limit through tc interface is in bytes/s,
			 * change to Mbit/s
			 */
			max_rate = vsi->mqprio_qopt.max_rate[i];
			do_div(max_rate, I40E_BW_MBPS_DIVISOR);
			ch->max_tx_rate = max_rate;

			list_add_tail(&ch->list, &vsi->ch_list);

			ret = i40e_create_queue_channel(vsi, ch);
			if (ret) {
				dev_err(&vsi->back->pdev->dev,
					"Failed creating queue channel with TC%d: queues %d\n",
					i, ch->num_queue_pairs);
				goto err_free;
			}
			vsi->tc_seid_map[i] = ch->seid;
		}
	}
	return ret;

err_free:
	i40e_remove_queue_channels(vsi);
	return ret;
}
/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VEB bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return ret;
}
#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: ptr to the PF
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller would've quiesce all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = I40E_DEFAULT_TRAFFIC_CLASS;

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}
/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure to
 * resume.
 **/
static int i40e_resume_port_tx(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_aq_resume_port_tx(hw, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Resume Port Tx failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	}

	return ret;
}
/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
	if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");
		} else {
			/* When status is not DISABLED then DCBX in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC
			 * or explicitly disable if only one TC
			 */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "Query for DCB configuration failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return err;
}
#endif /* CONFIG_I40E_DCB */
#define SPEED_SIZE 14
/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 **/
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
	enum i40e_aq_link_speed new_speed;
	struct i40e_pf *pf = vsi->back;
	char *speed = "Unknown";
	char *fc = "Unknown";
	char *fec = "";
	char *req_fec = "";
	char *an = "";

	new_speed = pf->hw.phy.link_info.link_speed;

	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
		return;
	vsi->current_isup = isup;
	vsi->current_speed = new_speed;
	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	/* Warn user if link speed on NPAR enabled partition is not at
	 * least 10GB
	 */
	if (pf->hw.func_caps.npar_enable &&
	    (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
	     pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
		netdev_warn(vsi->netdev,
			    "The partition detected link speed that is less than 10Gbps\n");

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case I40E_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case I40E_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case I40E_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case I40E_LINK_SPEED_1GB:
		speed = "1000 M";
		break;
	case I40E_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		break;
	}

	switch (pf->hw.fc.current_mode) {
	case I40E_FC_FULL:
		fc = "RX/TX";
		break;
	case I40E_FC_TX_PAUSE:
		fc = "TX";
		break;
	case I40E_FC_RX_PAUSE:
		fc = "RX";
		break;
	default:
		fc = "None";
		break;
	}

	if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
		req_fec = ", Requested FEC: None";
		fec = ", FEC: None";
		an = ", Autoneg: False";

		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
			an = ", Autoneg: True";

		if (pf->hw.phy.link_info.fec_info &
		    I40E_AQ_CONFIG_FEC_KR_ENA)
			fec = ", FEC: CL74 FC-FEC/BASE-R";
		else if (pf->hw.phy.link_info.fec_info &
			 I40E_AQ_CONFIG_FEC_RS_ENA)
			fec = ", FEC: CL108 RS-FEC";

		/* 'CL108 RS-FEC' should be displayed when RS is requested, or
		 * both RS and FC are requested
		 */
		if (vsi->back->hw.phy.link_info.req_fec_info &
		    (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
			if (vsi->back->hw.phy.link_info.req_fec_info &
			    I40E_AQ_REQUEST_FEC_RS)
				req_fec = ", Requested FEC: CL108 RS-FEC";
			else
				req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
		}
	}

	netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
		    speed, req_fec, fec, an, fc);
}
/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_start_rings(vsi);
	if (err)
		return err;

	clear_bit(__I40E_VSI_DOWN, vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = 0;
		pf->fd_atr_cnt = 0;
		i40e_fdir_filter_restore(vsi);
	}

	/* On the next run of the service_task, notify any clients of the new
	 * opened netdev
	 */
	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
	i40e_service_event_schedule(pf);

	return 0;
}
/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	WARN_ON(in_interrupt());
	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, pf->state);
}
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err;

	err = i40e_vsi_configure(vsi);
	if (!err)
		err = i40e_up_complete(vsi);

	return err;
}
/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_VSI_DOWN bit.
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_stop_rings(vsi);
	i40e_napi_disable_all(vsi);

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(vsi->tx_rings[i]);
		if (i40e_enabled_xdp_vsi(vsi))
			i40e_clean_tx_ring(vsi->xdp_rings[i]);
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}
}
/**
 * i40e_validate_mqprio_qopt - validate queue mapping info
 * @vsi: the VSI being configured
 * @mqprio_qopt: queue parameters
 **/
static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
				     struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 sum_max_rate = 0;
	u64 max_rate = 0;
	int i;

	if (mqprio_qopt->qopt.offset[0] != 0 ||
	    mqprio_qopt->qopt.num_tc < 1 ||
	    mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
		return -EINVAL;
	for (i = 0; ; i++) {
		if (!mqprio_qopt->qopt.count[i])
			return -EINVAL;
		if (mqprio_qopt->min_rate[i]) {
			dev_err(&vsi->back->pdev->dev,
				"Invalid min tx rate (greater than 0) specified\n");
			return -EINVAL;
		}
		max_rate = mqprio_qopt->max_rate[i];
		do_div(max_rate, I40E_BW_MBPS_DIVISOR);
		sum_max_rate += max_rate;

		if (i >= mqprio_qopt->qopt.num_tc - 1)
			break;
		if (mqprio_qopt->qopt.offset[i + 1] !=
		    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
			return -EINVAL;
	}
	if (vsi->num_queue_pairs <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
		return -EINVAL;
	}
	if (sum_max_rate > i40e_get_link_speed(vsi)) {
		dev_err(&vsi->back->pdev->dev,
			"Invalid max tx rate specified\n");
		return -EINVAL;
	}
	return 0;
}
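/* Illustrative note (not part of the original source): a queue layout that
 * passes the checks above must start at offset 0 and keep each TC's queue
 * range contiguous with the previous one, e.g. for num_tc = 2:
 *
 *   qopt.count[]  = { 4, 4 }
 *   qopt.offset[] = { 0, 4 }   // offset[1] == offset[0] + count[0]
 *   min_rate[]    = { 0, 0 }   // non-zero min rates are rejected
 *
 * and the sum of the per-TC max rates (converted to Mbps) may not exceed
 * the link speed reported by i40e_get_link_speed().
 */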
/**
 * i40e_vsi_set_default_tc_config - set default values for tc configuration
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
{
	u16 qcount;
	int i;

	/* Only TC0 is enabled */
	vsi->tc_config.numtc = 1;
	vsi->tc_config.enabled_tc = 1;
	qcount = min_t(int, vsi->alloc_queue_pairs,
		       i40e_pf_get_max_q_per_tc(vsi->back));
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* For the TC that is not enabled set the offset to default
		 * queue and allocate one queue for the given TC.
		 */
		vsi->tc_config.tc_info[i].qoffset = 0;
		if (i == 0)
			vsi->tc_config.tc_info[i].qcount = qcount;
		else
			vsi->tc_config.tc_info[i].qcount = 1;
		vsi->tc_config.tc_info[i].netdev_tc = 0;
	}
}
/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: tc offload data
 **/
static int i40e_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0, num_tc, hw;
	bool need_reset = false;
	int ret = -EINVAL;
	u16 mode;
	int i;

	num_tc = mqprio_qopt->qopt.num_tc;
	hw = mqprio_qopt->qopt.hw;
	mode = mqprio_qopt->mode;
	if (!hw) {
		pf->flags &= ~I40E_FLAG_TC_MQPRIO;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		goto config_tc;
	}

	/* Check if MFP enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev,
			    "Configuring TC not supported in MFP mode\n");
		return ret;
	}
	switch (mode) {
	case TC_MQPRIO_MODE_DCB:
		pf->flags &= ~I40E_FLAG_TC_MQPRIO;

		/* Check if DCB enabled to continue */
		if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
			netdev_info(netdev,
				    "DCB is not enabled for adapter\n");
			return ret;
		}

		/* Check whether tc count is within enabled limit */
		if (num_tc > i40e_pf_get_num_tc(pf)) {
			netdev_info(netdev,
				    "TC count greater than enabled on link for adapter\n");
			return ret;
		}
		break;
	case TC_MQPRIO_MODE_CHANNEL:
		if (pf->flags & I40E_FLAG_DCB_ENABLED) {
			netdev_info(netdev,
				    "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
			return ret;
		}
		if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
			return ret;
		ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
		if (ret)
			return ret;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt,
		       sizeof(*mqprio_qopt));
		pf->flags |= I40E_FLAG_TC_MQPRIO;
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
		break;
	default:
		return ret;
	}

config_tc:
	/* Generate TC map for number of tc requested */
	for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);

	/* Requesting same TC configuration as already enabled */
	if (enabled_tc == vsi->tc_config.enabled_tc &&
	    mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
		i40e_remove_queue_channels(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		need_reset = true;
		goto exit;
	}

	if (pf->flags & I40E_FLAG_TC_MQPRIO) {
		if (vsi->mqprio_qopt.max_rate[0]) {
			u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];

			do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
			ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
			if (!ret) {
				u64 credits = max_tx_rate;

				do_div(credits, I40E_BW_CREDIT_DIVISOR);
				dev_dbg(&vsi->back->pdev->dev,
					"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
					max_tx_rate, credits, vsi->seid);
			} else {
				need_reset = true;
				goto exit;
			}
		}
		ret = i40e_configure_queue_channels(vsi);
		if (ret) {
			netdev_info(netdev,
				    "Failed configuring queue channels\n");
			need_reset = true;
			goto exit;
		}
	}

exit:
	/* Reset the configuration data to defaults, only TC0 is enabled */
	if (need_reset) {
		i40e_vsi_set_default_tc_config(vsi);
		need_reset = false;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);
	return ret;
}
/**
 * i40e_set_cld_element - sets cloud filter element data
 * @filter: cloud filter rule
 * @cld: ptr to cloud filter element data
 *
 * This is helper function to copy data into cloud filter element
 **/
static inline void
i40e_set_cld_element(struct i40e_cloud_filter *filter,
		     struct i40e_aqc_cloud_filters_element_data *cld)
{
	int i, j;
	u32 ipa;

	memset(cld, 0, sizeof(*cld));
	ether_addr_copy(cld->outer_mac, filter->dst_mac);
	ether_addr_copy(cld->inner_mac, filter->src_mac);

	if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
		return;

	if (filter->n_proto == ETH_P_IPV6) {
#define IPV6_MAX_INDEX	(ARRAY_SIZE(filter->dst_ipv6) - 1)
		for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
		     i++, j += 2) {
			ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
			ipa = cpu_to_le32(ipa);
			memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
		}
	} else {
		ipa = be32_to_cpu(filter->dst_ipv4);
		memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
	}

	cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));

	/* tenant_id is not supported by FW now, once the support is enabled
	 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
	 */
	if (filter->tenant_id)
		return;
}
/**
 * i40e_add_del_cloud_filter - Add/del cloud filter
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add, if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec.
 * Returns 0 if the filter were successfully added.
 **/
static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
				     struct i40e_cloud_filter *filter, bool add)
{
	struct i40e_aqc_cloud_filters_element_data cld_filter;
	struct i40e_pf *pf = vsi->back;
	int ret;
	static const u16 flag_table[128] = {
		[I40E_CLOUD_FILTER_FLAGS_OMAC]  =
			I40E_AQC_ADD_CLOUD_FILTER_OMAC,
		[I40E_CLOUD_FILTER_FLAGS_IMAC]  =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC,
		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN]  =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
		[I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
		[I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
			I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
		[I40E_CLOUD_FILTER_FLAGS_IIP] =
			I40E_AQC_ADD_CLOUD_FILTER_IIP,
	};

	if (filter->flags >= ARRAY_SIZE(flag_table))
		return I40E_ERR_CONFIG;

	/* copy element needed to add cloud filter from filter */
	i40e_set_cld_element(filter, &cld_filter);

	if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
		cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
					     I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);

	if (filter->n_proto == ETH_P_IPV6)
		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
						I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
	else
		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
						I40E_AQC_ADD_CLOUD_FLAGS_IPV4);

	if (add)
		ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
						&cld_filter, 1);
	else
		ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
						&cld_filter, 1);
	if (ret)
		dev_dbg(&pf->pdev->dev,
			"Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
			add ? "add" : "delete", filter->dst_port, ret,
			pf->hw.aq.asq_last_status);
	else
		dev_info(&pf->pdev->dev,
			 "%s cloud filter for VSI: %d\n",
			 add ? "Added" : "Deleted", filter->seid);
	return ret;
}
/**
 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add, if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec using big buffer.
 * Returns 0 if the filter were successfully added.
 **/
static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
					     struct i40e_cloud_filter *filter,
					     bool add)
{
	struct i40e_aqc_cloud_filters_element_bb cld_filter;
	struct i40e_pf *pf = vsi->back;
	int ret;

	/* Both (src/dst) valid mac_addr are not supported */
	if ((is_valid_ether_addr(filter->dst_mac) &&
	     is_valid_ether_addr(filter->src_mac)) ||
	    (is_multicast_ether_addr(filter->dst_mac) &&
	     is_multicast_ether_addr(filter->src_mac)))
		return -EOPNOTSUPP;

	/* Make sure port is specified, otherwise bail out, for channel
	 * specific cloud filter needs 'L4 port' to be non-zero
	 */
	if (!filter->dst_port)
		return -EOPNOTSUPP;

	/* adding filter using src_port/src_ip is not supported at this stage */
	if (filter->src_port || filter->src_ipv4 ||
	    !ipv6_addr_any(&filter->ip.v6.src_ip6))
		return -EOPNOTSUPP;

	/* copy element needed to add cloud filter from filter */
	i40e_set_cld_element(filter, &cld_filter.element);

	if (is_valid_ether_addr(filter->dst_mac) ||
	    is_valid_ether_addr(filter->src_mac) ||
	    is_multicast_ether_addr(filter->dst_mac) ||
	    is_multicast_ether_addr(filter->src_mac)) {
		/* MAC + IP : unsupported mode */
		if (filter->dst_ipv4)
			return -EOPNOTSUPP;

		/* since we validated that L4 port must be valid before
		 * we get here, start with respective "flags" value
		 * and update if vlan is present or not
		 */
		cld_filter.element.flags =
			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);

		if (filter->vlan_id) {
			cld_filter.element.flags =
			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
		}

	} else if (filter->dst_ipv4 ||
		   !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
		cld_filter.element.flags =
				cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
		if (filter->n_proto == ETH_P_IPV6)
			cld_filter.element.flags |=
				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
		else
			cld_filter.element.flags |=
				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
	} else {
		dev_err(&pf->pdev->dev,
			"either mac or ip has to be valid for cloud filter\n");
		return -EINVAL;
	}

	/* Now copy L4 port in Byte 6..7 in general fields */
	cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
						be16_to_cpu(filter->dst_port);

	if (add) {
		/* Validate current device switch mode, change if necessary */
		ret = i40e_validate_and_set_switch_mode(vsi);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"failed to set switch mode, ret %d\n",
				ret);
			return ret;
		}

		ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
						   &cld_filter, 1);
	} else {
		ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
						   &cld_filter, 1);
	}

	if (ret)
		dev_dbg(&pf->pdev->dev,
			"Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
			add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
	else
		dev_info(&pf->pdev->dev,
			 "%s cloud filter for VSI: %d, L4 port: %d\n",
			 add ? "add" : "delete", filter->seid,
			 ntohs(filter->dst_port));
	return ret;
}
/**
 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 * @filter: Pointer to cloud filter structure
 *
 **/
static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
				 struct tc_cls_flower_offload *f,
				 struct i40e_cloud_filter *filter)
{
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct i40e_pf *pf = vsi->back;
	u8 field_flags = 0;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
		dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
			f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);

		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);

		if (mask->keyid != 0)
			field_flags |= I40E_CLOUD_FIELD_TEN_ID;

		filter->tenant_id = be32_to_cpu(key->keyid);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);

		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);

		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		filter->n_proto = n_proto_key & n_proto_mask;
		filter->ip_proto = key->ip_proto;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);

		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		/* use is_broadcast and is_zero to check for all 0xf or 0 */
		if (!is_zero_ether_addr(mask->dst)) {
			if (is_broadcast_ether_addr(mask->dst)) {
				field_flags |= I40E_CLOUD_FIELD_OMAC;
			} else {
				dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
					mask->dst);
				return I40E_ERR_CONFIG;
			}
		}

		if (!is_zero_ether_addr(mask->src)) {
			if (is_broadcast_ether_addr(mask->src)) {
				field_flags |= I40E_CLOUD_FIELD_IMAC;
			} else {
				dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
					mask->src);
				return I40E_ERR_CONFIG;
			}
		}
		ether_addr_copy(filter->dst_mac, key->dst);
		ether_addr_copy(filter->src_mac, key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);

		if (mask->vlan_id) {
			if (mask->vlan_id == VLAN_VID_MASK) {
				field_flags |= I40E_CLOUD_FIELD_IVLAN;

			} else {
				dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
					mask->vlan_id);
				return I40E_ERR_CONFIG;
			}
		}

		filter->vlan_id = cpu_to_be16(key->vlan_id);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		addr_type = key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		if (mask->dst) {
			if (mask->dst == cpu_to_be32(0xffffffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				mask->dst = be32_to_cpu(mask->dst);
				dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4\n",
					&mask->dst);
				return I40E_ERR_CONFIG;
			}
		}

		if (mask->src) {
			if (mask->src == cpu_to_be32(0xffffffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				mask->src = be32_to_cpu(mask->src);
				dev_err(&pf->pdev->dev, "Bad ip src mask %pI4\n",
					&mask->src);
				return I40E_ERR_CONFIG;
			}
		}

		if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
			dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
			return I40E_ERR_CONFIG;
		}
		filter->dst_ipv4 = key->dst;
		filter->src_ipv4 = key->src;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		/* src and dest IPV6 address should not be LOOPBACK
		 * (0:0:0:0:0:0:0:1), which can be represented as ::1
		 */
		if (ipv6_addr_loopback(&key->dst) ||
		    ipv6_addr_loopback(&key->src)) {
			dev_err(&pf->pdev->dev,
				"Bad ipv6, addr is LOOPBACK\n");
			return I40E_ERR_CONFIG;
		}
		if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
			field_flags |= I40E_CLOUD_FIELD_IIP;

		memcpy(&filter->src_ipv6, &key->src.s6_addr32,
		       sizeof(filter->src_ipv6));
		memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
		       sizeof(filter->dst_ipv6));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);

		if (mask->src) {
			if (mask->src == cpu_to_be16(0xffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
					be16_to_cpu(mask->src));
				return I40E_ERR_CONFIG;
			}
		}

		if (mask->dst) {
			if (mask->dst == cpu_to_be16(0xffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
					be16_to_cpu(mask->dst));
				return I40E_ERR_CONFIG;
			}
		}

		filter->dst_port = key->dst;
		filter->src_port = key->src;

		switch (filter->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			dev_err(&pf->pdev->dev,
				"Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}
	}
	filter->flags = field_flags;
	return 0;
}
/**
 * i40e_handle_tclass: Forward to a traffic class on the device
 * @vsi: Pointer to VSI
 * @tc: traffic class index on the device
 * @filter: Pointer to cloud filter structure
 *
 **/
static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
			      struct i40e_cloud_filter *filter)
{
	struct i40e_channel *ch, *ch_tmp;

	/* direct to a traffic class on the same device */
	if (tc == 0) {
		filter->seid = vsi->seid;
		return 0;
	} else if (vsi->tc_config.enabled_tc & BIT(tc)) {
		if (!filter->dst_port) {
			dev_err(&vsi->back->pdev->dev,
				"Specify destination port to direct to traffic class that is not default\n");
			return -EINVAL;
		}
		if (list_empty(&vsi->ch_list))
			return -EINVAL;
		list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
					 list) {
			if (ch->seid == vsi->tc_seid_map[tc])
				filter->seid = ch->seid;
		}
		return 0;
	}
	dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
	return -EINVAL;
}
/**
 * i40e_configure_clsflower - Configure tc flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 *
 **/
static int i40e_configure_clsflower(struct i40e_vsi *vsi,
				    struct tc_cls_flower_offload *cls_flower)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
	struct i40e_cloud_filter *filter = NULL;
	struct i40e_pf *pf = vsi->back;
	int err = 0;

	if (tc < 0) {
		dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
		return -EOPNOTSUPP;
	}

	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
	    test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
		return -EBUSY;

	if (pf->fdir_pf_active_filters ||
	    (!hlist_empty(&pf->fdir_filter_list))) {
		dev_err(&vsi->back->pdev->dev,
			"Flow Director Sideband filters exist, turn ntuple off to configure cloud filters\n");
		return -EINVAL;
	}

	if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
		dev_err(&vsi->back->pdev->dev,
			"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
		vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
	}

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	filter->cookie = cls_flower->cookie;

	err = i40e_parse_cls_flower(vsi, cls_flower, filter);
	if (err < 0)
		goto err;

	err = i40e_handle_tclass(vsi, tc, filter);
	if (err < 0)
		goto err;

	/* Add cloud filter */
	if (filter->dst_port)
		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
	else
		err = i40e_add_del_cloud_filter(vsi, filter, true);

	if (err) {
		dev_err(&pf->pdev->dev,
			"Failed to add cloud filter, err %s\n",
			i40e_stat_str(&pf->hw, err));
		err = i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
		goto err;
	}

	/* add filter to the ordered list */
	INIT_HLIST_NODE(&filter->cloud_node);

	hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);

	pf->num_cloud_filters++;

	return err;
err:
	kfree(filter);
	return err;
}
/**
 * i40e_find_cloud_filter - Find the cloud filter in the list
 * @vsi: Pointer to VSI
 * @cookie: filter specific cookie
 *
 **/
static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
							unsigned long *cookie)
{
	struct i40e_cloud_filter *filter = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &vsi->back->cloud_filter_list, cloud_node)
		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
			return filter;
	return NULL;
}
/**
 * i40e_delete_clsflower - Remove tc flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 *
 **/
static int i40e_delete_clsflower(struct i40e_vsi *vsi,
				 struct tc_cls_flower_offload *cls_flower)
{
	struct i40e_cloud_filter *filter = NULL;
	struct i40e_pf *pf = vsi->back;
	int err = 0;

	filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);

	if (!filter)
		return -EINVAL;

	hash_del(&filter->cloud_node);

	if (filter->dst_port)
		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
	else
		err = i40e_add_del_cloud_filter(vsi, filter, false);

	kfree(filter);
	if (err) {
		dev_err(&pf->pdev->dev,
			"Failed to delete cloud filter, err %s\n",
			i40e_stat_str(&pf->hw, err));
		return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
	}

	pf->num_cloud_filters--;
	if (!pf->num_cloud_filters)
		if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
		    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
		}
	return 0;
}
/**
 * i40e_setup_tc_cls_flower - flower classifier offloads
 * @netdev: net device to configure
 * @type_data: offload data
 **/
static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
				    struct tc_cls_flower_offload *cls_flower)
{
	struct i40e_vsi *vsi = np->vsi;

	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return i40e_configure_clsflower(vsi, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return i40e_delete_clsflower(vsi, cls_flower);
	case TC_CLSFLOWER_STATS:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}
static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct i40e_netdev_priv *np = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return i40e_setup_tc_cls_flower(np, type_data);

	default:
		return -EOPNOTSUPP;
	}
}
static int i40e_setup_tc_block(struct net_device *dev,
			       struct tc_block_offload *f)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
					     np, np);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			   void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return i40e_setup_tc(netdev, type_data);
	case TC_SETUP_BLOCK:
		return i40e_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

	udp_tunnel_get_rx_info(netdev);

	return 0;
}
/**
 * i40e_vsi_open -
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Returns 0 on success, negative value on failure
 *
 * Note: expects to be called while under rtnl_lock()
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[I40E_INT_NAME_STR_LEN];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

		err = netif_set_real_num_rx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

	} else if (vsi->type == I40E_VSI_FDIR) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);
	} else {
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);

	return err;
}
/**
 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 * @pf: Pointer to PF
 *
 * This function destroys the hlist where all the Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_exit(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	struct i40e_flex_pit *pit_entry, *tmp;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &pf->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}

	list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
		list_del(&pit_entry->list);
		kfree(pit_entry);
	}
	INIT_LIST_HEAD(&pf->l3_flex_pit_list);

	list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
		list_del(&pit_entry->list);
		kfree(pit_entry);
	}
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);

	pf->fdir_pf_active_filters = 0;
	pf->fd_tcp4_filter_cnt = 0;
	pf->fd_udp4_filter_cnt = 0;
	pf->fd_sctp4_filter_cnt = 0;
	pf->fd_ip4_filter_cnt = 0;

	/* Reprogram the default input set for TCP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for UDP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for SCTP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for Other/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
/**
 * i40e_cloud_filter_exit - Cleans up the cloud filters
 * @pf: Pointer to PF
 *
 * This function destroys the hlist where all the cloud filters
 * were saved.
 **/
static void i40e_cloud_filter_exit(struct i40e_pf *pf)
{
	struct i40e_cloud_filter *cfilter;
	struct hlist_node *node;

	hlist_for_each_entry_safe(cfilter, node,
				  &pf->cloud_filter_list, cloud_node) {
		hlist_del(&cfilter->cloud_node);
		kfree(cfilter);
	}
	pf->num_cloud_filters = 0;

	if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
	    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
		pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
	}
}
/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * this netdev interface is disabled.
 *
 * Returns 0, this is not allowed to fail
 **/
int i40e_close(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	i40e_vsi_close(vsi);

	return 0;
}
/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
	u32 val;

	WARN_ON(in_interrupt());

	/* do the biggest reset indicated */
	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor. Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & I40E_PF_RESET_FLAG) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf, lock_acquired);

	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
					       vsi->state))
				i40e_vsi_reinit_locked(pf->vsi[v]);
		}
	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
		int v;

		/* Find the VSI(s) that needs to be brought down */
		dev_info(&pf->pdev->dev, "VSI down requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
					       vsi->state)) {
				set_bit(__I40E_VSI_DOWN, vsi->state);
				i40e_down(vsi);
			}
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
	}
}
#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
 * @pf: board private structure
 * @old_cfg: current DCB config
 * @new_cfg: new DCB config
 **/
bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
			    struct i40e_dcbx_config *old_cfg,
			    struct i40e_dcbx_config *new_cfg)
{
	bool need_reconfig = false;

	/* Check if ETS configuration has changed */
	if (memcmp(&new_cfg->etscfg,
		   &old_cfg->etscfg,
		   sizeof(new_cfg->etscfg))) {
		/* If Priority Table has changed reconfig is needed */
		if (memcmp(&new_cfg->etscfg.prioritytable,
			   &old_cfg->etscfg.prioritytable,
			   sizeof(new_cfg->etscfg.prioritytable))) {
			need_reconfig = true;
			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
		}

		if (memcmp(&new_cfg->etscfg.tcbwtable,
			   &old_cfg->etscfg.tcbwtable,
			   sizeof(new_cfg->etscfg.tcbwtable)))
			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");

		if (memcmp(&new_cfg->etscfg.tsatable,
			   &old_cfg->etscfg.tsatable,
			   sizeof(new_cfg->etscfg.tsatable)))
			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
	}

	/* Check if PFC configuration has changed */
	if (memcmp(&new_cfg->pfc,
		   &old_cfg->pfc,
		   sizeof(new_cfg->pfc))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
	}

	/* Check if APP Table has changed */
	if (memcmp(&new_cfg->app,
		   &old_cfg->app,
		   sizeof(new_cfg->app))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
	}

	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
	return need_reconfig;
}
/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	dev_dbg(&pf->pdev->dev,
		"LLDP event mib type %s\n", type ? "remote" : "local");
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	/* Store the old configuration */
	tmp_dcbx_cfg = hw->local_dcbx_config;

	/* Reset the old DCBx configuration data */
	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
	/* Get updated DCBX data from firmware */
	ret = i40e_get_dcb_config(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
		    sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
					       &hw->local_dcbx_config);

	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

	if (!need_reconfig)
		goto exit;

	/* Enable DCB tagging only when more than one TC */
	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;

	set_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* Reconfiguration needed quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);

	ret = i40e_resume_port_tx(pf);

	clear_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* In case of error no point in resuming VSIs */
	if (ret)
		goto exit;

	/* Wait for the PF's queues to be disabled */
	ret = i40e_pf_wait_queues_disabled(pf);
	if (ret) {
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	} else {
		i40e_pf_unquiesce_all_vsi(pf);
		pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
			      I40E_FLAG_CLIENT_L2_CHANGE);
	}

exit:
	return ret;
}
#endif /* CONFIG_I40E_DCB */
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags, true);
	rtnl_unlock();
}
/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VFs
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			    >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}
/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}

/**
 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
 * @pf: board private structure
 **/
u32 i40e_get_current_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_get_global_fd_count - Get total FD filters programmed on device
 * @pf: board private structure
 **/
u32 i40e_get_global_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
	return fcnt_prog;
}
/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		return;

	/* Check if we have enough room to re-enable FDir SB capability. */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
		if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
			pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}

	/* We should wait for even more space before re-enabling ATR.
	 * Additionally, we cannot enable ATR as long as we still have TCP SB
	 * rules active.
	 */
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
	    (pf->fd_tcp4_filter_cnt == 0)) {
		if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
		}
	}

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node) {
			if (filter->fd_id == pf->fd_inv) {
				hlist_del(&filter->fdir_node);
				kfree(filter);
				pf->fdir_pf_active_filters--;
			}
		}
	}
}
#define I40E_MIN_FD_FLUSH_INTERVAL 10
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	if (!time_after(jiffies, pf->fd_flush_timestamp +
				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quick and we have mostly SB rules we
	 * should not re-enable ATR for some time.
	 */
	min_flush_time = pf->fd_flush_timestamp +
			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

	if (!(time_after(jiffies, min_flush_time)) &&
	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}

	pf->fd_flush_timestamp = jiffies;
	pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	i40e_flush(&pf->hw);
	pf->fd_flush_cnt++;
	pf->fd_add_err = 0;
	do {
		/* Check FD flush status every 5-6msec */
		usleep_range(5000, 6000);
		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	} while (flush_wait_retry--);
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr && !pf->fd_tcp4_filter_cnt)
			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
		clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}
}
/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}

/* We can see up to 256 filter programming desc in transit if the filters are
 * being applied really fast; before we see the first
 * filter miss error on Rx queue 0. Accumulating enough error messages before
 * reacting will make sure we don't cause flush too often.
 */
#define I40E_MAX_FD_PROGRAM_ERROR 256

/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state))
		return;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		i40e_fdir_flush_and_replay(pf);

	i40e_fdir_check_and_reenable(pf);
}
/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_IWARP:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}
/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
	struct i40e_pf *pf;
	int i;

	if (!veb || !veb->pf)
		return;
	pf = veb->pf;

	/* depth first... */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
			i40e_veb_link_event(pf->veb[i], link_up);

	/* ... now the local VSIs */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
			i40e_vsi_link_event(pf->vsi[i], link_up);
}
/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 new_link_speed, old_link_speed;
	i40e_status status;
	bool new_link, old_link;

	/* save off old link status information */
	pf->hw.phy.link_info_old = pf->hw.phy.link_info;

	/* set this to force the get_link_status call to refresh state */
	pf->hw.phy.get_link_info = true;

	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);

	status = i40e_get_link_status(&pf->hw, &new_link);

	/* On success, disable temp link polling */
	if (status == I40E_SUCCESS) {
		if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
			pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
	} else {
		/* Enable link polling temporarily until i40e_get_link_status
		 * returns I40E_SUCCESS
		 */
		pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
			status);
		return;
	}

	old_link_speed = pf->hw.phy.link_info_old.link_speed;
	new_link_speed = pf->hw.phy.link_info.link_speed;

	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
	    (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;

	i40e_print_link_message(vsi, new_link);

	/* Notify the base of the switch tree connected to
	 * the link.  Floating VEBs are not notified.
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(vsi, new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}
/**
 * i40e_watchdog_subtask - periodic checks not using event driven response
 * @pf: board private structure
 **/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies, (pf->service_timer_previous +
				  pf->service_timer_period)))
		return;
	pf->service_timer_previous = jiffies;

	if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
	    (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
		i40e_link_event(pf);

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			i40e_update_stats(pf->vsi[i]);

	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
		/* Update the stats for the active switching components */
		for (i = 0; i < I40E_MAX_VEB; i++)
			if (pf->veb[i])
				i40e_update_veb_stats(pf->veb[i]);
	}

	i40e_ptp_rx_hang(pf);
	i40e_ptp_tx_hang(pf);
}
/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

	if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
		clear_bit(__I40E_DOWN_REQUESTED, pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
		i40e_prep_for_reset(pf, false);
		i40e_reset(pf);
		i40e_rebuild(pf, false, false);
	}

	/* If we're already down or resetting, just bail */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
		i40e_do_reset(pf, reset_flags, false);
	}
}
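/* Illustrative sketch (assumption, not driver code): reset_flags collected in
 * i40e_reset_subtask() is simply a bitmask of the __I40E_*_REQUESTED state
 * bits, so another part of the driver can request, for example, a PF reset
 * with:
 *
 *	set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
 *	i40e_service_event_schedule(pf);
 *
 * and the service task will fold that bit into reset_flags and hand it to
 * i40e_do_reset() on its next pass.
 */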
/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static void i40e_handle_link_event(struct i40e_pf *pf,
				   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_get_link_status *status =
		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Do a new status request to re-enable LSE reporting
	 * and load new status information into the hw struct
	 * This completely ignores any state information
	 * in the ARQ event info, instead choosing to always
	 * issue the AQ update link status command.
	 */
	i40e_link_event(pf);

	/* Check if module meets thermal requirements */
	if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
		dev_err(&pf->pdev->dev,
			"Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
		dev_err(&pf->pdev->dev,
			"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
	} else {
		/* check for unqualified module, if link is down, suppress
		 * the message if link was forced to be down.
		 */
		if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
		    (!(status->link_info & I40E_AQ_LINK_UP)) &&
		    (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
			dev_err(&pf->pdev->dev,
				"Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
			dev_err(&pf->pdev->dev,
				"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		}
	}
}
/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 val;

	/* Do not run clean AQ when PF reset fails */
	if (test_bit(__I40E_RESET_FAILED, pf->state))
		return;

	/* check for error indications */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
		pf->arq_overflows++;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	wr32(&pf->hw, pf->hw.aq.arq.len, val);

	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			ret = i40e_handle_lldp_event(pf, &event);
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		case i40e_aqc_opc_nvm_erase:
		case i40e_aqc_opc_nvm_update:
		case i40e_aqc_opc_oem_post_update:
			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
				   "ARQ NVM operation 0x%04x completed\n",
				   opcode);
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ: Unknown event 0x%04x ignored\n",
				 opcode);
			break;
		}
	} while (i++ < pf->adminq_work_limit);

	if (i < pf->adminq_work_limit)
		clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);

	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	kfree(event.msg_buf);
}
/**
 * i40e_verify_eeprom - make sure eeprom is good to use
 * @pf: board private structure
 **/
static void i40e_verify_eeprom(struct i40e_pf *pf)
{
	int err;

	err = i40e_diag_eeprom_test(&pf->hw);
	if (err) {
		/* retry in case of garbage read */
		err = i40e_diag_eeprom_test(&pf->hw);
		if (err) {
			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
				 err);
			set_bit(__I40E_BAD_EEPROM, pf->state);
		}
	}

	if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
		clear_bit(__I40E_BAD_EEPROM, pf->state);
	}
}
/**
 * i40e_enable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * enable switch loop back or die - no point in a return value
 **/
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}
/**
 * i40e_disable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * disable switch loop back or die - no point in a return value
 **/
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}
/**
 * i40e_config_bridge_mode - Configure the HW bridge mode
 * @veb: pointer to the bridge instance
 *
 * Configure the loop back mode for the LAN VSI that is downlink to the
 * specified HW bridge instance. It is expected this function is called
 * when a new HW bridge is instantiated.
 **/
static void i40e_config_bridge_mode(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;

	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		i40e_disable_pf_switch_lb(pf);
	else
		i40e_enable_pf_switch_lb(pf);
}
/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int veb_idx;
	int v, ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of veb_idx %d owner VSI failed: %d\n",
			 veb->idx, ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		veb->bridge_mode = BRIDGE_MODE_VEB;
	else
		veb->bridge_mode = BRIDGE_MODE_VEPA;
	i40e_config_bridge_mode(veb);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];

			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 * @list_type: AQ capability to be queried
 **/
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
						    &data_size, list_type,
						    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENODEV;
		}
	} while (err);

	if (pf->hw.debug_mask & I40E_DEBUG_USER) {
		if (list_type == i40e_aqc_opc_list_func_capabilities) {
			dev_info(&pf->pdev->dev,
				 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
				 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
				 pf->hw.func_caps.num_msix_vectors,
				 pf->hw.func_caps.num_msix_vectors_vf,
				 pf->hw.func_caps.fd_filters_guaranteed,
				 pf->hw.func_caps.fd_filters_best_effort,
				 pf->hw.func_caps.num_tx_qp,
				 pf->hw.func_caps.num_vsis);
		} else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
			dev_info(&pf->pdev->dev,
				 "switch_mode=0x%04x, function_valid=0x%08x\n",
				 pf->hw.dev_caps.switch_mode,
				 pf->hw.dev_caps.valid_functions);
			dev_info(&pf->pdev->dev,
				 "SR-IOV=%d, num_vfs for all function=%u\n",
				 pf->hw.dev_caps.sr_iov_1_1,
				 pf->hw.dev_caps.num_vfs);
			dev_info(&pf->pdev->dev,
				 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
				 pf->hw.dev_caps.num_vsis,
				 pf->hw.dev_caps.num_rx_qp,
				 pf->hw.dev_caps.num_tx_qp);
		}
	}
	if (list_type == i40e_aqc_opc_list_func_capabilities) {
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		       + pf->hw.func_caps.num_vfs)
		if (pf->hw.revision_id == 0 &&
		    pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
			dev_info(&pf->pdev->dev,
				 "got num_vsis %d, setting num_vsis to %d\n",
				 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
			pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
		}
	}
	return 0;
}
static int i40e_vsi_clear(struct i40e_vsi *vsi);

/**
 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
 * @pf: board private structure
 **/
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	/* quick workaround for an NVM issue that leaves a critical register
	 * uninitialized
	 */
	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
		static const u32 hkey[] = {
			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
			0x95b3a76d};
		int i;

		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
	}

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* find existing VSI and see if it needs configuring */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);

	/* create a new VSI if none exists */
	if (!vsi) {
		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
				     pf->vsi[pf->lan_vsi]->seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			return;
		}
	}

	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}
/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/
static void i40e_fdir_teardown(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	i40e_fdir_filter_exit(pf);
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (vsi)
		i40e_vsi_release(vsi);
}
/**
 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
 * @vsi: PF main vsi
 * @seid: seid of main or channel VSIs
 *
 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
 * existed before reset
 **/
static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
{
	struct i40e_cloud_filter *cfilter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;
	i40e_status ret;

	/* Add cloud filters back if they exist */
	hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
				  cloud_node) {
		if (cfilter->seid != seid)
			continue;

		if (cfilter->dst_port)
			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
								true);
		else
			ret = i40e_add_del_cloud_filter(vsi, cfilter, true);

		if (ret) {
			dev_dbg(&pf->pdev->dev,
				"Failed to rebuild cloud filter, err %s aq_err %s\n",
				i40e_stat_str(&pf->hw, ret),
				i40e_aq_str(&pf->hw,
					    pf->hw.aq.asq_last_status));
			return ret;
		}
	}
	return 0;
}
/**
 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
 * @vsi: PF main vsi
 *
 * Rebuilds channel VSIs if they existed before reset
 **/
static int i40e_rebuild_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	i40e_status ret;

	if (list_empty(&vsi->ch_list))
		return 0;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (!ch->initialized)
			break;
		/* Proceed with creation of channel (VMDq2) VSI */
		ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "failed to rebuild channels using uplink_seid %u\n",
				 vsi->uplink_seid);
			return ret;
		}
		if (ch->max_tx_rate) {
			u64 credits = ch->max_tx_rate;

			if (i40e_set_bw_limit(vsi, ch->seid,
					      ch->max_tx_rate))
				return -EINVAL;

			do_div(credits, I40E_BW_CREDIT_DIVISOR);
			dev_dbg(&vsi->back->pdev->dev,
				"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
				ch->max_tx_rate,
				credits,
				ch->seid);
		}
		ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
		if (ret) {
			dev_dbg(&vsi->back->pdev->dev,
				"Failed to rebuild cloud filters for channel VSI %u\n",
				ch->seid);
			return ret;
		}
	}
	return 0;
}
/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	/* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
	if (!lock_acquired)
		rtnl_lock();
	i40e_pf_quiesce_all_vsi(pf);
	if (!lock_acquired)
		rtnl_unlock();

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
}
/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 **/
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}
/**
 * i40e_get_oem_version - get OEM specific version information
 * @hw: pointer to the hardware structure
 **/
static void i40e_get_oem_version(struct i40e_hw *hw)
{
	u16 block_offset = 0xffff;
	u16 block_length = 0;
	u16 capabilities = 0;
	u16 gen_snap = 0;
	u16 release = 0;

#define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
#define I40E_NVM_OEM_LENGTH_OFFSET		0x00
#define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
#define I40E_NVM_OEM_GEN_OFFSET			0x02
#define I40E_NVM_OEM_RELEASE_OFFSET		0x03
#define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
#define I40E_NVM_OEM_LENGTH			3

	/* Check if pointer to OEM version block is valid. */
	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
	if (block_offset == 0xffff)
		return;

	/* Check if OEM version block has correct length. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
			   &block_length);
	if (block_length < I40E_NVM_OEM_LENGTH)
		return;

	/* Check if OEM version format is as expected. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
			   &capabilities);
	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
		return;

	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
			   &gen_snap);
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
			   &release);
	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
}
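/* Illustrative note (assumption about the field widths implied by
 * I40E_OEM_SNAP_SHIFT): the OEM version is packed as
 * oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release, so a gen/snapshot
 * word of 0x0102 and a release word of 0x0003 would read back as an oem_ver
 * whose upper bits carry 0x0102 and whose low bits carry 0x0003.
 */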
/**
 * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
 * @pf: board private structure
 **/
static int i40e_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;

	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, pf->state);
		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	} else {
		pf->pfr_count++;
	}
	return ret;
}
/**
 * i40e_rebuild - rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 val;
	int v;

	if (test_bit(__I40E_DOWN, pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}
	i40e_get_oem_version(&pf->hw);

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
		i40e_verify_eeprom(pf);

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
	/* do basic switch setup */
	if (!lock_acquired)
		rtnl_lock();
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_unlock;

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (vsi->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					vsi->uplink_seid = pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (vsi->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(vsi);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_unlock;
		}
	}

	if (vsi->mqprio_qopt.max_rate[0]) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 credits = 0;

		do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
		if (ret)
			goto end_unlock;

		credits = max_tx_rate;
		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&vsi->back->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			max_tx_rate, credits, vsi->seid);
	}

	ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
	if (ret)
		goto end_unlock;

	/* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs
	 * for this main VSI if they exist
	 */
	ret = i40e_rebuild_channels(vsi);
	if (ret)
		goto end_unlock;

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
#define I40E_REG_MSS          0x000E64DC
#define I40E_REG_MSS_MIN_MASK 0x3FF0000
#define I40E_64BYTE_MSS       0x400000
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* Release the RTNL lock before we start resetting VFs */
	if (!lock_acquired)
		rtnl_unlock();

	i40e_reset_all_vfs(pf, true);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* We've already released the lock, so don't do it again */
	goto end_core_reset;

end_unlock:
	if (!lock_acquired)
		rtnl_unlock();
end_core_reset:
	clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
}
/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired)
{
	int ret;

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_reset(pf);
	if (!ret)
		i40e_rebuild(pf, reinit, lock_acquired);
}
/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
	i40e_prep_for_reset(pf, lock_acquired);
	i40e_reset_and_rebuild(pf, false, lock_acquired);
}
/**
 * i40e_handle_mdd_event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious vfs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
			i40e_service_event_schedule(pf);
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}
static const char *i40e_tunnel_name(struct i40e_udp_port_config *port)
{
	switch (port->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		return "vxlan";
	case UDP_TUNNEL_TYPE_GENEVE:
		return "geneve";
	default:
		return "unknown";
	}
}

/**
 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters(struct i40e_pf *pf)
{
	int i;

	/* loop through and set pending bit for all active UDP filters */
	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->udp_ports[i].port)
			pf->pending_udp_bitmap |= BIT_ULL(i);
	}

	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
}
/**
 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u16 port;
	int i;

	if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
		return;

	pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->pending_udp_bitmap & BIT_ULL(i)) {
			pf->pending_udp_bitmap &= ~BIT_ULL(i);
			port = pf->udp_ports[i].port;
			if (port)
				ret = i40e_aq_add_udp_tunnel(hw, port,
							pf->udp_ports[i].type,
							NULL, NULL);
			else
				ret = i40e_aq_del_udp_tunnel(hw, i, NULL);

			if (ret) {
				dev_info(&pf->pdev->dev,
					 "%s %s port %d, index %d failed, err %s aq_err %s\n",
					 i40e_tunnel_name(&pf->udp_ports[i]),
					 port ? "add" : "delete",
					 port, i,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				pf->udp_ports[i].port = 0;
			}
		}
	}
}
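/* Illustrative sketch (not driver code): pending_udp_bitmap uses one bit per
 * udp_ports[] slot, so marking slot 3 dirty and requesting a sync looks like:
 *
 *	pf->pending_udp_bitmap |= BIT_ULL(3);
 *	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
 *
 * On the next service-task pass the subtask above clears the bit and either
 * adds the tunnel (port != 0) or deletes it (port == 0).
 */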
/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;

	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
		return;

	i40e_detect_recover_hung(pf);
	i40e_sync_filters_subtask(pf);
	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	if (pf->flags & I40E_FLAG_CLIENT_RESET) {
		/* Client subtask will reopen next time through. */
		i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
		pf->flags &= ~I40E_FLAG_CLIENT_RESET;
	} else {
		i40e_client_subtask(pf);
		if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
			i40e_notify_client_of_l2_param_changes(
							pf->vsi[pf->lan_vsi]);
			pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
		}
	}
	i40e_sync_filters_subtask(pf);
	i40e_sync_udp_filters_subtask(pf);
	i40e_clean_adminq_subtask(pf);

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		i40e_service_event_schedule(pf);
}
/**
 * i40e_service_timer - timer callback
 * @t: pointer to the timer_list embedded in the PF struct
 **/
static void i40e_service_timer(struct timer_list *t)
{
	struct i40e_pf *pf = from_timer(pf, t, service_timer);

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}
/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;
		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fdsb_msix;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}
/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: the VSI being configured
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	struct i40e_ring **next_rings;
	int size;
	int ret = 0;

	/* allocate memory for both Tx, XDP Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
	       (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
	if (i40e_enabled_xdp_vsi(vsi)) {
		vsi->xdp_rings = next_rings;
		next_rings += vsi->alloc_queue_pairs;
	}
	vsi->rx_rings = next_rings;

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}
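/* Layout note (illustrative): the single allocation above is carved into
 * consecutive pointer arrays, i.e. for alloc_queue_pairs == N:
 *
 *	tx_rings  -> entries [0 .. N-1]
 *	xdp_rings -> entries [N .. 2N-1]   (only when XDP is enabled)
 *	rx_rings  -> the last N entries
 *
 * which is why a single kfree(vsi->tx_rings) releases all three views.
 */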
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_VSI_DOWN, vsi->state);
	vsi->idx = vsi_idx;
	vsi->int_rate_limit = 0;
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	hash_init(vsi->mac_filter_hash);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_hash_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: the VSI being cleaned
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
	vsi->xdp_rings = NULL;
}
/**
 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
 * and lookup table
 * @vsi: Pointer to VSI structure
 **/
static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
{
	if (!vsi)
		return;

	kfree(vsi->rss_hkey_user);
	vsi->rss_hkey_user = NULL;

	kfree(vsi->rss_lut_user);
	vsi->rss_lut_user = NULL;
}
/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);
	i40e_clear_rss_config_user(vsi);

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}
/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/
static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings && vsi->tx_rings[0]) {
		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			vsi->tx_rings[i] = NULL;
			vsi->rx_rings[i] = NULL;
			if (vsi->xdp_rings)
				vsi->xdp_rings[i] = NULL;
		}
	}
}
/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		/* allocate space for both Tx and Rx in one shot */
		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		ring->tx_itr_setting = pf->tx_itr_default;
		vsi->tx_rings[i] = ring++;

		if (!i40e_enabled_xdp_vsi(vsi))
			goto setup_rx;

		ring->queue_index = vsi->alloc_queue_pairs + i;
		ring->reg_idx = vsi->base_queue + ring->queue_index;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = NULL;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		set_ring_xdp(ring);
		ring->tx_itr_setting = pf->tx_itr_default;
		vsi->xdp_rings[i] = ring++;

setup_rx:
		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		ring->rx_itr_setting = pf->rx_itr_default;
		vsi->rx_rings[i] = ring;
	}

	return 0;

err_out:
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}
/**
 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
 * @pf: board private structure
 * @vectors: the number of MSI-X vectors to request
 *
 * Returns the number of vectors reserved, or error
 **/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					I40E_MIN_MSIX, vectors);
	if (vectors < 0) {
		dev_info(&pf->pdev->dev,
			 "MSI-X vector reservation failed: %d\n", vectors);
		vectors = 0;
	}

	return vectors;
}
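/* Usage note (general PCI MSI-X semantics, not i40e-specific):
 * pci_enable_msix_range(pdev, entries, minvec, maxvec) returns the number of
 * vectors actually granted in the range [minvec, maxvec], or a negative errno
 * if even minvec cannot be satisfied. That is why the caller below treats any
 * value of at least I40E_MIN_MSIX as usable and redistributes vectors when
 * fewer than requested are returned.
 */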
/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns the number of vectors reserved or negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int cpus, extra_vectors;
	int vectors_left;
	int v_budget, i;
	int v_actual;
	int iwarp_requested = 0;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *	- Queues being used for RSS.
	 *		We don't need as many as max_rss_size vectors.
	 *		use rss_size instead in the calculation since that
	 *		is governed by number of cpus in the system.
	 *	- assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
	 *   - The CPU count within the NUMA node if iWARP is enabled
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}

	/* reserve some vectors for the main PF traffic queues. Initially we
	 * only reserve at most 50% of the available vectors, in the case that
	 * the number of online CPUs is large. This ensures that we can enable
	 * extra features as well. Once we've enabled the other features, we
	 * will use any remaining vectors to reach as close as we can to the
	 * number of online CPUs.
	 */
	cpus = num_online_cpus();
	pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
	vectors_left -= pf->num_lan_msix;

	/* reserve one vector for sideband flow director */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (vectors_left) {
			pf->num_fdsb_msix = 1;
			v_budget++;
			vectors_left--;
		} else {
			pf->num_fdsb_msix = 0;
		}
	}

	/* can we reserve enough for iWARP? */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		iwarp_requested = pf->num_iwarp_msix;

		if (!vectors_left)
			pf->num_iwarp_msix = 0;
		else if (vectors_left < pf->num_iwarp_msix)
			pf->num_iwarp_msix = 1;
		v_budget += pf->num_iwarp_msix;
		vectors_left -= pf->num_iwarp_msix;
	}

	/* any vectors left over go for VMDq support */
	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
		int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
		int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);

		if (!vectors_left) {
			pf->num_vmdq_msix = 0;
			pf->num_vmdq_qps = 0;
		} else {
			/* if we're short on vectors for what's desired, we limit
			 * the queues per vmdq.  If this is still more than are
			 * available, the user will need to change the number of
			 * queues/vectors used by the PF later with the ethtool
			 * channels command
			 */
			if (vmdq_vecs < vmdq_vecs_wanted)
				pf->num_vmdq_qps = 1;
			pf->num_vmdq_msix = pf->num_vmdq_qps;

			v_budget += vmdq_vecs;
			vectors_left -= vmdq_vecs;
		}
	}

	/* On systems with a large number of SMP cores, we previously limited
	 * the number of vectors for num_lan_msix to be at most 50% of the
	 * available vectors, to allow for other features. Now, we add back
	 * the remaining vectors. However, we ensure that the total
	 * num_lan_msix will not exceed num_online_cpus(). To do this, we
	 * calculate the number of vectors we can add without going over the
	 * cap of CPUs. For systems with a small number of CPUs this will be
	 * zero.
	 */
	extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
	pf->num_lan_msix += extra_vectors;
	vectors_left -= extra_vectors;

	WARN(vectors_left < 0,
	     "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");

	v_budget += pf->num_lan_msix;
	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	v_actual = i40e_reserve_msix_vectors(pf, v_budget);

	if (v_actual < I40E_MIN_MSIX) {
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		pci_disable_msix(pf->pdev);
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (v_actual != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
		int vec;

		dev_info(&pf->pdev->dev,
			 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
			 v_actual, v_budget);
		/* reserve the misc vector */
		vec = v_actual - 1;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;
		pf->num_vmdq_qps = 1;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_iwarp_msix = 1;
			} else {
				pf->num_lan_msix = 2;
			}
			break;
		default:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_iwarp_msix = min_t(int, (vec / 3),
						 iwarp_requested);
				pf->num_vmdq_vsis = min_t(int, (vec / 3),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			} else {
				pf->num_vmdq_vsis = min_t(int, (vec / 2),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			}
			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
				pf->num_fdsb_msix = 1;
				vec--;
			}
			pf->num_lan_msix = min_t(int,
			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
							      pf->num_lan_msix);
			pf->num_lan_qps = pf->num_lan_msix;
			break;
		}
	}

	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
	    (pf->num_fdsb_msix == 0)) {
		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	}
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (pf->num_iwarp_msix == 0)) {
		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
	}
	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
		   pf->num_lan_msix,
		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
		   pf->num_fdsb_msix,
		   pf->num_iwarp_msix);

	return v_actual;
}
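/* Worked example (illustrative, assumes 8 online CPUs and 16 vectors offered
 * by the hardware): i40e_init_msix() first takes 1 vector for the misc/"other"
 * cause, then min(8, 15 / 2) = 7 for LAN queues, 1 for sideband flow director
 * if enabled, then iWARP/VMDq shares, and finally hands any leftover vectors
 * back to LAN up to the CPU count. The resulting split is what the
 * I40E_DEBUG_INIT message above prints.
 */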
/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the vsi struct
 * @cpu: cpu to be used on affinity_mask
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
{
	struct i40e_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);

	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi,
			       i40e_napi_poll, NAPI_POLL_WEIGHT);

	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	q_vector->tx.latency_range = I40E_LOW_LATENCY;

	/* tie q_vector and vsi together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}
/**
 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err, v_idx, num_q_vectors, current_cpu;

	/* if not MSIX, give the one vector only to the LAN VSI */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_q_vectors = vsi->num_q_vectors;
	else if (vsi == pf->vsi[pf->lan_vsi])
		num_q_vectors = 1;
	else
		return -EINVAL;

	current_cpu = cpumask_first(cpu_online_mask);

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
		if (err)
			goto err_out;
		current_cpu = cpumask_next(current_cpu, cpu_online_mask);
		if (unlikely(current_cpu >= nr_cpu_ids))
			current_cpu = cpumask_first(cpu_online_mask);
	}

	return 0;

err_out:
	while (v_idx--)
		i40e_free_q_vector(vsi, v_idx);

	return err;
}
/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 **/
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
				       I40E_FLAG_IWARP_ENABLED	|
				       I40E_FLAG_RSS_ENABLED	|
				       I40E_FLAG_DCB_CAPABLE	|
				       I40E_FLAG_DCB_ENABLED	|
				       I40E_FLAG_SRIOV_ENABLED	|
				       I40E_FLAG_FD_SB_ENABLED	|
				       I40E_FLAG_FD_ATR_ENABLED	|
				       I40E_FLAG_VMDQ_ENABLED);
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		vectors = 1;  /* one MSI or Legacy vector */
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile) {
		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
		return -ENOMEM;
	}
	pf->irq_pile->num_entries = vectors;
	pf->irq_pile->search_hint = 0;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}
/**
 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
 * @pf: private board data structure
 *
 * Restore the interrupt scheme that was cleared when we suspended the
 * device.  This should be called during resume to re-allocate the q_vectors
 * and reacquire IRQs.
 **/
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
{
	int err, i;

	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
	 * scheme.  We need to re-enable them here in order to attempt to
	 * re-acquire the MSI or MSI-X vectors
	 */
	pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);

	err = i40e_init_interrupt_scheme(pf);
	if (err)
		return err;

	/* Now that we've re-acquired IRQs, we need to remap the vectors and
	 * rings together again.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
			if (err)
				goto err_unwind;
			i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
		}
	}

	err = i40e_setup_misc_vector(pf);
	if (err)
		goto err_unwind;

	return 0;

err_unwind:
	while (i--) {
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	}

	return err;
}
/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the IRQ once, the first time through. */
	if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}
/**
 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			   u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		ret = i40e_aq_get_rss_key(hw, vsi->id,
			(struct i40e_aqc_get_set_rss_key_data *)seed);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS key, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;

		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	return ret;
}
/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
			       const u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	/* Fill out hash function seed */
	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		if (vsi->type == I40E_VSI_MAIN) {
			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
		}
	}

	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (vsi->type == I40E_VSI_MAIN) {
			if (lut_size != I40E_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
		}
	}

	return 0;
}
/**
 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
			    u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 i;

	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
	}
	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (lut_size != I40E_HLUT_ARRAY_SIZE)
			return -EINVAL;
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
	}

	return 0;
}
/**
 * i40e_config_rss - Configure RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
}
/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}
/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: Pointer to board private structure
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 **/
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
		       u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}
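/* Example of the default LUT layout produced above (illustrative values, not
 * taken from hardware): with rss_table_size = 8 and rss_size = 3 the table
 * becomes { 0, 1, 2, 0, 1, 2, 0, 1 }, i.e. hash buckets are spread
 * round-robin across the first rss_size queues.
 */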
/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;
	int ret;

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Determine the RSS table size based on the hardware capabilities */
	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);

	/* Determine the RSS size of the VSI */
	if (!vsi->rss_size) {
		u16 qcount;

		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
	}
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use user configured lut if there is one, otherwise use default */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}
/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
 * Note: expects to be called while under rtnl_lock()
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	new_rss_size = min_t(int, queue_count, pf->rss_size_max);

	if (queue_count != vsi->num_queue_pairs) {
		u16 qcount;

		vsi->req_queue_pairs = queue_count;
		i40e_prep_for_reset(pf, true);

		pf->alloc_rss_size = new_rss_size;

		i40e_reset_and_rebuild(pf, true, true);

		/* Discard the user configured hash keys and lut, if less
		 * queues are enabled.
		 */
		if (queue_count < vsi->rss_size) {
			i40e_clear_rss_config_user(vsi);
			dev_dbg(&pf->pdev->dev,
				"discard user configured hash keys and lut\n");
		}

		/* Reset vsi->rss_size, as number of enabled queues changed */
		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);

		i40e_pf_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count:  %d/%d\n",
		 vsi->req_queue_pairs, pf->rss_size_max);
	return pf->alloc_rss_size;
}
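/* This path is typically driven from ethtool channel changes (ethtool -L);
 * note that the value returned is the RSS queue count actually applied,
 * which may be clamped to pf->rss_size_max rather than the count the user
 * asked for.
 */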
10849 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
10850 * @pf: board private structure
10852 i40e_status
i40e_get_partition_bw_setting(struct i40e_pf
*pf
)
10854 i40e_status status
;
10855 bool min_valid
, max_valid
;
10856 u32 max_bw
, min_bw
;
10858 status
= i40e_read_bw_from_alt_ram(&pf
->hw
, &max_bw
, &min_bw
,
10859 &min_valid
, &max_valid
);
10863 pf
->min_bw
= min_bw
;
10865 pf
->max_bw
= max_bw
;
10872 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
10873 * @pf: board private structure
10875 i40e_status
i40e_set_partition_bw_setting(struct i40e_pf
*pf
)
10877 struct i40e_aqc_configure_partition_bw_data bw_data
;
10878 i40e_status status
;
10880 /* Set the valid bit for this PF */
10881 bw_data
.pf_valid_bits
= cpu_to_le16(BIT(pf
->hw
.pf_id
));
10882 bw_data
.max_bw
[pf
->hw
.pf_id
] = pf
->max_bw
& I40E_ALT_BW_VALUE_MASK
;
10883 bw_data
.min_bw
[pf
->hw
.pf_id
] = pf
->min_bw
& I40E_ALT_BW_VALUE_MASK
;
10885 /* Set the new bandwidths */
10886 status
= i40e_aq_configure_partition_bw(&pf
->hw
, &bw_data
, NULL
);
10892 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
10893 * @pf: board private structure
10895 i40e_status
i40e_commit_partition_bw_setting(struct i40e_pf
*pf
)
10897 /* Commit temporary BW setting to permanent NVM image */
10898 enum i40e_admin_queue_err last_aq_status
;
10902 if (pf
->hw
.partition_id
!= 1) {
10903 dev_info(&pf
->pdev
->dev
,
10904 "Commit BW only works on partition 1! This is partition %d",
10905 pf
->hw
.partition_id
);
10906 ret
= I40E_NOT_SUPPORTED
;
10907 goto bw_commit_out
;
10910 /* Acquire NVM for read access */
10911 ret
= i40e_acquire_nvm(&pf
->hw
, I40E_RESOURCE_READ
);
10912 last_aq_status
= pf
->hw
.aq
.asq_last_status
;
10914 dev_info(&pf
->pdev
->dev
,
10915 "Cannot acquire NVM for read access, err %s aq_err %s\n",
10916 i40e_stat_str(&pf
->hw
, ret
),
10917 i40e_aq_str(&pf
->hw
, last_aq_status
));
10918 goto bw_commit_out
;
10921 /* Read word 0x10 of NVM - SW compatibility word 1 */
10922 ret
= i40e_aq_read_nvm(&pf
->hw
,
10923 I40E_SR_NVM_CONTROL_WORD
,
10924 0x10, sizeof(nvm_word
), &nvm_word
,
10926 /* Save off last admin queue command status before releasing
10929 last_aq_status
= pf
->hw
.aq
.asq_last_status
;
10930 i40e_release_nvm(&pf
->hw
);
10932 dev_info(&pf
->pdev
->dev
, "NVM read error, err %s aq_err %s\n",
10933 i40e_stat_str(&pf
->hw
, ret
),
10934 i40e_aq_str(&pf
->hw
, last_aq_status
));
10935 goto bw_commit_out
;
10938 /* Wait a bit for NVM release to complete */
10941 /* Acquire NVM for write access */
10942 ret
= i40e_acquire_nvm(&pf
->hw
, I40E_RESOURCE_WRITE
);
10943 last_aq_status
= pf
->hw
.aq
.asq_last_status
;
10945 dev_info(&pf
->pdev
->dev
,
10946 "Cannot acquire NVM for write access, err %s aq_err %s\n",
10947 i40e_stat_str(&pf
->hw
, ret
),
10948 i40e_aq_str(&pf
->hw
, last_aq_status
));
10949 goto bw_commit_out
;
10951 /* Write it back out unchanged to initiate update NVM,
10952 * which will force a write of the shadow (alt) RAM to
10953 * the NVM - thus storing the bandwidth values permanently.
10955 ret
= i40e_aq_update_nvm(&pf
->hw
,
10956 I40E_SR_NVM_CONTROL_WORD
,
10957 0x10, sizeof(nvm_word
),
10958 &nvm_word
, true, NULL
);
10959 /* Save off last admin queue command status before releasing
10962 last_aq_status
= pf
->hw
.aq
.asq_last_status
;
10963 i40e_release_nvm(&pf
->hw
);
10965 dev_info(&pf
->pdev
->dev
,
10966 "BW settings NOT SAVED, err %s aq_err %s\n",
10967 i40e_stat_str(&pf
->hw
, ret
),
10968 i40e_aq_str(&pf
->hw
, last_aq_status
));
10975 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
10976 * @pf: board private structure to initialize
10978 * i40e_sw_init initializes the Adapter private data structure.
10979 * Fields are initialized based on PCI device information and
10980 * OS network device settings (MTU size).
10982 static int i40e_sw_init(struct i40e_pf
*pf
)
10987 /* Set default capability flags */
10988 pf
->flags
= I40E_FLAG_RX_CSUM_ENABLED
|
10989 I40E_FLAG_MSI_ENABLED
|
10990 I40E_FLAG_MSIX_ENABLED
;
10992 /* Set default ITR */
10993 pf
->rx_itr_default
= I40E_ITR_RX_DEF
;
10994 pf
->tx_itr_default
= I40E_ITR_TX_DEF
;
10996 /* Depending on PF configurations, it is possible that the RSS
10997 * maximum might end up larger than the available queues
10999 pf
->rss_size_max
= BIT(pf
->hw
.func_caps
.rss_table_entry_width
);
11000 pf
->alloc_rss_size
= 1;
11001 pf
->rss_table_size
= pf
->hw
.func_caps
.rss_table_size
;
11002 pf
->rss_size_max
= min_t(int, pf
->rss_size_max
,
11003 pf
->hw
.func_caps
.num_tx_qp
);
11004 if (pf
->hw
.func_caps
.rss
) {
11005 pf
->flags
|= I40E_FLAG_RSS_ENABLED
;
11006 pf
->alloc_rss_size
= min_t(int, pf
->rss_size_max
,
11007 num_online_cpus());
11010 /* MFP mode enabled */
11011 if (pf
->hw
.func_caps
.npar_enable
|| pf
->hw
.func_caps
.flex10_enable
) {
11012 pf
->flags
|= I40E_FLAG_MFP_ENABLED
;
11013 dev_info(&pf
->pdev
->dev
, "MFP mode Enabled\n");
11014 if (i40e_get_partition_bw_setting(pf
)) {
11015 dev_warn(&pf
->pdev
->dev
,
11016 "Could not get partition bw settings\n");
11018 dev_info(&pf
->pdev
->dev
,
11019 "Partition BW Min = %8.8x, Max = %8.8x\n",
11020 pf
->min_bw
, pf
->max_bw
);
11022 /* nudge the Tx scheduler */
11023 i40e_set_partition_bw_setting(pf
);
11027 if ((pf
->hw
.func_caps
.fd_filters_guaranteed
> 0) ||
11028 (pf
->hw
.func_caps
.fd_filters_best_effort
> 0)) {
11029 pf
->flags
|= I40E_FLAG_FD_ATR_ENABLED
;
11030 pf
->atr_sample_rate
= I40E_DEFAULT_ATR_SAMPLE_RATE
;
11031 if (pf
->flags
& I40E_FLAG_MFP_ENABLED
&&
11032 pf
->hw
.num_partitions
> 1)
11033 dev_info(&pf
->pdev
->dev
,
11034 "Flow Director Sideband mode Disabled in MFP mode\n");
11036 pf
->flags
|= I40E_FLAG_FD_SB_ENABLED
;
11037 pf
->fdir_pf_filter_count
=
11038 pf
->hw
.func_caps
.fd_filters_guaranteed
;
11039 pf
->hw
.fdir_shared_filter_count
=
11040 pf
->hw
.func_caps
.fd_filters_best_effort
;
11043 if (pf
->hw
.mac
.type
== I40E_MAC_X722
) {
11044 pf
->hw_features
|= (I40E_HW_RSS_AQ_CAPABLE
|
11045 I40E_HW_128_QP_RSS_CAPABLE
|
11046 I40E_HW_ATR_EVICT_CAPABLE
|
11047 I40E_HW_WB_ON_ITR_CAPABLE
|
11048 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE
|
11049 I40E_HW_NO_PCI_LINK_CHECK
|
11050 I40E_HW_USE_SET_LLDP_MIB
|
11051 I40E_HW_GENEVE_OFFLOAD_CAPABLE
|
11052 I40E_HW_PTP_L4_CAPABLE
|
11053 I40E_HW_WOL_MC_MAGIC_PKT_WAKE
|
11054 I40E_HW_OUTER_UDP_CSUM_CAPABLE
);
11056 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
11057 if (rd32(&pf
->hw
, I40E_GLQF_FDEVICTENA(1)) !=
11058 I40E_FDEVICT_PCTYPE_DEFAULT
) {
11059 dev_warn(&pf
->pdev
->dev
,
11060 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
11061 pf
->hw_features
&= ~I40E_HW_ATR_EVICT_CAPABLE
;
11063 } else if ((pf
->hw
.aq
.api_maj_ver
> 1) ||
11064 ((pf
->hw
.aq
.api_maj_ver
== 1) &&
11065 (pf
->hw
.aq
.api_min_ver
> 4))) {
11066 /* Supported in FW API version higher than 1.4 */
11067 pf
->hw_features
|= I40E_HW_GENEVE_OFFLOAD_CAPABLE
;
11070 /* Enable HW ATR eviction if possible */
11071 if (pf
->hw_features
& I40E_HW_ATR_EVICT_CAPABLE
)
11072 pf
->flags
|= I40E_FLAG_HW_ATR_EVICT_ENABLED
;
11074 if ((pf
->hw
.mac
.type
== I40E_MAC_XL710
) &&
11075 (((pf
->hw
.aq
.fw_maj_ver
== 4) && (pf
->hw
.aq
.fw_min_ver
< 33)) ||
11076 (pf
->hw
.aq
.fw_maj_ver
< 4))) {
11077 pf
->hw_features
|= I40E_HW_RESTART_AUTONEG
;
11078 /* No DCB support for FW < v4.33 */
11079 pf
->hw_features
|= I40E_HW_NO_DCB_SUPPORT
;
11082 /* Disable FW LLDP if FW < v4.3 */
11083 if ((pf
->hw
.mac
.type
== I40E_MAC_XL710
) &&
11084 (((pf
->hw
.aq
.fw_maj_ver
== 4) && (pf
->hw
.aq
.fw_min_ver
< 3)) ||
11085 (pf
->hw
.aq
.fw_maj_ver
< 4)))
11086 pf
->hw_features
|= I40E_HW_STOP_FW_LLDP
;
11088 /* Use the FW Set LLDP MIB API if FW > v4.40 */
11089 if ((pf
->hw
.mac
.type
== I40E_MAC_XL710
) &&
11090 (((pf
->hw
.aq
.fw_maj_ver
== 4) && (pf
->hw
.aq
.fw_min_ver
>= 40)) ||
11091 (pf
->hw
.aq
.fw_maj_ver
>= 5)))
11092 pf
->hw_features
|= I40E_HW_USE_SET_LLDP_MIB
;
11094 /* Enable PTP L4 if FW > v6.0 */
11095 if (pf
->hw
.mac
.type
== I40E_MAC_XL710
&&
11096 pf
->hw
.aq
.fw_maj_ver
>= 6)
11097 pf
->hw_features
|= I40E_HW_PTP_L4_CAPABLE
;
11099 if (pf
->hw
.func_caps
.vmdq
) {
11100 pf
->num_vmdq_vsis
= I40E_DEFAULT_NUM_VMDQ_VSI
;
11101 pf
->flags
|= I40E_FLAG_VMDQ_ENABLED
;
11102 pf
->num_vmdq_qps
= i40e_default_queues_per_vmdq(pf
);
11105 if (pf
->hw
.func_caps
.iwarp
) {
11106 pf
->flags
|= I40E_FLAG_IWARP_ENABLED
;
11107 /* IWARP needs one extra vector for CQP just like MISC.*/
11108 pf
->num_iwarp_msix
= (int)num_online_cpus() + 1;
11111 #ifdef CONFIG_PCI_IOV
11112 if (pf
->hw
.func_caps
.num_vfs
&& pf
->hw
.partition_id
== 1) {
11113 pf
->num_vf_qps
= I40E_DEFAULT_QUEUES_PER_VF
;
11114 pf
->flags
|= I40E_FLAG_SRIOV_ENABLED
;
11115 pf
->num_req_vfs
= min_t(int,
11116 pf
->hw
.func_caps
.num_vfs
,
11117 I40E_MAX_VF_COUNT
);
11119 #endif /* CONFIG_PCI_IOV */
11120 pf
->eeprom_version
= 0xDEAD;
11121 pf
->lan_veb
= I40E_NO_VEB
;
11122 pf
->lan_vsi
= I40E_NO_VSI
;
11124 /* By default FW has this off for performance reasons */
11125 pf
->flags
&= ~I40E_FLAG_VEB_STATS_ENABLED
;
11127 /* set up queue assignment tracking */
11128 size
= sizeof(struct i40e_lump_tracking
)
11129 + (sizeof(u16
) * pf
->hw
.func_caps
.num_tx_qp
);
11130 pf
->qp_pile
= kzalloc(size
, GFP_KERNEL
);
11131 if (!pf
->qp_pile
) {
11135 pf
->qp_pile
->num_entries
= pf
->hw
.func_caps
.num_tx_qp
;
11136 pf
->qp_pile
->search_hint
= 0;
11138 pf
->tx_timeout_recovery_level
= 1;
11140 mutex_init(&pf
->switch_mutex
);
/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * returns a bool to indicate if reset needs to happen
 **/
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
{
	bool need_reset = false;

	/* Check if Flow Director n-tuple support was enabled or disabled. If
	 * the state changed, we need to reset.
	 */
	if (features & NETIF_F_NTUPLE) {
		/* Enable filters and mark for reset */
		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
			need_reset = true;
		/* enable FD_SB only if there is MSI-X vector and no cloud
		 * filters exist
		 */
		if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
		}
	} else {
		/* turn off filters, mark for reset and clear SW filter list */
		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
			need_reset = true;
			i40e_fdir_filter_exit(pf);
		}
		pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_SB_AUTO_DISABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;

		/* reset fd counters */
		pf->fd_add_err = 0;
		pf->fd_atr_cnt = 0;
		/* if ATR was auto disabled it can be re-enabled. */
		if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
		}
	}
	return need_reset;
}
/**
 * i40e_clear_rss_lut - clear the rx hash lookup table
 * @vsi: the VSI being configured
 **/
static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	if (vsi->type == I40E_VSI_MAIN) {
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			wr32(hw, I40E_PFQF_HLUT(i), 0);
	} else if (vsi->type == I40E_VSI_SRIOV) {
		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
	} else {
		dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
	}
}
11218 * i40e_set_features - set the netdev feature flags
11219 * @netdev: ptr to the netdev being adjusted
11220 * @features: the feature set that the stack is suggesting
11221 * Note: expects to be called while under rtnl_lock()
11223 static int i40e_set_features(struct net_device
*netdev
,
11224 netdev_features_t features
)
11226 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
11227 struct i40e_vsi
*vsi
= np
->vsi
;
11228 struct i40e_pf
*pf
= vsi
->back
;
11231 if (features
& NETIF_F_RXHASH
&& !(netdev
->features
& NETIF_F_RXHASH
))
11232 i40e_pf_config_rss(pf
);
11233 else if (!(features
& NETIF_F_RXHASH
) &&
11234 netdev
->features
& NETIF_F_RXHASH
)
11235 i40e_clear_rss_lut(vsi
);
11237 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
11238 i40e_vlan_stripping_enable(vsi
);
11240 i40e_vlan_stripping_disable(vsi
);
11242 if (!(features
& NETIF_F_HW_TC
) && pf
->num_cloud_filters
) {
11243 dev_err(&pf
->pdev
->dev
,
11244 "Offloaded tc filters active, can't turn hw_tc_offload off");
11248 need_reset
= i40e_set_ntuple(pf
, features
);
11251 i40e_do_reset(pf
, I40E_PF_RESET_FLAG
, true);
/**
 * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
 * @pf: board private structure
 * @port: The UDP port to look up
 *
 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
 **/
static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
{
	u8 i;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->udp_ports[i].port == port)
			return i;
	}

	return i;
}
11276 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
11277 * @netdev: This physical port's netdev
11278 * @ti: Tunnel endpoint information
11280 static void i40e_udp_tunnel_add(struct net_device
*netdev
,
11281 struct udp_tunnel_info
*ti
)
11283 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
11284 struct i40e_vsi
*vsi
= np
->vsi
;
11285 struct i40e_pf
*pf
= vsi
->back
;
11286 u16 port
= ntohs(ti
->port
);
11290 idx
= i40e_get_udp_port_idx(pf
, port
);
11292 /* Check if port already exists */
11293 if (idx
< I40E_MAX_PF_UDP_OFFLOAD_PORTS
) {
11294 netdev_info(netdev
, "port %d already offloaded\n", port
);
11298 /* Now check if there is space to add the new port */
11299 next_idx
= i40e_get_udp_port_idx(pf
, 0);
11301 if (next_idx
== I40E_MAX_PF_UDP_OFFLOAD_PORTS
) {
11302 netdev_info(netdev
, "maximum number of offloaded UDP ports reached, not adding port %d\n",
11307 switch (ti
->type
) {
11308 case UDP_TUNNEL_TYPE_VXLAN
:
11309 pf
->udp_ports
[next_idx
].type
= I40E_AQC_TUNNEL_TYPE_VXLAN
;
11311 case UDP_TUNNEL_TYPE_GENEVE
:
11312 if (!(pf
->hw_features
& I40E_HW_GENEVE_OFFLOAD_CAPABLE
))
11314 pf
->udp_ports
[next_idx
].type
= I40E_AQC_TUNNEL_TYPE_NGE
;
11320 /* New port: add it and mark its index in the bitmap */
11321 pf
->udp_ports
[next_idx
].port
= port
;
11322 pf
->pending_udp_bitmap
|= BIT_ULL(next_idx
);
11323 pf
->flags
|= I40E_FLAG_UDP_FILTER_SYNC
;
11327 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
11328 * @netdev: This physical port's netdev
11329 * @ti: Tunnel endpoint information
11331 static void i40e_udp_tunnel_del(struct net_device
*netdev
,
11332 struct udp_tunnel_info
*ti
)
11334 struct i40e_netdev_priv
*np
= netdev_priv(netdev
);
11335 struct i40e_vsi
*vsi
= np
->vsi
;
11336 struct i40e_pf
*pf
= vsi
->back
;
11337 u16 port
= ntohs(ti
->port
);
11340 idx
= i40e_get_udp_port_idx(pf
, port
);
11342 /* Check if port already exists */
11343 if (idx
>= I40E_MAX_PF_UDP_OFFLOAD_PORTS
)
11346 switch (ti
->type
) {
11347 case UDP_TUNNEL_TYPE_VXLAN
:
11348 if (pf
->udp_ports
[idx
].type
!= I40E_AQC_TUNNEL_TYPE_VXLAN
)
11351 case UDP_TUNNEL_TYPE_GENEVE
:
11352 if (pf
->udp_ports
[idx
].type
!= I40E_AQC_TUNNEL_TYPE_NGE
)
11359 /* if port exists, set it to 0 (mark for deletion)
11360 * and make it pending
11362 pf
->udp_ports
[idx
].port
= 0;
11363 pf
->pending_udp_bitmap
|= BIT_ULL(idx
);
11364 pf
->flags
|= I40E_FLAG_UDP_FILTER_SYNC
;
11368 netdev_warn(netdev
, "UDP port %d was not found, not deleting\n",
static int i40e_get_phys_port_id(struct net_device *netdev,
				 struct netdev_phys_item_id *ppid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;

	if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);

	return 0;
}
11389 * i40e_ndo_fdb_add - add an entry to the hardware database
11390 * @ndm: the input from the stack
11391 * @tb: pointer to array of nladdr (unused)
11392 * @dev: the net device pointer
11393 * @addr: the MAC address entry being added
11394 * @flags: instructions from stack about fdb operation
11396 static int i40e_ndo_fdb_add(struct ndmsg
*ndm
, struct nlattr
*tb
[],
11397 struct net_device
*dev
,
11398 const unsigned char *addr
, u16 vid
,
11401 struct i40e_netdev_priv
*np
= netdev_priv(dev
);
11402 struct i40e_pf
*pf
= np
->vsi
->back
;
11405 if (!(pf
->flags
& I40E_FLAG_SRIOV_ENABLED
))
11406 return -EOPNOTSUPP
;
11409 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev
->name
);
11413 /* Hardware does not support aging addresses so if a
11414 * ndm_state is given only allow permanent addresses
11416 if (ndm
->ndm_state
&& !(ndm
->ndm_state
& NUD_PERMANENT
)) {
11417 netdev_info(dev
, "FDB only supports static addresses\n");
11421 if (is_unicast_ether_addr(addr
) || is_link_local_ether_addr(addr
))
11422 err
= dev_uc_add_excl(dev
, addr
);
11423 else if (is_multicast_ether_addr(addr
))
11424 err
= dev_mc_add_excl(dev
, addr
);
11428 /* Only return duplicate errors if NLM_F_EXCL is set */
11429 if (err
== -EEXIST
&& !(flags
& NLM_F_EXCL
))
11436 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
11437 * @dev: the netdev being configured
11438 * @nlh: RTNL message
11440 * Inserts a new hardware bridge if not already created and
11441 * enables the bridging mode requested (VEB or VEPA). If the
11442 * hardware bridge has already been inserted and the request
11443 * is to change the mode then that requires a PF reset to
11444 * allow rebuild of the components with required hardware
11445 * bridge mode enabled.
11447 * Note: expects to be called while under rtnl_lock()
11449 static int i40e_ndo_bridge_setlink(struct net_device
*dev
,
11450 struct nlmsghdr
*nlh
,
11453 struct i40e_netdev_priv
*np
= netdev_priv(dev
);
11454 struct i40e_vsi
*vsi
= np
->vsi
;
11455 struct i40e_pf
*pf
= vsi
->back
;
11456 struct i40e_veb
*veb
= NULL
;
11457 struct nlattr
*attr
, *br_spec
;
11460 /* Only for PF VSI for now */
11461 if (vsi
->seid
!= pf
->vsi
[pf
->lan_vsi
]->seid
)
11462 return -EOPNOTSUPP
;
11464 /* Find the HW bridge for PF VSI */
11465 for (i
= 0; i
< I40E_MAX_VEB
&& !veb
; i
++) {
11466 if (pf
->veb
[i
] && pf
->veb
[i
]->seid
== vsi
->uplink_seid
)
11470 br_spec
= nlmsg_find_attr(nlh
, sizeof(struct ifinfomsg
), IFLA_AF_SPEC
);
11472 nla_for_each_nested(attr
, br_spec
, rem
) {
11475 if (nla_type(attr
) != IFLA_BRIDGE_MODE
)
11478 mode
= nla_get_u16(attr
);
11479 if ((mode
!= BRIDGE_MODE_VEPA
) &&
11480 (mode
!= BRIDGE_MODE_VEB
))
11483 /* Insert a new HW bridge */
11485 veb
= i40e_veb_setup(pf
, 0, vsi
->uplink_seid
, vsi
->seid
,
11486 vsi
->tc_config
.enabled_tc
);
11488 veb
->bridge_mode
= mode
;
11489 i40e_config_bridge_mode(veb
);
11491 /* No Bridge HW offload available */
11495 } else if (mode
!= veb
->bridge_mode
) {
11496 /* Existing HW bridge but different mode needs reset */
11497 veb
->bridge_mode
= mode
;
11498 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
11499 if (mode
== BRIDGE_MODE_VEB
)
11500 pf
->flags
|= I40E_FLAG_VEB_MODE_ENABLED
;
11502 pf
->flags
&= ~I40E_FLAG_VEB_MODE_ENABLED
;
11503 i40e_do_reset(pf
, I40E_PF_RESET_FLAG
, true);
11512 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
11515 * @seq: RTNL message seq #
11516 * @dev: the netdev being configured
11517 * @filter_mask: unused
11518 * @nlflags: netlink flags passed in
11520 * Return the mode in which the hardware bridge is operating in
11523 static int i40e_ndo_bridge_getlink(struct sk_buff
*skb
, u32 pid
, u32 seq
,
11524 struct net_device
*dev
,
11525 u32 __always_unused filter_mask
,
11528 struct i40e_netdev_priv
*np
= netdev_priv(dev
);
11529 struct i40e_vsi
*vsi
= np
->vsi
;
11530 struct i40e_pf
*pf
= vsi
->back
;
11531 struct i40e_veb
*veb
= NULL
;
11534 /* Only for PF VSI for now */
11535 if (vsi
->seid
!= pf
->vsi
[pf
->lan_vsi
]->seid
)
11536 return -EOPNOTSUPP
;
11538 /* Find the HW bridge for the PF VSI */
11539 for (i
= 0; i
< I40E_MAX_VEB
&& !veb
; i
++) {
11540 if (pf
->veb
[i
] && pf
->veb
[i
]->seid
== vsi
->uplink_seid
)
11547 return ndo_dflt_bridge_getlink(skb
, pid
, seq
, dev
, veb
->bridge_mode
,
11548 0, 0, nlflags
, filter_mask
, NULL
);
11552 * i40e_features_check - Validate encapsulated packet conforms to limits
11554 * @dev: This physical port's netdev
11555 * @features: Offload features that the stack believes apply
11557 static netdev_features_t
i40e_features_check(struct sk_buff
*skb
,
11558 struct net_device
*dev
,
11559 netdev_features_t features
)
11563 /* No point in doing any of this if neither checksum nor GSO are
11564 * being requested for this frame. We can rule out both by just
11565 * checking for CHECKSUM_PARTIAL
11567 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
11570 /* We cannot support GSO if the MSS is going to be less than
11571 * 64 bytes. If it is then we need to drop support for GSO.
11573 if (skb_is_gso(skb
) && (skb_shinfo(skb
)->gso_size
< 64))
11574 features
&= ~NETIF_F_GSO_MASK
;
11576 /* MACLEN can support at most 63 words */
11577 len
= skb_network_header(skb
) - skb
->data
;
11578 if (len
& ~(63 * 2))
11581 /* IPLEN and EIPLEN can support at most 127 dwords */
11582 len
= skb_transport_header(skb
) - skb_network_header(skb
);
11583 if (len
& ~(127 * 4))
11586 if (skb
->encapsulation
) {
11587 /* L4TUNLEN can support 127 words */
11588 len
= skb_inner_network_header(skb
) - skb_transport_header(skb
);
11589 if (len
& ~(127 * 2))
11592 /* IPLEN can support at most 127 dwords */
11593 len
= skb_inner_transport_header(skb
) -
11594 skb_inner_network_header(skb
);
11595 if (len
& ~(127 * 4))
11599 /* No need to validate L4LEN as TCP is the only protocol with a
11600 * a flexible value and we support all possible values supported
11601 * by TCP, which is at most 15 dwords
11606 return features
& ~(NETIF_F_CSUM_MASK
| NETIF_F_GSO_MASK
);
11610 * i40e_xdp_setup - add/remove an XDP program
11611 * @vsi: VSI to changed
11612 * @prog: XDP program
11614 static int i40e_xdp_setup(struct i40e_vsi
*vsi
,
11615 struct bpf_prog
*prog
)
11617 int frame_size
= vsi
->netdev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
;
11618 struct i40e_pf
*pf
= vsi
->back
;
11619 struct bpf_prog
*old_prog
;
11623 /* Don't allow frames that span over multiple buffers */
11624 if (frame_size
> vsi
->rx_buf_len
)
11627 if (!i40e_enabled_xdp_vsi(vsi
) && !prog
)
11630 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
11631 need_reset
= (i40e_enabled_xdp_vsi(vsi
) != !!prog
);
11634 i40e_prep_for_reset(pf
, true);
11636 old_prog
= xchg(&vsi
->xdp_prog
, prog
);
11639 i40e_reset_and_rebuild(pf
, true, true);
11641 for (i
= 0; i
< vsi
->num_queue_pairs
; i
++)
11642 WRITE_ONCE(vsi
->rx_rings
[i
]->xdp_prog
, vsi
->xdp_prog
);
11645 bpf_prog_put(old_prog
);
/**
 * i40e_xdp - implements ndo_bpf for i40e
 * @dev: netdevice
 * @xdp: XDP command
 **/
static int i40e_xdp(struct net_device *dev,
		    struct netdev_bpf *xdp)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return i40e_xdp_setup(vsi, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
		xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
}
11676 static const struct net_device_ops i40e_netdev_ops
= {
11677 .ndo_open
= i40e_open
,
11678 .ndo_stop
= i40e_close
,
11679 .ndo_start_xmit
= i40e_lan_xmit_frame
,
11680 .ndo_get_stats64
= i40e_get_netdev_stats_struct
,
11681 .ndo_set_rx_mode
= i40e_set_rx_mode
,
11682 .ndo_validate_addr
= eth_validate_addr
,
11683 .ndo_set_mac_address
= i40e_set_mac
,
11684 .ndo_change_mtu
= i40e_change_mtu
,
11685 .ndo_do_ioctl
= i40e_ioctl
,
11686 .ndo_tx_timeout
= i40e_tx_timeout
,
11687 .ndo_vlan_rx_add_vid
= i40e_vlan_rx_add_vid
,
11688 .ndo_vlan_rx_kill_vid
= i40e_vlan_rx_kill_vid
,
11689 #ifdef CONFIG_NET_POLL_CONTROLLER
11690 .ndo_poll_controller
= i40e_netpoll
,
11692 .ndo_setup_tc
= __i40e_setup_tc
,
11693 .ndo_set_features
= i40e_set_features
,
11694 .ndo_set_vf_mac
= i40e_ndo_set_vf_mac
,
11695 .ndo_set_vf_vlan
= i40e_ndo_set_vf_port_vlan
,
11696 .ndo_set_vf_rate
= i40e_ndo_set_vf_bw
,
11697 .ndo_get_vf_config
= i40e_ndo_get_vf_config
,
11698 .ndo_set_vf_link_state
= i40e_ndo_set_vf_link_state
,
11699 .ndo_set_vf_spoofchk
= i40e_ndo_set_vf_spoofchk
,
11700 .ndo_set_vf_trust
= i40e_ndo_set_vf_trust
,
11701 .ndo_udp_tunnel_add
= i40e_udp_tunnel_add
,
11702 .ndo_udp_tunnel_del
= i40e_udp_tunnel_del
,
11703 .ndo_get_phys_port_id
= i40e_get_phys_port_id
,
11704 .ndo_fdb_add
= i40e_ndo_fdb_add
,
11705 .ndo_features_check
= i40e_features_check
,
11706 .ndo_bridge_getlink
= i40e_ndo_bridge_getlink
,
11707 .ndo_bridge_setlink
= i40e_ndo_bridge_setlink
,
11708 .ndo_bpf
= i40e_xdp
,
11712 * i40e_config_netdev - Setup the netdev flags
11713 * @vsi: the VSI being configured
11715 * Returns 0 on success, negative value on failure
11717 static int i40e_config_netdev(struct i40e_vsi
*vsi
)
11719 struct i40e_pf
*pf
= vsi
->back
;
11720 struct i40e_hw
*hw
= &pf
->hw
;
11721 struct i40e_netdev_priv
*np
;
11722 struct net_device
*netdev
;
11723 u8 broadcast
[ETH_ALEN
];
11724 u8 mac_addr
[ETH_ALEN
];
11726 netdev_features_t hw_enc_features
;
11727 netdev_features_t hw_features
;
11729 etherdev_size
= sizeof(struct i40e_netdev_priv
);
11730 netdev
= alloc_etherdev_mq(etherdev_size
, vsi
->alloc_queue_pairs
);
11734 vsi
->netdev
= netdev
;
11735 np
= netdev_priv(netdev
);
11738 hw_enc_features
= NETIF_F_SG
|
11740 NETIF_F_IPV6_CSUM
|
11742 NETIF_F_SOFT_FEATURES
|
11747 NETIF_F_GSO_GRE_CSUM
|
11748 NETIF_F_GSO_PARTIAL
|
11749 NETIF_F_GSO_UDP_TUNNEL
|
11750 NETIF_F_GSO_UDP_TUNNEL_CSUM
|
11756 if (!(pf
->hw_features
& I40E_HW_OUTER_UDP_CSUM_CAPABLE
))
11757 netdev
->gso_partial_features
|= NETIF_F_GSO_UDP_TUNNEL_CSUM
;
11759 netdev
->gso_partial_features
|= NETIF_F_GSO_GRE_CSUM
;
11761 netdev
->hw_enc_features
|= hw_enc_features
;
11763 /* record features VLANs can make use of */
11764 netdev
->vlan_features
|= hw_enc_features
| NETIF_F_TSO_MANGLEID
;
11766 if (!(pf
->flags
& I40E_FLAG_MFP_ENABLED
))
11767 netdev
->hw_features
|= NETIF_F_NTUPLE
| NETIF_F_HW_TC
;
11769 hw_features
= hw_enc_features
|
11770 NETIF_F_HW_VLAN_CTAG_TX
|
11771 NETIF_F_HW_VLAN_CTAG_RX
;
11773 netdev
->hw_features
|= hw_features
;
11775 netdev
->features
|= hw_features
| NETIF_F_HW_VLAN_CTAG_FILTER
;
11776 netdev
->hw_enc_features
|= NETIF_F_TSO_MANGLEID
;
11778 if (vsi
->type
== I40E_VSI_MAIN
) {
11779 SET_NETDEV_DEV(netdev
, &pf
->pdev
->dev
);
11780 ether_addr_copy(mac_addr
, hw
->mac
.perm_addr
);
11781 /* The following steps are necessary for two reasons. First,
11782 * some older NVM configurations load a default MAC-VLAN
11783 * filter that will accept any tagged packet, and we want to
11784 * replace this with a normal filter. Additionally, it is
11785 * possible our MAC address was provided by the platform using
11786 * Open Firmware or similar.
11788 * Thus, we need to remove the default filter and install one
11789 * specific to the MAC address.
11791 i40e_rm_default_mac_filter(vsi
, mac_addr
);
11792 spin_lock_bh(&vsi
->mac_filter_hash_lock
);
11793 i40e_add_mac_filter(vsi
, mac_addr
);
11794 spin_unlock_bh(&vsi
->mac_filter_hash_lock
);
11796 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
11797 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
11798 * the end, which is 4 bytes long, so force truncation of the
11799 * original name by IFNAMSIZ - 4
11801 snprintf(netdev
->name
, IFNAMSIZ
, "%.*sv%%d",
11803 pf
->vsi
[pf
->lan_vsi
]->netdev
->name
);
11804 random_ether_addr(mac_addr
);
11806 spin_lock_bh(&vsi
->mac_filter_hash_lock
);
11807 i40e_add_mac_filter(vsi
, mac_addr
);
11808 spin_unlock_bh(&vsi
->mac_filter_hash_lock
);
11811 /* Add the broadcast filter so that we initially will receive
11812 * broadcast packets. Note that when a new VLAN is first added the
11813 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
11814 * specific filters as part of transitioning into "vlan" operation.
11815 * When more VLANs are added, the driver will copy each existing MAC
11816 * filter and add it for the new VLAN.
11818 * Broadcast filters are handled specially by
11819 * i40e_sync_filters_subtask, as the driver must to set the broadcast
11820 * promiscuous bit instead of adding this directly as a MAC/VLAN
11821 * filter. The subtask will update the correct broadcast promiscuous
11822 * bits as VLANs become active or inactive.
11824 eth_broadcast_addr(broadcast
);
11825 spin_lock_bh(&vsi
->mac_filter_hash_lock
);
11826 i40e_add_mac_filter(vsi
, broadcast
);
11827 spin_unlock_bh(&vsi
->mac_filter_hash_lock
);
11829 ether_addr_copy(netdev
->dev_addr
, mac_addr
);
11830 ether_addr_copy(netdev
->perm_addr
, mac_addr
);
11832 netdev
->priv_flags
|= IFF_UNICAST_FLT
;
11833 netdev
->priv_flags
|= IFF_SUPP_NOFCS
;
11834 /* Setup netdev TC information */
11835 i40e_vsi_config_netdev_tc(vsi
, vsi
->tc_config
.enabled_tc
);
11837 netdev
->netdev_ops
= &i40e_netdev_ops
;
11838 netdev
->watchdog_timeo
= 5 * HZ
;
11839 i40e_set_ethtool_ops(netdev
);
11841 /* MTU range: 68 - 9706 */
11842 netdev
->min_mtu
= ETH_MIN_MTU
;
11843 netdev
->max_mtu
= I40E_MAX_RXBUFFER
- I40E_PACKET_HDR_PAD
;
/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 *
 * Returns 0 on success, negative value on failure
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* remove default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}
/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
 **/
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
{
	struct i40e_veb *veb;
	struct i40e_pf *pf = vsi->back;

	/* Uplink is not a bridge so default to VEB */
	if (vsi->veb_idx == I40E_NO_VEB)
		return 1;

	veb = pf->veb[vsi->veb_idx];
	if (!veb) {
		dev_info(&pf->pdev->dev,
			 "There is no veb associated with the bridge\n");
		return -ENOENT;
	}

	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
		return 0;
	} else {
		/* Uplink is a bridge in VEB mode */
		return 1;
	}

	/* VEPA is now default bridge, so return 0 */
	return 0;
}
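/* Reminder on the two bridge modes referenced above: in VEB mode the internal
 * switch forwards traffic between VSIs locally, while in VEPA mode frames are
 * always sent to the external switch and may be reflected back.  This helper
 * only reports the current mode; it does not change it.
 */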
11898 * i40e_add_vsi - Add a VSI to the switch
11899 * @vsi: the VSI being configured
11901 * This initializes a VSI context depending on the VSI type to be added and
11902 * passes it down to the add_vsi aq command.
11904 static int i40e_add_vsi(struct i40e_vsi
*vsi
)
11907 struct i40e_pf
*pf
= vsi
->back
;
11908 struct i40e_hw
*hw
= &pf
->hw
;
11909 struct i40e_vsi_context ctxt
;
11910 struct i40e_mac_filter
*f
;
11911 struct hlist_node
*h
;
11914 u8 enabled_tc
= 0x1; /* TC0 enabled */
11917 memset(&ctxt
, 0, sizeof(ctxt
));
11918 switch (vsi
->type
) {
11919 case I40E_VSI_MAIN
:
11920 /* The PF's main VSI is already setup as part of the
11921 * device initialization, so we'll not bother with
11922 * the add_vsi call, but we will retrieve the current
11925 ctxt
.seid
= pf
->main_vsi_seid
;
11926 ctxt
.pf_num
= pf
->hw
.pf_id
;
11928 ret
= i40e_aq_get_vsi_params(&pf
->hw
, &ctxt
, NULL
);
11929 ctxt
.flags
= I40E_AQ_VSI_TYPE_PF
;
11931 dev_info(&pf
->pdev
->dev
,
11932 "couldn't get PF vsi config, err %s aq_err %s\n",
11933 i40e_stat_str(&pf
->hw
, ret
),
11934 i40e_aq_str(&pf
->hw
,
11935 pf
->hw
.aq
.asq_last_status
));
11938 vsi
->info
= ctxt
.info
;
11939 vsi
->info
.valid_sections
= 0;
11941 vsi
->seid
= ctxt
.seid
;
11942 vsi
->id
= ctxt
.vsi_number
;
11944 enabled_tc
= i40e_pf_get_tc_map(pf
);
11946 /* Source pruning is enabled by default, so the flag is
11947 * negative logic - if it's set, we need to fiddle with
11948 * the VSI to disable source pruning.
11950 if (pf
->flags
& I40E_FLAG_SOURCE_PRUNING_DISABLED
) {
11951 memset(&ctxt
, 0, sizeof(ctxt
));
11952 ctxt
.seid
= pf
->main_vsi_seid
;
11953 ctxt
.pf_num
= pf
->hw
.pf_id
;
11955 ctxt
.info
.valid_sections
|=
11956 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID
);
11957 ctxt
.info
.switch_id
=
11958 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB
);
11959 ret
= i40e_aq_update_vsi_params(hw
, &ctxt
, NULL
);
11961 dev_info(&pf
->pdev
->dev
,
11962 "update vsi failed, err %s aq_err %s\n",
11963 i40e_stat_str(&pf
->hw
, ret
),
11964 i40e_aq_str(&pf
->hw
,
11965 pf
->hw
.aq
.asq_last_status
));
11971 /* MFP mode setup queue map and update VSI */
11972 if ((pf
->flags
& I40E_FLAG_MFP_ENABLED
) &&
11973 !(pf
->hw
.func_caps
.iscsi
)) { /* NIC type PF */
11974 memset(&ctxt
, 0, sizeof(ctxt
));
11975 ctxt
.seid
= pf
->main_vsi_seid
;
11976 ctxt
.pf_num
= pf
->hw
.pf_id
;
11978 i40e_vsi_setup_queue_map(vsi
, &ctxt
, enabled_tc
, false);
11979 ret
= i40e_aq_update_vsi_params(hw
, &ctxt
, NULL
);
11981 dev_info(&pf
->pdev
->dev
,
11982 "update vsi failed, err %s aq_err %s\n",
11983 i40e_stat_str(&pf
->hw
, ret
),
11984 i40e_aq_str(&pf
->hw
,
11985 pf
->hw
.aq
.asq_last_status
));
11989 /* update the local VSI info queue map */
11990 i40e_vsi_update_queue_map(vsi
, &ctxt
);
11991 vsi
->info
.valid_sections
= 0;
11993 /* Default/Main VSI is only enabled for TC0
11994 * reconfigure it to enable all TCs that are
11995 * available on the port in SFP mode.
11996 * For MFP case the iSCSI PF would use this
11997 * flow to enable LAN+iSCSI TC.
11999 ret
= i40e_vsi_config_tc(vsi
, enabled_tc
);
12001 /* Single TC condition is not fatal,
12002 * message and continue
12004 dev_info(&pf
->pdev
->dev
,
12005 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
12007 i40e_stat_str(&pf
->hw
, ret
),
12008 i40e_aq_str(&pf
->hw
,
12009 pf
->hw
.aq
.asq_last_status
));
12014 case I40E_VSI_FDIR
:
12015 ctxt
.pf_num
= hw
->pf_id
;
12017 ctxt
.uplink_seid
= vsi
->uplink_seid
;
12018 ctxt
.connection_type
= I40E_AQ_VSI_CONN_TYPE_NORMAL
;
12019 ctxt
.flags
= I40E_AQ_VSI_TYPE_PF
;
12020 if ((pf
->flags
& I40E_FLAG_VEB_MODE_ENABLED
) &&
12021 (i40e_is_vsi_uplink_mode_veb(vsi
))) {
12022 ctxt
.info
.valid_sections
|=
12023 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID
);
12024 ctxt
.info
.switch_id
=
12025 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
);
12027 i40e_vsi_setup_queue_map(vsi
, &ctxt
, enabled_tc
, true);
12030 case I40E_VSI_VMDQ2
:
12031 ctxt
.pf_num
= hw
->pf_id
;
12033 ctxt
.uplink_seid
= vsi
->uplink_seid
;
12034 ctxt
.connection_type
= I40E_AQ_VSI_CONN_TYPE_NORMAL
;
12035 ctxt
.flags
= I40E_AQ_VSI_TYPE_VMDQ2
;
12037 /* This VSI is connected to VEB so the switch_id
12038 * should be set to zero by default.
12040 if (i40e_is_vsi_uplink_mode_veb(vsi
)) {
12041 ctxt
.info
.valid_sections
|=
12042 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID
);
12043 ctxt
.info
.switch_id
=
12044 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
);
12047 /* Setup the VSI tx/rx queue map for TC0 only for now */
12048 i40e_vsi_setup_queue_map(vsi
, &ctxt
, enabled_tc
, true);
12051 case I40E_VSI_SRIOV
:
12052 ctxt
.pf_num
= hw
->pf_id
;
12053 ctxt
.vf_num
= vsi
->vf_id
+ hw
->func_caps
.vf_base_id
;
12054 ctxt
.uplink_seid
= vsi
->uplink_seid
;
12055 ctxt
.connection_type
= I40E_AQ_VSI_CONN_TYPE_NORMAL
;
12056 ctxt
.flags
= I40E_AQ_VSI_TYPE_VF
;
12058 /* This VSI is connected to VEB so the switch_id
12059 * should be set to zero by default.
12061 if (i40e_is_vsi_uplink_mode_veb(vsi
)) {
12062 ctxt
.info
.valid_sections
|=
12063 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID
);
12064 ctxt
.info
.switch_id
=
12065 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB
);
12068 if (vsi
->back
->flags
& I40E_FLAG_IWARP_ENABLED
) {
12069 ctxt
.info
.valid_sections
|=
12070 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID
);
12071 ctxt
.info
.queueing_opt_flags
|=
12072 (I40E_AQ_VSI_QUE_OPT_TCP_ENA
|
12073 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI
);
12076 ctxt
.info
.valid_sections
|= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID
);
12077 ctxt
.info
.port_vlan_flags
|= I40E_AQ_VSI_PVLAN_MODE_ALL
;
12078 if (pf
->vf
[vsi
->vf_id
].spoofchk
) {
12079 ctxt
.info
.valid_sections
|=
12080 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID
);
12081 ctxt
.info
.sec_flags
|=
12082 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK
|
12083 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK
);
12085 /* Setup the VSI tx/rx queue map for TC0 only for now */
12086 i40e_vsi_setup_queue_map(vsi
, &ctxt
, enabled_tc
, true);
12089 case I40E_VSI_IWARP
:
12090 /* send down message to iWARP */
12097 if (vsi
->type
!= I40E_VSI_MAIN
) {
12098 ret
= i40e_aq_add_vsi(hw
, &ctxt
, NULL
);
12100 dev_info(&vsi
->back
->pdev
->dev
,
12101 "add vsi failed, err %s aq_err %s\n",
12102 i40e_stat_str(&pf
->hw
, ret
),
12103 i40e_aq_str(&pf
->hw
,
12104 pf
->hw
.aq
.asq_last_status
));
12108 vsi
->info
= ctxt
.info
;
12109 vsi
->info
.valid_sections
= 0;
12110 vsi
->seid
= ctxt
.seid
;
12111 vsi
->id
= ctxt
.vsi_number
;
12114 vsi
->active_filters
= 0;
12115 clear_bit(__I40E_VSI_OVERFLOW_PROMISC
, vsi
->state
);
12116 spin_lock_bh(&vsi
->mac_filter_hash_lock
);
12117 /* If macvlan filters already exist, force them to get loaded */
12118 hash_for_each_safe(vsi
->mac_filter_hash
, bkt
, h
, f
, hlist
) {
12119 f
->state
= I40E_FILTER_NEW
;
12122 spin_unlock_bh(&vsi
->mac_filter_hash_lock
);
12125 vsi
->flags
|= I40E_VSI_FLAG_FILTER_CHANGED
;
12126 pf
->flags
|= I40E_FLAG_FILTER_SYNC
;
12129 /* Update VSI BW information */
12130 ret
= i40e_vsi_get_bw_info(vsi
);
12132 dev_info(&pf
->pdev
->dev
,
12133 "couldn't get vsi bw info, err %s aq_err %s\n",
12134 i40e_stat_str(&pf
->hw
, ret
),
12135 i40e_aq_str(&pf
->hw
, pf
->hw
.aq
.asq_last_status
));
12136 /* VSI is already added so not tearing that up */
12145 * i40e_vsi_release - Delete a VSI and free its resources
12146 * @vsi: the VSI being removed
12148 * Returns 0 on success or < 0 on error
12150 int i40e_vsi_release(struct i40e_vsi
*vsi
)
12152 struct i40e_mac_filter
*f
;
12153 struct hlist_node
*h
;
12154 struct i40e_veb
*veb
= NULL
;
12155 struct i40e_pf
*pf
;
12161 /* release of a VEB-owner or last VSI is not allowed */
12162 if (vsi
->flags
& I40E_VSI_FLAG_VEB_OWNER
) {
12163 dev_info(&pf
->pdev
->dev
, "VSI %d has existing VEB %d\n",
12164 vsi
->seid
, vsi
->uplink_seid
);
12167 if (vsi
== pf
->vsi
[pf
->lan_vsi
] &&
12168 !test_bit(__I40E_DOWN
, pf
->state
)) {
12169 dev_info(&pf
->pdev
->dev
, "Can't remove PF VSI\n");
12173 uplink_seid
= vsi
->uplink_seid
;
12174 if (vsi
->type
!= I40E_VSI_SRIOV
) {
12175 if (vsi
->netdev_registered
) {
12176 vsi
->netdev_registered
= false;
12178 /* results in a call to i40e_close() */
12179 unregister_netdev(vsi
->netdev
);
12182 i40e_vsi_close(vsi
);
12184 i40e_vsi_disable_irq(vsi
);
12187 spin_lock_bh(&vsi
->mac_filter_hash_lock
);
12189 /* clear the sync flag on all filters */
12191 __dev_uc_unsync(vsi
->netdev
, NULL
);
12192 __dev_mc_unsync(vsi
->netdev
, NULL
);
12195 /* make sure any remaining filters are marked for deletion */
12196 hash_for_each_safe(vsi
->mac_filter_hash
, bkt
, h
, f
, hlist
)
12197 __i40e_del_filter(vsi
, f
);
12199 spin_unlock_bh(&vsi
->mac_filter_hash_lock
);
12201 i40e_sync_vsi_filters(vsi
);
12203 i40e_vsi_delete(vsi
);
12204 i40e_vsi_free_q_vectors(vsi
);
12206 free_netdev(vsi
->netdev
);
12207 vsi
->netdev
= NULL
;
12209 i40e_vsi_clear_rings(vsi
);
12210 i40e_vsi_clear(vsi
);
12212 /* If this was the last thing on the VEB, except for the
12213 * controlling VSI, remove the VEB, which puts the controlling
12214 * VSI onto the next level down in the switch.
12216 * Well, okay, there's one more exception here: don't remove
12217 * the orphan VEBs yet. We'll wait for an explicit remove request
12218 * from up the network stack.
12220 for (n
= 0, i
= 0; i
< pf
->num_alloc_vsi
; i
++) {
12222 pf
->vsi
[i
]->uplink_seid
== uplink_seid
&&
12223 (pf
->vsi
[i
]->flags
& I40E_VSI_FLAG_VEB_OWNER
) == 0) {
12224 n
++; /* count the VSIs */
12227 for (i
= 0; i
< I40E_MAX_VEB
; i
++) {
12230 if (pf
->veb
[i
]->uplink_seid
== uplink_seid
)
12231 n
++; /* count the VEBs */
12232 if (pf
->veb
[i
]->seid
== uplink_seid
)
12235 if (n
== 0 && veb
&& veb
->uplink_seid
!= 0)
12236 i40e_veb_release(veb
);
12242 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
12243 * @vsi: ptr to the VSI
12245 * This should only be called after i40e_vsi_mem_alloc() which allocates the
12246 * corresponding SW VSI structure and initializes num_queue_pairs for the
12247 * newly allocated VSI.
12249 * Returns 0 on success or negative on failure
12251 static int i40e_vsi_setup_vectors(struct i40e_vsi
*vsi
)
12254 struct i40e_pf
*pf
= vsi
->back
;
12256 if (vsi
->q_vectors
[0]) {
12257 dev_info(&pf
->pdev
->dev
, "VSI %d has existing q_vectors\n",
12262 if (vsi
->base_vector
) {
12263 dev_info(&pf
->pdev
->dev
, "VSI %d has non-zero base vector %d\n",
12264 vsi
->seid
, vsi
->base_vector
);
12268 ret
= i40e_vsi_alloc_q_vectors(vsi
);
12270 dev_info(&pf
->pdev
->dev
,
12271 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
12272 vsi
->num_q_vectors
, vsi
->seid
, ret
);
12273 vsi
->num_q_vectors
= 0;
12274 goto vector_setup_out
;
12277 /* In Legacy mode, we do not have to get any other vector since we
12278 * piggyback on the misc/ICR0 for queue interrupts.
12280 if (!(pf
->flags
& I40E_FLAG_MSIX_ENABLED
))
12282 if (vsi
->num_q_vectors
)
12283 vsi
->base_vector
= i40e_get_lump(pf
, pf
->irq_pile
,
12284 vsi
->num_q_vectors
, vsi
->idx
);
12285 if (vsi
->base_vector
< 0) {
12286 dev_info(&pf
->pdev
->dev
,
12287 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
12288 vsi
->num_q_vectors
, vsi
->seid
, vsi
->base_vector
);
12289 i40e_vsi_free_q_vectors(vsi
);
12291 goto vector_setup_out
;
12299 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
12300 * @vsi: pointer to the vsi.
12302 * This re-allocates a vsi's queue resources.
12304 * Returns pointer to the successfully allocated and configured VSI sw struct
12305 * on success, otherwise returns NULL on failure.
12307 static struct i40e_vsi
*i40e_vsi_reinit_setup(struct i40e_vsi
*vsi
)
12309 u16 alloc_queue_pairs
;
12310 struct i40e_pf
*pf
;
12319 i40e_put_lump(pf
->qp_pile
, vsi
->base_queue
, vsi
->idx
);
12320 i40e_vsi_clear_rings(vsi
);
12322 i40e_vsi_free_arrays(vsi
, false);
12323 i40e_set_num_rings_in_vsi(vsi
);
12324 ret
= i40e_vsi_alloc_arrays(vsi
, false);
12328 alloc_queue_pairs
= vsi
->alloc_queue_pairs
*
12329 (i40e_enabled_xdp_vsi(vsi
) ? 2 : 1);
12331 ret
= i40e_get_lump(pf
, pf
->qp_pile
, alloc_queue_pairs
, vsi
->idx
);
12333 dev_info(&pf
->pdev
->dev
,
12334 "failed to get tracking for %d queues for VSI %d err %d\n",
12335 alloc_queue_pairs
, vsi
->seid
, ret
);
12338 vsi
->base_queue
= ret
;
12340 /* Update the FW view of the VSI. Force a reset of TC and queue
12341 * layout configurations.
12343 enabled_tc
= pf
->vsi
[pf
->lan_vsi
]->tc_config
.enabled_tc
;
12344 pf
->vsi
[pf
->lan_vsi
]->tc_config
.enabled_tc
= 0;
12345 pf
->vsi
[pf
->lan_vsi
]->seid
= pf
->main_vsi_seid
;
12346 i40e_vsi_config_tc(pf
->vsi
[pf
->lan_vsi
], enabled_tc
);
12347 if (vsi
->type
== I40E_VSI_MAIN
)
12348 i40e_rm_default_mac_filter(vsi
, pf
->hw
.mac
.perm_addr
);
12350 /* assign it some queues */
12351 ret
= i40e_alloc_rings(vsi
);
12355 /* map all of the rings to the q_vectors */
12356 i40e_vsi_map_rings_to_vectors(vsi
);
12360 i40e_vsi_free_q_vectors(vsi
);
12361 if (vsi
->netdev_registered
) {
12362 vsi
->netdev_registered
= false;
12363 unregister_netdev(vsi
->netdev
);
12364 free_netdev(vsi
->netdev
);
12365 vsi
->netdev
= NULL
;
12367 i40e_aq_delete_element(&pf
->hw
, vsi
->seid
, NULL
);
12369 i40e_vsi_clear(vsi
);
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then add a VSI
 * to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configure VSI sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	u16 alloc_queue_pairs;
	int v_idx;
	int ret, i;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
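/* Illustrative sketch (not from the driver itself): a VMDq child VSI is
 * created by handing i40e_vsi_setup() the seid of the switch element it
 * should hang off. The uplink choice below mirrors what the switch setup
 * path does for the main VSI; the caller's error handling is elided and
 * the -ENOMEM return is a hypothetical choice.
 *
 *	u16 uplink = (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) ?
 *		     pf->veb[pf->lan_veb]->seid : pf->mac_seid;
 *	struct i40e_vsi *vmdq_vsi;
 *
 *	vmdq_vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, uplink, 0);
 *	if (!vmdq_vsi)
 *		return -ENOMEM;
 */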
/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
					le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i * 4)) & 0x7);
	}

out:
	return ret;
}
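/* Illustrative sketch of the unpacking done above (values invented): the two
 * 16-bit tc_bw_max words are glued into one 32-bit value, and each traffic
 * class then owns a 4-bit nibble of it, of which the low 3 bits are kept.
 *
 *	u32 tc_bw_max = 0x00325417;	// hypothetical packed value
 *	int tc = 3;
 *	u8 max_quanta = (tc_bw_max >> (tc * 4)) & 0x7;	// nibble 3 is 0x5 -> 5
 */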
/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
	int ret = -ENOENT;
	struct i40e_veb *veb;
	int i;

	/* Need to protect the allocation of switch elements at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VEB list may be fragmented if VEB creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot, looping back around if necessary
	 */
	i = 0;
	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
		i++;
	if (i >= I40E_MAX_VEB) {
		ret = -ENOMEM;
		goto err_alloc_veb;  /* out of VEB slots! */
	}

	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
	if (!veb) {
		ret = -ENOMEM;
		goto err_alloc_veb;
	}
	veb->pf = pf;
	veb->idx = i;
	veb->enabled_tc = 1;

	pf->veb[i] = veb;
	ret = i;
err_alloc_veb:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}
/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/
static void i40e_veb_clear(struct i40e_veb *veb)
{
	if (!veb)
		return;

	if (veb->pf) {
		struct i40e_pf *pf = veb->pf;

		mutex_lock(&pf->switch_mutex);
		if (pf->veb[veb->idx] == veb)
			pf->veb[veb->idx] = NULL;
		mutex_unlock(&pf->switch_mutex);
	}

	kfree(veb);
}
/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}
/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
	int ret;

	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, false,
			      &veb->seid, enable_stats, NULL);

	/* get a VEB from the hardware */
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}
/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}
/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */
	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		pf->last_sw_conf_flags = flags;
	}

	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
						NULL);
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
		pf->last_sw_conf_valid_flags = valid_flags;
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_cloud_filter_exit(pf);
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	/* repopulate tunnel port filters */
	i40e_sync_udp_filters(pf);

	return ret;
}
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;
	int q_max;

	pf->num_lan_qps = 0;

	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big chunk of the queues.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_CAPABLE	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_SRIOV_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}

		/* limit lan qps to the smaller of qps, cpus or msix */
		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
		pf->num_lan_qps = q_max;

		queues_left -= pf->num_lan_qps;
	}

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
}
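/* Illustrative sketch of the LAN queue clamp above (numbers invented):
 * start from the larger of the RSS maximum and the online CPU count, then
 * cap it by what the function actually owns in Tx queues and MSI-X vectors.
 *
 *	rss_size_max = 64, online CPUs = 16   ->  q_max = 64
 *	func_caps.num_tx_qp = 48              ->  q_max = 48
 *	func_caps.num_msix_vectors = 32       ->  q_max = 32
 *
 * so pf->num_lan_qps would end up as 32 in this hypothetical case.
 */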
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow director,
 * ethertype and macvlan type filter settings for the pf.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}
#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf;
	int i;

	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!buf)
		return;

	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
	i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
		      pf->hw.func_caps.num_vsis,
		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " RSS");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		i += snprintf(&buf[i], REMAIN(i), " FD_SB");
		i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		i += snprintf(&buf[i], REMAIN(i), " DCB");
	i += snprintf(&buf[i], REMAIN(i), " VxLAN");
	i += snprintf(&buf[i], REMAIN(i), " Geneve");
	if (pf->flags & I40E_FLAG_PTP)
		i += snprintf(&buf[i], REMAIN(i), " PTP");
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += snprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}
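/* Illustrative sketch of the accumulation pattern above (standalone values,
 * not driver state): each snprintf() returns how many characters it wanted
 * to write, so keeping a running index and shrinking the remaining space
 * with REMAIN() appends fields without overrunning the buffer.
 *
 *	char buf[INFO_STRING_LEN];
 *	int n = snprintf(buf, INFO_STRING_LEN, "Features:");
 *
 *	n += snprintf(&buf[n], REMAIN(n), " RSS");
 *	n += snprintf(&buf[n], REMAIN(n), " PTP");
 *	// buf == "Features: RSS PTP", n == 17 in this made-up case
 */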
/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware, or arch
 * specific fallback. Otherwise, we'll default to the stored value in
 * firmware.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}
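/* Illustrative sketch (buffer is hypothetical): eth_platform_get_mac_address()
 * returns 0 and fills the buffer when the platform (device tree / arch
 * fallback) provides an address, so the firmware-stored address is only read
 * when that lookup fails.
 *
 *	u8 mac[ETH_ALEN];
 *
 *	if (eth_platform_get_mac_address(&pdev->dev, mac))
 *		i40e_get_mac_addr(&pf->hw, mac);	// fall back to the NVM/firmware copy
 *	// mac[] now holds whichever source answered first
 */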
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	u8 set_fc_aq_fail;
	int err;
	u32 val;
	u32 i;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	/* set up pci connections */
	err = pci_request_mem_regions(pdev, i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup.  This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, pf->state);

	hw = &pf->hw;
	hw->back = pf;

	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;
	pf->instance = pfs_found;

	/* Select something other than the 802.1ad ethertype for the
	 * switch to use internally and drop on ingress.
	 */
	hw->switch_tag = 0xffff;
	hw->first_tag = ETH_P_8021AD;
	hw->second_tag = ETH_P_8021Q;

	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	pf->msg_enable = netif_msg_init(debug,
					NETIF_MSG_DRV |
					NETIF_MSG_PROBE |
					NETIF_MSG_LINK);
	if (debug < -1)
		pf->hw.debug_mask = debug;

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);

		i40e_clear_pxe_mode(hw);
	}

	/* Reset here to make sure all is clean and to define PF 'n' */
	err = i40e_pf_reset(hw);
	if (err) {
		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
		goto err_pf_reset;
	}

	hw->aq.num_arq_entries = I40E_AQ_LEN;
	hw->aq.num_asq_entries = I40E_AQ_LEN;
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	err = i40e_init_adminq(hw);
	if (err) {
		if (err == I40E_ERR_FIRMWARE_API_VERSION)
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		else
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");

		goto err_pf_reset;
	}
	i40e_get_oem_version(hw);

	/* provide nvm, fw, api versions */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw));

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
		dev_info(&pdev->dev,
			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);
	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}

	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, NULL);
	}

	/* allow a platform config to override the HW addr */
	i40e_get_platform_mac_addr(pdev, pf);

	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->hw_features |= I40E_HW_PORT_ID_VALID;

	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);
#ifdef CONFIG_I40E_DCB
	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);

	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}
	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);

	/* Make sure flow control is set according to current settings */
	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_phy_cap\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on set_phy_config\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_link_info\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		err = i40e_lan_add_device(pf);
		if (err)
			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
				 err);
	}

#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strncpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strncpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strncpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strncpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}

	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
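/* Illustrative sketch of the DMA-mask fallback used near the top of
 * i40e_probe() (standalone fragment, surrounding error handling elided):
 * ask for a 64-bit mask first and only settle for 32-bit addressing when
 * the platform refuses both the streaming and coherent masks.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 *		if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *			return -EIO;	// no usable DMA configuration
 *		dev_info(&pdev->dev, "falling back to 32-bit DMA\n");
 *	}
 */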
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that is should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	if (pf->service_timer.function)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	i40e_cloud_filter_exit(pf);

	/* remove attached clients */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		ret_code = i40e_lan_del_device(pf);
		if (ret_code)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 *
 * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		i40e_prep_for_reset(pf, false);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	u32 reg;
	int err;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_prep_for_reset(pf, false);
}
14090 * @pdev: PCI device information struct
14092 static void i40e_pci_error_reset_done(struct pci_dev
*pdev
)
14094 struct i40e_pf
*pf
= pci_get_drvdata(pdev
);
14096 i40e_reset_and_rebuild(pf
, false, false);
14100 * i40e_pci_error_resume - restart operations after PCI error recovery
14101 * @pdev: PCI device information struct
14103 * Called to allow the driver to bring things back up after PCI error
14104 * and/or reset recovery has finished.
14106 static void i40e_pci_error_resume(struct pci_dev
*pdev
)
14108 struct i40e_pf
*pf
= pci_get_drvdata(pdev
);
14110 dev_dbg(&pdev
->dev
, "%s\n", __func__
);
14111 if (test_bit(__I40E_SUSPENDED
, pf
->state
))
14114 i40e_handle_reset_warning(pf
, false);
/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 mac_addr[6];
	u16 flags = 0;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The FW expects the mac address write cmd to first be called with
	 * one of these flags before calling it again with the multicast
	 * enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;

	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
		return;
	}

	flags = I40E_AQC_MC_MAG_EN
			| I40E_AQC_WOL_PRESERVE_ON_PFR
			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf, true);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_cloud_filter_exit(pf);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
/**
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure service task will not be running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely hibernate even when there are a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	i40e_clear_interrupt_scheme(pf);

	return 0;
}
/**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err) {
		dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
			err);
	}

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, false);

	/* Clear suspended state last after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
	.resume = i40e_pci_error_resume,
};

static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);

static struct pci_driver i40e_driver = {
	.name     = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe    = i40e_probe,
	.remove   = i40e_remove,
	.driver   = {
		.pm = &i40e_pm_ops,
	},
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);
/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
}
module_exit(i40e_exit_module);