/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>
/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
		"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 1
#define DRV_VERSION_BUILD 14
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* a few forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
				     struct i40e_cloud_filter *filter,
				     bool add);
static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
					     struct i40e_cloud_filter *filter,
					     bool add);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:  pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:  pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
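
/* Usage sketch (illustrative, not a call site in this file): the queue-pair
 * and IRQ piles are carved up through this allocator, roughly
 *
 *	base = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
 *			     vsi->idx);
 *
 * which tags entries base..base+needed-1 with (id | I40E_PILE_VALID_BIT)
 * and returns the base index; i40e_put_lump(pf->qp_pile, base, vsi->idx)
 * clears the same run again.
 */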

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes += bytes;
}
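
/* Note on the fetch loop above: u64_stats_fetch_begin_irq() and
 * u64_stats_fetch_retry_irq() bracket a seqcount read section, so the
 * packet/byte pair is re-read if a writer updated ring->syncp mid-read;
 * on 64-bit kernels these helpers compile down to plain loads.
 */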

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: statistics entry to be updated
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = READ_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;
		i40e_get_netdev_stats_struct_tx(tx_ring, stats);

		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;

		if (i40e_enabled_xdp_vsi(vsi))
			i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero. In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
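
/* Worked example of the roll-over handling above (illustrative): if the
 * saved *offset is 0xFFFFFFFFFFF0 and the 48-bit counter has since
 * wrapped around to 0x10, then new_data < *offset and the stat becomes
 * (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0 = 0x20, i.e. 32 events since the
 * offset was taken, with the final mask keeping the result in 48 bits.
 */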

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs. This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications. We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ALL. We could check the table each
	 * time, incurring search cost repeatedly. However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filters which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}

/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 **/
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
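
/* The corrections above can be summarized as follows (illustrative):
 *
 *	PVID?	active VLANs?	old filter VLAN		re-added as
 *	yes	-		!= pvid			VLAN=pvid
 *	no	yes		I40E_VLAN_ANY (-1)	VLAN=0
 *	no	no		0			I40E_VLAN_ANY (-1)
 */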

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		/* If we're in overflow promisc mode, set the state directly
		 * to failed, so we don't bother to try sending the filter
		 * to the hardware.
		 */
		if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
			f->state = I40E_FILTER_FAILED;
		else
			f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}

/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}

/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}

/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}
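
/* Example of the behavior above (illustrative): if the VSI holds filters
 * on VLANs 10 and 20, i40e_add_mac_filter(vsi, mac) creates mac/10 and
 * mac/20 entries; with no VLAN filters and no PVID it creates a single
 * mac/I40E_VLAN_ANY entry instead.
 */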

/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
	     "Missing mac_filter_hash_lock\n");
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	i40e_add_mac_filter(vsi, addr->sa_data);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}

/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table
 * @lut_size: size of the lookup table
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}

/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}
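
/* In the default path above, i40e_fill_rss_lut() is assumed to spread
 * LUT entries round-robin over the active queues, i.e. lut[i] =
 * i % vsi->rss_size, so all rss_size queues share the load evenly.
 */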

/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured,
 * @ctxt: VSI context structure
 * @enabled_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
					   struct i40e_vsi_context *ctxt,
					   u8 enabled_tc)
{
	u16 qcount = 0, max_qcount, qmap, sections = 0;
	int i, override_q, pow, num_qps, ret;
	u8 netdev_tc = 0, offset = 0;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(num_qps);
	if (!is_power_of_2(num_qps))
		pow++;
	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue offset/count for all TCs for given VSI */
	max_qcount = vsi->mqprio_qopt.qopt.count[0];
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount = vsi->mqprio_qopt.qopt.count[i];
			if (qcount > max_qcount)
				max_qcount = qcount;
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;
		}
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset + qcount;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);

	/* Reconfigure RSS for main VSI with max queue count */
	vsi->rss_size = max_qcount;
	ret = i40e_vsi_config_rss(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to reconfig rss for num_queues (%u)\n",
			 max_qcount);
		return ret;
	}
	vsi->reconfig_rss = true;
	dev_dbg(&vsi->back->pdev->dev,
		"Reconfigured rss with num_queues (%u)\n", max_qcount);

	/* Find queue count available for channel VSIs and starting offset
	 * for channel VSIs
	 */
	override_q = vsi->mqprio_qopt.qopt.count[0];
	if (override_q && override_q < vsi->num_queue_pairs) {
		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
		vsi->next_base_queue = override_q;
	}
	return 0;
}
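
/* qmap encoding example for the computation above (illustrative): with
 * 8 queue pairs on TC0, num_qps = 8 is already a power of two, so
 * pow = ilog2(8) = 3 and offset = 0, giving
 *
 *	qmap = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * i.e. the TC starts at queue 0 and spans 2^3 = 8 queues.
 */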
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 num_tc_qps = 0;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 1;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in non-DCB, non-MQPRIO case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	qcount = vsi->alloc_queue_pairs;

	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}

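/* Illustrative sketch (not driver code): how a TC's qmap word is built
 * above. Assuming a TC at queue offset 8 with qcount 6, the loop rounds
 * the count up to the next power of 2 (pow = 3, since 2^3 = 8 >= 6) and
 * packs offset and pow into one 16-bit word:
 *
 *	u16 offset = 8, qcount = 6, pow = 0, num_qps = qcount;
 *
 *	while (num_qps && (BIT_ULL(pow) < qcount)) {
 *		pow++;
 *		num_qps >>= 1;
 *	}
 *	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * Hardware therefore always sees a power-of-2 sized queue region per
 * TC, even when fewer queues are actually used.
 */
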
/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (i40e_add_mac_filter(vsi, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	i40e_del_mac_filter(vsi, addr);

	return 0;
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}

/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to VSI struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
 *
 * MAC filter entries from this list were slated for deletion.
 **/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;

	hlist_for_each_entry_safe(f, h, from, hlist) {
		u64 key = i40e_addr_to_hkey(f->macaddr);

		/* Move the element back into MAC filter list */
		hlist_del(&f->hlist);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);
	}
}

/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
 *
 * MAC filter entries from this list were slated for addition.
 **/
static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;

	hlist_for_each_entry_safe(new, h, from, hlist) {
		/* We can simply free the wrapper structure */
		hlist_del(&new->hlist);
		kfree(new);
	}
}

/**
 * i40e_next_filter - Get the next non-broadcast filter from a list
 * @next: pointer to filter in list
 *
 * Returns the next non-broadcast filter in the list. Required so that we
 * ignore broadcast filters within the list, since these are not handled via
 * the normal firmware update path.
 **/
static
struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
{
	hlist_for_each_entry_continue(next, hlist) {
		if (!is_broadcast_ether_addr(next->f->macaddr))
			return next;
	}

	return NULL;
}

/**
 * i40e_update_filter_state - Update filter state based on return data
 * from firmware
 * @count: Number of filters added
 * @add_list: return data from fw
 * @add_head: pointer to first filter in current batch
 *
 * MAC filter entries from list were slated to be added to device. Returns
 * number of successful filters. Note that 0 does NOT mean success!
 **/
static int
i40e_update_filter_state(int count,
			 struct i40e_aqc_add_macvlan_element_data *add_list,
			 struct i40e_new_mac_filter *add_head)
{
	int retval = 0;
	int i;

	for (i = 0; i < count; i++) {
		/* Always check status of each filter. We don't need to check
		 * the firmware return status because we pre-set the filter
		 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
		 * request to the adminq. Thus, if it no longer matches then
		 * we know the filter is active.
		 */
		if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
			add_head->state = I40E_FILTER_FAILED;
		} else {
			add_head->state = I40E_FILTER_ACTIVE;
			retval++;
		}

		add_head = i40e_next_filter(add_head);
		if (!add_head)
			break;
	}

	return retval;
}

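/* Illustrative sketch (not driver code): why pre-setting match_method
 * works as an implicit per-filter status above. Every element is
 * stamped with the I40E_AQC_MM_ERR_NO_RES sentinel before the AdminQ
 * request; firmware overwrites the field only for filters it actually
 * programmed:
 *
 *	add_list[i].match_method = I40E_AQC_MM_ERR_NO_RES; // sentinel
 *	// ...send add-macvlan request via AdminQ...
 *	if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES)
 *		;	// sentinel untouched: filter was rejected
 *	else
 *		;	// overwritten: filter is active in hardware
 */
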
/**
 * i40e_aqc_del_filters - Request firmware to delete a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @num_del: the number of filters to delete
 * @retval: Set to -EIO on failure to delete
 *
 * Send a request to firmware via AdminQ to delete a set of filters. Uses
 * *retval instead of a return value so that success does not force ret_val to
 * be set to 0. This ensures that a sequence of calls to this function
 * preserve the previous value of *retval on successful delete.
 **/
static
void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_remove_macvlan_element_data *list,
			  int num_del, int *retval)
{
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;
	int aq_err;

	aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
	aq_err = hw->aq.asq_last_status;

	/* Explicitly ignore and do not report when firmware returns ENOENT */
	if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
		*retval = -EIO;
		dev_info(&vsi->back->pdev->dev,
			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
			 vsi_name, i40e_stat_str(hw, aq_ret),
			 i40e_aq_str(hw, aq_err));
	}
}

/**
 * i40e_aqc_add_filters - Request firmware to add a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @add_head: Position in the add hlist
 * @num_add: the number of filters to add
 * @promisc_changed: set to true on exit if promiscuous mode was forced on
 *
 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
 * promisc_changed to true if the firmware has run out of space for more
 * filters.
 **/
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_add_macvlan_element_data *list,
			  struct i40e_new_mac_filter *add_head,
			  int num_add, bool *promisc_changed)
{
	struct i40e_hw *hw = &vsi->back->hw;
	int aq_err, fcnt;

	i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
	aq_err = hw->aq.asq_last_status;
	fcnt = i40e_update_filter_state(num_add, list, add_head);

	if (fcnt != num_add) {
		*promisc_changed = true;
		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
			 i40e_aq_str(hw, aq_err),
			 vsi_name);
	}
}

/**
 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
 * @vsi: pointer to the VSI
 * @vsi_name: name to display in messages
 * @f: filter data
 *
 * This function sets or clears the promiscuous broadcast flags for VLAN
 * filters in order to properly receive broadcast frames. Assumes that only
 * broadcast filters are passed.
 *
 * Returns status indicating success or failure.
 **/
static i40e_status
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_mac_filter *f)
{
	bool enable = f->state == I40E_FILTER_NEW;
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;

	if (f->vlan == I40E_VLAN_ANY) {
		aq_ret = i40e_aq_set_vsi_broadcast(hw,
						   vsi->seid,
						   enable,
						   NULL);
	} else {
		aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
							    vsi->seid,
							    enable,
							    f->vlan,
							    NULL);
	}

	if (aq_ret)
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s setting broadcast promiscuous mode on %s\n",
			 i40e_aq_str(hw, hw->aq.asq_last_status),
			 vsi_name);

	return aq_ret;
}

/**
 * i40e_set_promiscuous - set promiscuous mode
 * @pf: board private structure
 * @promisc: promisc on or off
 *
 * There are different ways of setting promiscuous mode on a PF depending on
 * what state/environment we're in. This identifies and sets it appropriately.
 * Returns 0 on success.
 **/
static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;

	if (vsi->type == I40E_VSI_MAIN &&
	    pf->lan_veb != I40E_NO_VEB &&
	    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
		/* set defport ON for Main VSI instead of true promisc
		 * this way we will get all unicast/multicast and VLAN
		 * promisc behavior but will not get VF or VMDq traffic
		 * replicated on the Main VSI.
		 */
		if (promisc)
			aq_ret = i40e_aq_set_default_vsi(hw,
							 vsi->seid,
							 NULL);
		else
			aq_ret = i40e_aq_clear_default_vsi(hw,
							   vsi->seid,
							   NULL);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "Set default VSI failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	} else {
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
						  hw,
						  vsi->seid,
						  promisc, NULL,
						  true);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "set unicast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
						  hw,
						  vsi->seid,
						  promisc, NULL);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "set multicast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	if (!aq_ret)
		pf->cur_promisc = promisc;

	return aq_ret;
}

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct hlist_head tmp_add_list, tmp_del_list;
	struct i40e_mac_filter *f;
	struct i40e_new_mac_filter *new, *add_head = NULL;
	struct i40e_hw *hw = &vsi->back->hw;
	unsigned int failed_filters = 0;
	unsigned int vlan_filters = 0;
	bool promisc_changed = false;
	char vsi_name[16] = "PF";
	int filter_list_len = 0;
	i40e_status aq_ret = 0;
	u32 changed_flags = 0;
	struct hlist_node *h;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	int retval = 0;
	u16 cmd_flags;
	int list_size;
	int bkt;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	INIT_HLIST_HEAD(&tmp_add_list);
	INIT_HLIST_HEAD(&tmp_del_list);

	if (vsi->type == I40E_VSI_SRIOV)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
	else if (vsi->type != I40E_VSI_MAIN)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		/* Create a list of filters to delete. */
		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
			if (f->state == I40E_FILTER_REMOVE) {
				/* Move the element into temporary del_list */
				hash_del(&f->hlist);
				hlist_add_head(&f->hlist, &tmp_del_list);

				/* Avoid counting removed filters */
				continue;
			}
			if (f->state == I40E_FILTER_NEW) {
				/* Create a temporary i40e_new_mac_filter */
				new = kzalloc(sizeof(*new), GFP_ATOMIC);
				if (!new)
					goto err_no_memory_locked;

				/* Store pointer to the real filter */
				new->f = f;
				new->state = f->state;

				/* Add it to the hash list */
				hlist_add_head(&new->hlist, &tmp_add_list);
			}

			/* Count the number of active (current and new) VLAN
			 * filters we have now. Does not count filters which
			 * are marked for deletion.
			 */
			if (f->vlan > 0)
				vlan_filters++;
		}

		retval = i40e_correct_mac_vlan_filters(vsi,
						       &tmp_add_list,
						       &tmp_del_list,
						       vlan_filters);
		if (retval)
			goto err_no_memory_locked;

		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Now process 'del_list' outside the lock */
	if (!hlist_empty(&tmp_del_list)) {
		filter_list_len = hw->aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		list_size = filter_list_len *
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kzalloc(list_size, GFP_ATOMIC);
		if (!del_list)
			goto err_no_memory;

		hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
			cmd_flags = 0;

			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag and release filter list.
			 */
			if (is_broadcast_ether_addr(f->macaddr)) {
				i40e_aqc_broadcast_filter(vsi, vsi_name, f);

				hlist_del(&f->hlist);
				kfree(f);
				continue;
			}

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				del_list[num_del].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				del_list[num_del].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				i40e_aqc_del_filters(vsi, vsi_name, del_list,
						     num_del, &retval);
				memset(del_list, 0, list_size);
				num_del = 0;
			}
			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			hlist_del(&f->hlist);
			kfree(f);
		}

		if (num_del) {
			i40e_aqc_del_filters(vsi, vsi_name, del_list,
					     num_del, &retval);
		}

		kfree(del_list);
		del_list = NULL;
	}

	if (!hlist_empty(&tmp_add_list)) {
		/* Do all the adds now. */
		filter_list_len = hw->aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		list_size = filter_list_len *
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(list_size, GFP_ATOMIC);
		if (!add_list)
			goto err_no_memory;

		num_add = 0;
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
				     vsi->state)) {
				new->state = I40E_FILTER_FAILED;
				continue;
			}

			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag instead of adding a MAC filter.
			 */
			if (is_broadcast_ether_addr(new->f->macaddr)) {
				if (i40e_aqc_broadcast_filter(vsi, vsi_name,
							      new->f))
					new->state = I40E_FILTER_FAILED;
				else
					new->state = I40E_FILTER_ACTIVE;
				continue;
			}

			/* add to add array */
			if (num_add == 0)
				add_head = new;
			cmd_flags = 0;
			ether_addr_copy(add_list[num_add].mac_addr,
					new->f->macaddr);
			if (new->f->vlan == I40E_VLAN_ANY) {
				add_list[num_add].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				add_list[num_add].vlan_tag =
					cpu_to_le16((u16)(new->f->vlan));
			}
			add_list[num_add].queue_number = 0;
			/* set invalid match method for later detection */
			add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				i40e_aqc_add_filters(vsi, vsi_name, add_list,
						     add_head, num_add,
						     &promisc_changed);
				memset(add_list, 0, list_size);
				num_add = 0;
			}
		}
		if (num_add) {
			i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
					     num_add, &promisc_changed);
		}
		/* Now move all of the filters from the temp add list back to
		 * the VSI's list.
		 */
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			/* Only update the state if we're still NEW */
			if (new->f->state == I40E_FILTER_NEW)
				new->f->state = new->state;
			hlist_del(&new->hlist);
			kfree(new);
		}
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		kfree(add_list);
		add_list = NULL;
	}

	/* Determine the number of active and failed filters. */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	vsi->active_filters = 0;
	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->state == I40E_FILTER_ACTIVE)
			vsi->active_filters++;
		else if (f->state == I40E_FILTER_FAILED)
			failed_filters++;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* If promiscuous mode has changed, we need to calculate a new
	 * threshold for when we are safe to exit
	 */
	if (promisc_changed)
		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;

	/* Check if we are able to exit overflow promiscuous mode. We can
	 * safely exit if we didn't just enter, we no longer have any failed
	 * filters, and we have reduced filters below the threshold value.
	 */
	if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
	    !promisc_changed && !failed_filters &&
	    (vsi->active_filters < vsi->promisc_threshold)) {
		dev_info(&pf->pdev->dev,
			 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
			 vsi_name);
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		promisc_changed = true;
		vsi->promisc_threshold = 0;
	}

	/* if the VF is not trusted do not do promisc */
	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		goto out;
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed on %s, err %s aq_err %s\n",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	if ((changed_flags & IFF_PROMISC) || promisc_changed) {
		bool cur_promisc;

		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_VSI_OVERFLOW_PROMISC,
					vsi->state));
		aq_ret = i40e_set_promiscuous(pf, cur_promisc);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
				 cur_promisc ? "on" : "off",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}
out:
	/* if something went wrong then set the changed flag so we try again */
	if (retval)
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return retval;

err_no_memory:
	/* Restore elements on the temporary add and delete lists */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
err_no_memory_locked:
	i40e_undo_del_filter_entries(vsi, &tmp_del_list);
	i40e_undo_add_filter_entries(vsi, &tmp_add_list);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return -ENOMEM;
}

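/* Illustrative example (not driver code): the overflow-promiscuous exit
 * threshold computed above is 3/4 of the filters active when promisc
 * was forced on. With 64 active filters:
 *
 *	promisc_threshold = (64 * 3) / 4;	// 48
 *
 * so the VSI leaves overflow promiscuous mode only once a later sync
 * sees no failed filters and fewer than 48 active ones.
 */
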
/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;

	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
			int ret = i40e_sync_vsi_filters(pf->vsi[v]);

			if (ret) {
				/* come back and try again later */
				pf->flags |= I40E_FLAG_FILTER_SYNC;
				break;
			}
		}
	}
}

/**
 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: the vsi
 **/
static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
{
	if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
		return I40E_RXBUFFER_2048;
	else
		return I40E_RXBUFFER_3072;
}

/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	if (i40e_enabled_xdp_vsi(vsi)) {
		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

		if (frame_size > i40e_max_xdp_frame_size(vsi))
			return -EINVAL;
	}

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);
	pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
		      I40E_FLAG_CLIENT_L2_CHANGE);
	return 0;
}

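/* Illustrative example (not driver code): the XDP frame-size check
 * above. With new_mtu = 3000, frame_size = 3000 + ETH_HLEN (14) +
 * ETH_FCS_LEN (4) + VLAN_HLEN (4) = 3022, which fits the 3072-byte XDP
 * buffer on 4K-page non-legacy-Rx systems but would be rejected against
 * I40E_RXBUFFER_2048 (legacy Rx or PAGE_SIZE >= 8192).
 */
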
/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return i40e_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return i40e_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;  /* already enabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}

/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}

/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/
static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);
}

/**
 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only , -1 = any)
 *
 * This is a helper function for adding a new MAC/VLAN filter with the
 * specified VLAN for each existing MAC address already in the hash table.
 * This function does *not* perform any accounting to update filters based on
 * VLAN mode.
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 **/
int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	struct hlist_node *h;
	int bkt;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add_f = i40e_add_filter(vsi, f->macaddr, vid);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	return 0;
}

/**
 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be added
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
{
	int err;

	if (vsi->info.pvid)
		return -EINVAL;

	/* The network stack will attempt to add VID=0, with the intention to
	 * receive priority tagged packets with a VLAN of 0. Our HW receives
	 * these packets by default when configured to receive untagged
	 * packets, so we don't need to add a filter for this case.
	 * Additionally, HW interprets adding a VID=0 filter as meaning to
	 * receive *only* tagged traffic and stops receiving untagged traffic.
	 * Thus, we do not want to actually add a filter for VID=0
	 */
	if (!vid)
		return 0;

	/* Locked once because all functions invoked below iterate the list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	err = i40e_add_vlan_all_mac(vsi, vid);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (err)
		return err;

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}

/**
 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
 *
 * This function should be used to remove all VLAN filters which match the
 * given VID. It does not schedule the service event and does not take the
 * mac_filter_hash_lock so it may be combined with other operations under
 * a single invocation of the mac_filter_hash_lock.
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 **/
void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->vlan == vid)
			__i40e_del_filter(vsi, f);
	}
}

/**
 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be removed
 **/
void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
{
	if (!vid || vsi->info.pvid)
		return;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_rm_vlan_all_mac(vsi, vid);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
}

/**
 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 **/
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	int ret = 0;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	ret = i40e_vsi_add_vlan(vsi, vid);
	if (!ret)
		set_bit(vid, vsi->active_vlans);

	return ret;
}

/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 **/
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* return code is ignored as there is nothing a user
	 * can do about failure to remove and a log message was
	 * already printed from the other function
	 */
	i40e_vsi_kill_vlan(vsi, vid);

	clear_bit(vid, vsi->active_vlans);

	return 0;
}

/**
 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
 * @vsi: the vsi being brought back up
 **/
static void i40e_restore_vlan(struct i40e_vsi *vsi)
{
	u16 vid;

	if (!vsi->netdev)
		return;

	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);

	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
				     vid);
}

/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 **/
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
				    I40E_AQ_VSI_PVLAN_EMOD_STR;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add pvid failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		return -ENOENT;
	}

	return 0;
}

/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just use the vlan_rx_register() service to put it back to normal
 **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	i40e_vlan_stripping_disable(vsi);

	vsi->info.pvid = 0;
}

/**
 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);

	if (!i40e_enabled_xdp_vsi(vsi))
		return err;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);

	return err;
}

/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free VSI's transmit software resources
 **/
static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
				i40e_free_tx_resources(vsi->tx_rings[i]);
	}

	if (vsi->xdp_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
				i40e_free_tx_resources(vsi->xdp_rings[i]);
	}
}

/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);

	return err;
}

/**
 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/
static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
}

/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 **/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	int cpu;

	if (!ring->q_vector || !ring->netdev || ring->ch)
		return;

	/* We only initialize XPS once, so as not to overwrite user settings */
	if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
		return;

	cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
	netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
			    ring->queue_index);
}

/**
 * i40e_configure_tx_ring - Configure a transmit ring context and rest
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 **/
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* configure XPS */
	i40e_config_xps_tx_ring(ring);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
		tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = ring->dma +
			      (ring->count * sizeof(struct i40e_tx_desc));

	/* As part of VSI creation/update, FW allocates certain
	 * Tx arbitration queue sets for each TC enabled for
	 * the VSI. The FW returns the handles to these queue
	 * sets as part of the response buffer to Add VSI,
	 * Update VSI, etc. AQ commands. It is expected that
	 * these queue set handles be associated with the Tx
	 * queues by the driver as part of the TX queue context
	 * initialization. This has to be done regardless of
	 * DCB as by default everything is mapped to TC0.
	 */
	if (ring->ch)
		tx_ctx.rdylist =
			le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
	else
		tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);

	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	if (ring->ch) {
		if (ring->ch->type == I40E_VSI_VMDQ2)
			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
		else
			return -EINVAL;

		qtx_ctl |= (ring->ch->vsi_number <<
			    I40E_QTX_CTL_VFVM_INDX_SHIFT) &
			    I40E_QTX_CTL_VFVM_INDX_MASK;
	} else {
		if (vsi->type == I40E_VSI_VMDQ2) {
			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
			qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
				    I40E_QTX_CTL_VFVM_INDX_MASK;
		} else {
			qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
		}
	}

	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	/* cache tail off for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}

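/* Illustrative example (not driver code): HMC queue context fields are
 * expressed in hardware units rather than bytes. tx_ctx.base above is
 * the ring's DMA address in 128-byte units, and the head writeback
 * address sits directly after the descriptor array:
 *
 *	dma_addr_t dma = 0x1f000000;	// hypothetical ring DMA address
 *	u64 base = dma / 128;		// 0x3e0000 context units
 *	u64 head_wb = dma + ring_count * sizeof(struct i40e_tx_desc);
 */
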
/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	ring->rx_buf_len = vsi->rx_buf_len;

	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));

	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	/* use 32 byte descriptors */
	rx_ctx.dsize = 1;

	/* descriptor type is always zero
	 * rx_ctx.dtype = 0;
	 */
	rx_ctx.hsplit_0 = 0;

	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	/* this controls whether VLAN is stripped from inner headers */
	rx_ctx.showiv = 0;
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* configure Rx buffer alignment */
	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
		clear_ring_build_skb_enabled(ring);
	else
		set_ring_build_skb_enabled(ring);

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));

	return 0;
}

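/* Illustrative example (not driver code): rx_ctx.dbuff above encodes
 * the Rx data buffer size in 128-byte units (I40E_RXQ_CTX_DBUFF_SHIFT
 * is 7). For a 2048-byte buffer:
 *
 *	u16 dbuff = DIV_ROUND_UP(2048, BIT_ULL(7));	// 16 units
 *
 * while rxmax caps the receivable frame at the smaller of the VSI max
 * frame and chain_len * rx_buf_len, the longest buffer chain hardware
 * is allowed to build for a single packet.
 */
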
/**
 * i40e_vsi_configure_tx - Configure the VSI for Tx
 * @vsi: VSI structure describing this set of rings and resources
 *
 * Configure the Tx VSI for operation.
 **/
static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->tx_rings[i]);

	if (!i40e_enabled_xdp_vsi(vsi))
		return err;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->xdp_rings[i]);

	return err;
}

/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Configure the Rx VSI for operation.
 **/
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
		vsi->max_frame = I40E_MAX_RXBUFFER;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
#if (PAGE_SIZE < 8192)
	} else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = I40E_MAX_RXBUFFER;
		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
						       I40E_RXBUFFER_2048;
	}

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}

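/* Illustrative example (not driver code): the buffer selection above on
 * a 4K-page system without legacy Rx. With an MTU of 1500
 * (<= ETH_DATA_LEN) and 2K buffers large enough despite padding, both
 * max_frame and rx_buf_len become I40E_RXBUFFER_1536 - NET_IP_ALIGN,
 * letting two Rx buffers share one page alongside their metadata; a
 * larger MTU instead selects 3072-byte buffers on small-page systems.
 */
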
/**
 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
 * @vsi: ptr to the VSI
 **/
static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring, *rx_ring;
	u16 qoffset, qcount;
	int i, n;

	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Reset the TC information */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = 0;
			tx_ring->dcb_tc = 0;
		}
		return;
	}

	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
			continue;

		qoffset = vsi->tc_config.tc_info[n].qoffset;
		qcount = vsi->tc_config.tc_info[n].qcount;
		for (i = qoffset; i < (qoffset + qcount); i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = n;
			tx_ring->dcb_tc = n;
		}
	}
}

/**
 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
 * @vsi: ptr to the VSI
 **/
static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
{
	if (vsi->netdev)
		i40e_set_rx_mode(vsi->netdev);
}

/**
 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
 * @vsi: Pointer to the targeted VSI
 *
 * This function replays the hlist on the hw where all the SB Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
{
	struct i40e_fdir_filter *filter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* Reset FDir counters as we're replaying all existing filters */
	pf->fd_tcp4_filter_cnt = 0;
	pf->fd_udp4_filter_cnt = 0;
	pf->fd_sctp4_filter_cnt = 0;
	pf->fd_ip4_filter_cnt = 0;

	hlist_for_each_entry_safe(filter, node,
				  &pf->fdir_filter_list, fdir_node) {
		i40e_add_del_fdir(vsi, filter, true);
	}
}

/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);
	err = i40e_vsi_configure_tx(vsi);
	if (!err)
		err = i40e_vsi_configure_rx(vsi);

	return err;
}

/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	bool has_xdp = i40e_enabled_xdp_vsi(vsi);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[i];

		q_vector->itr_countdown = ITR_COUNTDOWN_START;
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);
		wr32(hw, I40E_PFINT_RATEN(vector - 1),
		     i40e_intrl_usec_to_reg(vsi->int_rate_limit));

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
			u32 val;

			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_TX <<
			       I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			if (has_xdp) {
				val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
				      (I40E_TX_ITR <<
				       I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
				      (vector <<
				       I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
				      (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
				      (I40E_QUEUE_TYPE_TX <<
				       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

				wr32(hw, I40E_QINT_TQCTL(nextqp), val);
			}

			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_RX <<
			       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST <<
					I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}

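/* Illustrative example (not driver code): the per-vector queue linked
 * list programmed above. Assuming a vector owning queue pairs 4 and 5
 * on a non-XDP VSI, the registers chain as:
 *
 *	PFINT_LNKLSTN(v-1) = 4;		// list head: first Rx queue
 *	QINT_RQCTL(4) next = Tx 4;	// Rx 4 points at Tx 4
 *	QINT_TQCTL(4) next = Rx 5;	// Tx 4 points at Rx 5
 *	QINT_RQCTL(5) next = Tx 5;
 *	QINT_TQCTL(5) next = I40E_QUEUE_END_OF_LIST;
 *
 * so hardware can walk every queue that fires this MSI-X vector.
 */
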
/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: pointer to the PF structure
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;

	if (pf->flags & I40E_FLAG_PTP)
		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	     I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

/**
 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
	u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* set the ITR configuration */
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
	q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
	q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
	q_vector->tx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);

	i40e_enable_misc_int_causes(pf);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the queue int */
	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

	wr32(hw, I40E_QINT_RQCTL(0), val);

	if (i40e_enabled_xdp_vsi(vsi)) {
		val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		      (I40E_QUEUE_TYPE_TX
		       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

		wr32(hw, I40E_QINT_TQCTL(nextqp), val);
	}

	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

	wr32(hw, I40E_QINT_TQCTL(0), val);
	i40e_flush(hw);
}

/**
 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
 * @pf: board private structure
 **/
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;

	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	i40e_flush(hw);
}

/**
 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
 * @pf: board private structure
 **/
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
	      I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);

	wr32(hw, I40E_PFINT_DYN_CTL0, val);
	i40e_flush(hw);
}

/**
 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * i40e_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct i40e_q_vector *q_vector =
		container_of(notify, struct i40e_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * i40e_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void i40e_irq_affinity_release(struct kref *ref) {}

/**
 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
 * @vsi: the VSI being configured
 * @basename: name for the vector
 *
 * Allocates MSI-X vectors and requests interrupts from the kernel.
 **/
static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct i40e_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;
	int cpu;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  vsi->irq_handler,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSIX request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}

		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
		q_vector->affinity_notify.release = i40e_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread affinity hints out across online CPUs.
		 *
		 * get_cpu_mask returns a static constant mask with
		 * a permanent lifetime so it's ok to pass to
		 * irq_set_affinity_hint without making a copy.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	vsi->irqs_ready = true;
	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	/* disable interrupt causation from each queue */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u32 val;

		val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
		val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);

		val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
		val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);

		if (!i40e_enabled_xdp_vsi(vsi))
			continue;
		wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
	}

	/* disable each interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}

/**
 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_irq_dynamic_enable(vsi, i);
	} else {
		i40e_irq_dynamic_enable_icr0(pf);
	}

	i40e_flush(&pf->hw);
	return 0;
}

/**
 * i40e_free_misc_vector - Free the vector that handles non-queue events
 * @pf: board private structure
 **/
static void i40e_free_misc_vector(struct i40e_pf *pf)
{
	/* Disable ICR 0 */
	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
	i40e_flush(&pf->hw);

	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
		clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
	}
}

3813 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3814 * @irq: interrupt number
3815 * @data: pointer to a q_vector
3817 * This is the handler used for all MSI/Legacy interrupts, and deals
3818 * with both queue and non-queue interrupts. This is also used in
3819 * MSIX mode to handle the non-queue interrupts.
3821 static irqreturn_t
i40e_intr(int irq
, void *data
)
3823 struct i40e_pf
*pf
= (struct i40e_pf
*)data
;
3824 struct i40e_hw
*hw
= &pf
->hw
;
3825 irqreturn_t ret
= IRQ_NONE
;
3826 u32 icr0
, icr0_remaining
;
3829 icr0
= rd32(hw
, I40E_PFINT_ICR0
);
3830 ena_mask
= rd32(hw
, I40E_PFINT_ICR0_ENA
);
	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
	}

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
		struct i40e_q_vector *q_vector = vsi->q_vectors[0];

		/* We do not have a way to disarm Queue causes while leaving
		 * interrupt enabled for all other causes, ideally
		 * interrupt should be disabled while we are in NAPI but
		 * this is not a performance path and napi_schedule()
		 * can deal with rescheduling.
		 */
		if (!test_bit(__I40E_DOWN, pf->state))
			napi_schedule_irqoff(&q_vector->napi);
	}

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
	}

	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
		}
	}

	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
			 rd32(hw, I40E_PFHMC_ERRORINFO),
			 rd32(hw, I40E_PFHMC_ERRORDATA));
	}

	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
		}
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
			dev_info(&pf->pdev->dev, "device will be reset\n");
			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
			i40e_service_event_schedule(pf);
		}
		ena_mask &= ~icr0_remaining;
	}
	ret = IRQ_HANDLED;

enable_intr:
	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	if (!test_bit(__I40E_DOWN, pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return ret;
}
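/* Note on the cause-masking pattern above (a summary, not new behavior):
 * causes handled in line are left enabled in ena_mask, while causes
 * deferred to the service task (AdminQ, MDD, VFLR, ...) are cleared from
 * ena_mask before it is written back to I40E_PFINT_ICR0_ENA, so they stay
 * masked and cannot re-fire until the service task has actually serviced
 * and re-enabled them.
 */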
/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	struct i40e_vsi *vsi = tx_ring->vsi;
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;
		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}
		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buf->raw_buf);

		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move us past the eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);

	return budget > 0;
}
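/* Note on the index arithmetic above: 'i' is biased by -tx_ring->count so
 * that hitting the end of the ring is detected with a single "if (!i)"
 * test.  For example (illustrative numbers), with count = 512 and
 * next_to_clean = 510, 'i' starts at -2 and becomes 0 exactly when the
 * unbiased index would wrap from 511 back to 0; adding count back at the
 * end recovers the real next_to_clean.
 */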
/**
 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;
	struct i40e_vsi *vsi;

	if (!q_vector->tx.ring)
		return IRQ_HANDLED;

	vsi = q_vector->tx.ring->vsi;
	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);

	return IRQ_HANDLED;
}
/**
 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
 * @vsi: the VSI being configured
 * @v_idx: vector index
 * @qp_idx: queue pair index
 **/
static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;

	/* Place XDP Tx ring in the same q_vector ring list as regular Tx */
	if (i40e_enabled_xdp_vsi(vsi)) {
		struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];

		xdp_ring->q_vector = q_vector;
		xdp_ring->next = q_vector->tx.ring;
		q_vector->tx.ring = xdp_ring;
		q_vector->tx.count++;
	}

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
}
/**
 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per queue pair, but on a constrained vector budget, we
 * group the queue pairs as "efficiently" as possible.
 **/
static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
	int qp_remaining = vsi->num_queue_pairs;
	int q_vectors = vsi->num_q_vectors;
	int num_ringpairs;
	int v_start = 0;
	int qp_idx = 0;

	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
	 * group them so there are multiple queues per vector.
	 * It is also important to go through all the vectors available to be
	 * sure that if we don't use all the vectors, that the remaining vectors
	 * are cleared. This is especially important when decreasing the
	 * number of queues in use.
	 */
	for (; v_start < q_vectors; v_start++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];

		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);

		q_vector->num_ringpairs = num_ringpairs;

		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;

		while (num_ringpairs--) {
			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
			qp_idx++;
			qp_remaining--;
		}
	}
}
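/* Worked example of the split above (illustrative numbers): 10 queue
 * pairs on 4 vectors.
 * v0 gets DIV_ROUND_UP(10, 4) = 3, leaving 7 pairs for 3 vectors;
 * v1 gets DIV_ROUND_UP(7, 3) = 3, leaving 4 pairs for 2 vectors;
 * v2 gets DIV_ROUND_UP(4, 2) = 2; v3 gets DIV_ROUND_UP(2, 1) = 2.
 * The distribution is as even as possible and never dumps the whole
 * remainder on the last vector.
 */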
/**
 * i40e_vsi_request_irq - Request IRQ from the OS
 * @vsi: the VSI being configured
 * @basename: name for the vector
 **/
static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		err = i40e_vsi_request_irq_msix(vsi, basename);
	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
		err = request_irq(pf->pdev->irq, i40e_intr, 0,
				  pf->int_name, pf);
	else
		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
				  pf->int_name, pf);

	if (err)
		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);

	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/
static void i40e_netpoll(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		i40e_intr(pf->pdev->irq, netdev);
	}
}
#endif
#define I40E_QTX_ENA_WAIT_COUNT 50

/**
 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Tx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 **/
static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 tx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}
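/* The wait above is bounded: at most I40E_QUEUE_WAIT_RETRY_LIMIT polls of
 * 10-20 usecs each, so a queue that never reaches the requested state
 * costs the caller only a short, bounded delay before -ETIMEDOUT.
 */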
/**
 * i40e_control_tx_q - Start or stop a particular Tx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue. Note that any delay
 * required after the operation is expected to be handled by the caller of
 * this function.
 **/
static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	u32 tx_reg;
	int i;

	/* warn the TX unit of coming changes */
	i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
	if (!enable)
		usleep_range(10, 20);

	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
		if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* Skip if the queue is already in the requested state */
	if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
		return;

	/* turn on/off the queue */
	if (enable) {
		wr32(hw, I40E_QTX_HEAD(pf_q), 0);
		tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
	} else {
		tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	}

	wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
}
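/* The QENA_REQ/QENA_STAT handshake above: software owns the REQ bit,
 * hardware owns the STAT bit.  The initial poll waits until a previous
 * request has been acknowledged (REQ == STAT) before writing a new one,
 * since toggling REQ while the two bits disagree would race an in-flight
 * enable/disable.
 */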
/**
 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
 * @seid: SEID of the VSI owning the queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @is_xdp: true if the queue is used for XDP
 * @enable: start or stop the queue
 **/
static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
				  bool is_xdp, bool enable)
{
	int ret;

	i40e_control_tx_q(pf, pf_q, enable);

	/* wait for the change to finish */
	ret = i40e_pf_txq_wait(pf, pf_q, enable);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "VSI seid %d %sTx ring %d %sable timeout\n",
			 seid, (is_xdp ? "XDP " : ""), pf_q,
			 (enable ? "en" : "dis"));

	return ret;
}
/**
 * i40e_vsi_control_tx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret = 0;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     pf_q,
					     false /*is xdp*/, enable);
		if (ret)
			break;

		if (!i40e_enabled_xdp_vsi(vsi))
			continue;

		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     pf_q + vsi->alloc_queue_pairs,
					     true /*is xdp*/, enable);
		if (ret)
			break;
	}

	return ret;
}
/**
 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 **/
static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 rx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}
/**
 * i40e_control_rx_q - Start or stop a particular Rx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue. Note that any delay
 * required after the operation is expected to be handled by the caller of
 * this function.
 **/
static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	u32 rx_reg;
	int i;

	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
		if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* Skip if the queue is already in the requested state */
	if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
		return;

	/* turn on/off the queue */
	if (enable)
		rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
	else
		rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;

	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
}
/**
 * i40e_vsi_control_rx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret = 0;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		i40e_control_rx_q(pf, pf_q, enable);

		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	/* Due to HW errata, on Rx disable only, the register can indicate done
	 * before it really is. Needs 50ms to be sure
	 */
	if (!enable)
		mdelay(50);

	return ret;
}
/**
 * i40e_vsi_start_rings - Start a VSI's rings
 * @vsi: the VSI being configured
 **/
int i40e_vsi_start_rings(struct i40e_vsi *vsi)
{
	int ret = 0;

	/* do rx first for enable and last for disable */
	ret = i40e_vsi_control_rx(vsi, true);
	if (ret)
		return ret;
	ret = i40e_vsi_control_tx(vsi, true);

	return ret;
}
/**
 * i40e_vsi_stop_rings - Stop a VSI's rings
 * @vsi: the VSI being configured
 **/
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
	/* When port TX is suspended, don't wait */
	if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
		return i40e_vsi_stop_rings_no_wait(vsi);

	/* do rx first for enable and last for disable
	 * Ignore return value, we need to shutdown whatever we can
	 */
	i40e_vsi_control_tx(vsi, false);
	i40e_vsi_control_rx(vsi, false);
}
/**
 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
 * @vsi: the VSI being shutdown
 *
 * This function stops all the rings for a VSI but does not delay to verify
 * that rings have been disabled. It is expected that the caller is shutting
 * down multiple VSIs at once and will delay together for all the VSIs after
 * initiating the shutdown. This is particularly useful for shutting down lots
 * of VFs together. Otherwise, a large delay can be incurred while configuring
 * each VSI in serial.
 **/
void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		i40e_control_tx_q(pf, pf_q, false);
		i40e_control_rx_q(pf, pf_q, false);
	}
}
/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			int irq_num;
			u16 vector;

			vector = i + base;
			irq_num = pf->msix_entries[vector].vector;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity notifier in the IRQ descriptor */
			irq_set_affinity_notifier(irq_num, NULL);
			/* remove our suggested affinity mask for this IRQ */
			irq_set_affinity_hint(irq_num, NULL);
			synchronize_irq(irq_num);
			free_irq(irq_num, vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx.  To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);

				qp = next;
			}
		}
	} else {
		free_irq(pf->pdev->irq, pf);

		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}
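/* Layout of the interrupt queue link list torn down above (sketch):
 *
 *   PFINT_LNKLSTN[vector] --> Rx(q) --> Tx(q) --> Rx(q') --> ... --> EOL
 *
 * Each QINT_RQCTL/QINT_TQCTL carries a NEXTQ_INDX field pointing at the
 * next element, so the walk reads the Tx side's NEXTQ_INDX to find the
 * next pair before both registers of the current pair are neutralized.
 */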
/**
 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *ring;

	if (!q_vector)
		return;

	/* disassociate q_vector from rings */
	i40e_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;

	i40e_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI w/ an associated netdev is set up w/ NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	vsi->q_vectors[v_idx] = NULL;

	kfree_rcu(q_vector, rcu);
}
/**
 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI being un-configured
 *
 * This frees the memory allocated to the q_vectors and
 * deletes references to the NAPI struct.
 **/
static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
{
	int v_idx;

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
		i40e_free_q_vector(vsi, v_idx);
}
/**
 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
 * @pf: board private structure
 **/
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		pci_disable_msix(pf->pdev);
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		kfree(pf->irq_pile);
		pf->irq_pile = NULL;
	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
		pci_disable_msi(pf->pdev);
	}
	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
}
/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
	int i;

	i40e_free_misc_vector(pf);

	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
		      I40E_IWARP_IRQ_PILE_ID);

	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
}
/**
 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_enable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_enable(&q_vector->napi);
	}
}
/**
 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_disable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_disable(&q_vector->napi);
	}
}
/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 **/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
	vsi->current_netdev_flags = 0;
	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		pf->flags |= I40E_FLAG_CLIENT_RESET;
}
/**
 * i40e_quiesce_vsi - Pause a given VSI
 * @vsi: the VSI being paused
 **/
static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
	else
		i40e_vsi_close(vsi);
}
/**
 * i40e_unquiesce_vsi - Resume a given VSI
 * @vsi: the VSI being resumed
 **/
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{
	if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
		return;

	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
	else
		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
}
/**
 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_quiesce_vsi(pf->vsi[v]);
	}
}
/**
 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_unquiesce_vsi(pf->vsi[v]);
	}
}
/**
 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
 * @vsi: the VSI being configured
 *
 * Wait until all queues on a given VSI have been disabled.
 **/
int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* Check and wait for the Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}

		if (!i40e_enabled_xdp_vsi(vsi))
			goto wait_rx;

		/* Check and wait for the XDP Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
				       false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d XDP Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
wait_rx:
		/* Check and wait for the Rx queue */
		ret = i40e_pf_rxq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
	}

	return 0;
}
#ifdef CONFIG_I40E_DCB
/**
 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
 * @pf: the PF
 *
 * This function waits for the queues to be in disabled state for all the
 * VSIs that are managed by this PF.
 **/
static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
	int v, ret = 0;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v]) {
			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
			if (ret)
				break;
		}
	}

	return ret;
}

#endif
/**
 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
 * @q_idx: TX queue number
 * @vsi: Pointer to VSI struct
 *
 * This function checks specified queue for given VSI. Detects hung condition.
 * We proactively detect hung TX queues by checking if interrupts are disabled
 * but there are pending descriptors.  If it appears hung, attempt to recover
 * by triggering a SW interrupt.
 **/
static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct i40e_pf	*pf;
	u32 val, tx_pending;
	int i;

	pf = vsi->back;

	/* now that we have an index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (q_idx == vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (!tx_ring)
		return;

	/* Read interrupt register */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		val = rd32(&pf->hw,
			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					       tx_ring->vsi->base_vector - 1));
	else
		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

	tx_pending = i40e_get_tx_pending(tx_ring);

	/* Interrupts are disabled and TX pending is non-zero,
	 * trigger the SW interrupt (don't wait). Worst case
	 * there will be one extra interrupt which may result
	 * into not cleaning any queues because queues are cleaned.
	 */
	if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
		i40e_force_wb(vsi, tx_ring->q_vector);
}
/**
 * i40e_detect_recover_hung - Function to detect and recover hung_queues
 * @pf: pointer to PF struct
 *
 * LAN VSI has netdev and netdev has TX queues. This function is to check
 * each of those TX queues if they are hung, trigger recovery by issuing
 * SW interrupt.
 **/
static void i40e_detect_recover_hung(struct i40e_pf *pf)
{
	struct net_device *netdev;
	struct i40e_vsi *vsi;
	unsigned int i;

	/* Only for LAN VSI */
	vsi = pf->vsi[pf->lan_vsi];

	if (!vsi)
		return;

	/* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
		return;

	/* Make sure type is MAIN VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	/* Bail out if netif_carrier is not OK */
	if (!netif_carrier_ok(netdev))
		return;

	/* Go thru' TX queues for netdev */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;

		q = netdev_get_tx_queue(netdev, i);
		if (q)
			i40e_detect_recover_hung_queue(i, vsi);
	}
}
/**
 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
 * @pf: pointer to PF
 *
 * Get TC map for ISCSI PF type that will include iSCSI TC
 * and LAN TC.
 **/
static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
{
	struct i40e_dcb_app_priority_table app;
	struct i40e_hw *hw = &pf->hw;
	u8 enabled_tc = 1; /* TC0 is always enabled */
	u8 tc, i;
	/* Get the iSCSI APP TLV */
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	for (i = 0; i < dcbcfg->numapps; i++) {
		app = dcbcfg->app[i];
		if (app.selector == I40E_APP_SEL_TCPIP &&
		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
			tc = dcbcfg->etscfg.prioritytable[app.priority];
			enabled_tc |= BIT(tc);
			break;
		}
	}

	return enabled_tc;
}
/**
 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Return the number of TCs from given DCBx configuration
 **/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
	int i, tc_unused = 0;
	u8 num_tc = 0;
	u8 ret = 0;

	/* Scan the ETS Config Priority Table to find
	 * traffic class enabled for a given priority
	 * and create a bitmask of enabled TCs
	 */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);

	/* Now scan the bitmask to check for
	 * contiguous TCs starting with TC0
	 */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (num_tc & BIT(i)) {
			if (!tc_unused) {
				ret++;
			} else {
				pr_err("Non-contiguous TC - Disabling DCB\n");
				return 1;
			}
		} else {
			tc_unused = 1;
		}
	}

	/* There is always at least TC0 */
	if (!ret)
		ret = 1;

	return ret;
}
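/* Worked example (illustrative): prioritytable = {0,0,1,1,2,0,0,0} yields
 * num_tc = BIT(0)|BIT(1)|BIT(2) = 0x7, a contiguous run, so 3 TCs are
 * reported.  A table producing only BIT(0)|BIT(2) would trip the
 * non-contiguous check above and fall back to a single TC.
 */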
/**
 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Query the current DCB configuration and return a bitmap of the
 * traffic classes enabled from the given DCBX config
 **/
static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
	u8 enabled_tc = 1;
	u8 i;

	for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);

	return enabled_tc;
}
/**
 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
 * @pf: PF being queried
 *
 * Query the current MQPRIO configuration and return a bitmap of the
 * traffic classes enabled.
 **/
static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
	u8 enabled_tc = 1, i;

	for (i = 1; i < num_tc; i++)
		enabled_tc |= BIT(i);
	return enabled_tc;
}
/**
 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
 * @pf: PF being queried
 *
 * Return number of traffic classes enabled for the given PF
 **/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 i, enabled_tc = 1;
	u8 num_tc = 0;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;

	/* If neither MQPRIO nor DCB is enabled, then always use single TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return 1;

	/* SFP mode will be enabled for all TCs on port */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_num_tc(dcbcfg);

	/* MFP mode return count of enabled TCs for this PF */
	if (pf->hw.func_caps.iscsi)
		enabled_tc = i40e_get_iscsi_tc_map(pf);
	else
		return 1; /* Only TC0 */

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			num_tc++;
	}
	return num_tc;
}
/**
 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
 **/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return i40e_mqprio_get_enabled_tc(pf);

	/* If neither MQPRIO nor DCB is enabled for this PF then just return
	 * default TC
	 */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return I40E_DEFAULT_TRAFFIC_CLASS;

	/* SFP mode we want PF to be enabled for all TCs */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);

	/* MFP enabled and iSCSI PF type */
	if (pf->hw.func_caps.iscsi)
		return i40e_get_iscsi_tc_map(pf);
	else
		return I40E_DEFAULT_TRAFFIC_CLASS;
}
/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}
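/* tc_bw_max unpacking above (illustrative): the AQ response carries two
 * little-endian 16-bit words that combine into one 32-bit value holding a
 * 4-bit field per TC, of which only the low 3 bits are meaningful, hence
 * (tc_bw_max >> (i * 4)) & 0x7 for TC 'i'.
 */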
/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status ret;
	int i;

	if (vsi->back->flags & I40E_FLAG_TC_MQPRIO)
		return 0;
	if (!vsi->mqprio_qopt.qopt.hw) {
		ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
		if (ret)
			dev_info(&vsi->back->pdev->dev,
				 "Failed to reset tx rate for vsi->seid %u\n",
				 vsi->seid);
		return ret;
	}
	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
				       NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 vsi->back->hw.aq.asq_last_status);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}
/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x00001001; the driver
		 * will set the numtc for netdev as 2 that will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & BIT(i))
			netdev_set_tc_queue(netdev,
					vsi->tc_config.tc_info[i].netdev_tc,
					vsi->tc_config.tc_info[i].qcount,
					vsi->tc_config.tc_info[i].qoffset);
	}

	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return;

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}
/**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}
/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc &&
	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
		ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
		if (ret)
			goto out;
	} else {
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
	}

	/* On destroying the qdisc, reset vsi->rss_size, as number of enabled
	 * queues changed.
	 */
	if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
		vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
				      vsi->num_queue_pairs);
		ret = i40e_vsi_config_rss(vsi);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "Failed to reconfig rss for num_queues\n");
			return ret;
		}
		vsi->reconfig_rss = false;
	}
	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
		ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}

	/* Update the VSI after updating the VSI queue-mapping
	 * information
	 */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Update vsi tc config failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}
/**
 * i40e_get_link_speed - Returns link speed for the interface
 * @vsi: VSI to be configured
 *
 **/
int i40e_get_link_speed(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		return 40000;
	case I40E_LINK_SPEED_25GB:
		return 25000;
	case I40E_LINK_SPEED_20GB:
		return 20000;
	case I40E_LINK_SPEED_10GB:
		return 10000;
	case I40E_LINK_SPEED_1GB:
		return 1000;
	default:
		return -EINVAL;
	}
}
/**
 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
 * @vsi: VSI to be configured
 * @seid: seid of the channel/VSI
 * @max_tx_rate: max TX rate to be configured as BW limit
 *
 * Helper function to set BW limit for a given VSI
 **/
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
{
	struct i40e_pf *pf = vsi->back;
	u64 credits = 0;
	int speed = 0;
	int ret = 0;

	speed = i40e_get_link_speed(vsi);
	if (max_tx_rate > speed) {
		dev_err(&pf->pdev->dev,
			"Invalid max tx rate %llu specified for VSI seid %d.",
			max_tx_rate, seid);
		return -EINVAL;
	}
	if (max_tx_rate && max_tx_rate < 50) {
		dev_warn(&pf->pdev->dev,
			 "Setting max tx rate to minimum usable value of 50Mbps.\n");
		max_tx_rate = 50;
	}

	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
	credits = max_tx_rate;
	do_div(credits, I40E_BW_CREDIT_DIVISOR);
	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
			max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	return ret;
}
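/* Credit math above (illustrative): with I40E_BW_CREDIT_DIVISOR of 50, a
 * max_tx_rate of 175 Mbps becomes 175 / 50 = 3 credits (integer
 * division), i.e. the configured limit is quantized down to 150 Mbps;
 * nonzero rates below 50 Mbps were already rounded up to the 50 Mbps
 * minimum.
 */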
/**
 * i40e_remove_queue_channels - Remove queue channels for the TCs
 * @vsi: VSI to be configured
 *
 * Remove queue channels for the TCs
 **/
static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
{
	enum i40e_admin_queue_err last_aq_status;
	struct i40e_cloud_filter *cfilter;
	struct i40e_channel *ch, *ch_tmp;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;
	int ret, i;

	/* Reset rss size that was stored when reconfiguring rss for
	 * channel VSIs with non-power-of-2 queue count.
	 */
	vsi->current_rss_size = 0;

	/* perform cleanup for channels if they exist */
	if (list_empty(&vsi->ch_list))
		return;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		struct i40e_vsi *p_vsi;

		list_del(&ch->list);
		p_vsi = ch->parent_vsi;
		if (!p_vsi || !ch->initialized) {
			kfree(ch);
			continue;
		}
		/* Reset queue contexts */
		for (i = 0; i < ch->num_queue_pairs; i++) {
			struct i40e_ring *tx_ring, *rx_ring;
			u16 pf_q;

			pf_q = ch->base_queue + i;
			tx_ring = vsi->tx_rings[pf_q];
			tx_ring->ch = NULL;

			rx_ring = vsi->rx_rings[pf_q];
			rx_ring->ch = NULL;
		}

		/* Reset BW configured for this VSI via mqprio */
		ret = i40e_set_bw_limit(vsi, ch->seid, 0);
		if (ret)
			dev_info(&vsi->back->pdev->dev,
				 "Failed to reset tx rate for ch->seid %u\n",
				 ch->seid);

		/* delete cloud filters associated with this channel */
		hlist_for_each_entry_safe(cfilter, node,
					  &pf->cloud_filter_list, cloud_node) {
			if (cfilter->seid != ch->seid)
				continue;

			hash_del(&cfilter->cloud_node);
			if (cfilter->dst_port)
				ret = i40e_add_del_cloud_filter_big_buf(vsi,
									cfilter,
									false);
			else
				ret = i40e_add_del_cloud_filter(vsi, cfilter,
								false);
			last_aq_status = pf->hw.aq.asq_last_status;
			if (ret)
				dev_info(&pf->pdev->dev,
					 "Failed to delete cloud filter, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw, last_aq_status));
			kfree(cfilter);
		}

		/* delete VSI from FW */
		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
					     NULL);
		if (ret)
			dev_err(&vsi->back->pdev->dev,
				"unable to remove channel (%d) for parent VSI(%d)\n",
				ch->seid, p_vsi->seid);
		kfree(ch);
	}
	INIT_LIST_HEAD(&vsi->ch_list);
}
/**
 * i40e_is_any_channel - determine if any channel exists
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Returns true if at least one channel exists for the associated VSI,
 * false otherwise
 **/
static bool i40e_is_any_channel(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (ch->initialized)
			return true;
	}

	return false;
}
/**
 * i40e_get_max_queues_for_channel
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Helper function which returns max value among the queue counts set on the
 * channels/TCs created.
 **/
static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	int max = 0;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (!ch->initialized)
			continue;
		if (ch->num_queue_pairs > max)
			max = ch->num_queue_pairs;
	}

	return max;
}
/**
 * i40e_validate_num_queues - validate num_queues w.r.t channel
 * @pf: ptr to PF device
 * @num_queues: number of queues
 * @vsi: the parent VSI
 * @reconfig_rss: indicates should the RSS be reconfigured or not
 *
 * This function validates number of queues in the context of new channel
 * which is being established and determines if RSS should be reconfigured
 * or not for parent VSI.
 **/
static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
				    struct i40e_vsi *vsi, bool *reconfig_rss)
{
	int max_ch_queues;

	if (!reconfig_rss)
		return -EINVAL;

	*reconfig_rss = false;
	if (vsi->current_rss_size) {
		if (num_queues > vsi->current_rss_size) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) > vsi's current_size(%d)\n",
				num_queues, vsi->current_rss_size);
			return -EINVAL;
		} else if ((num_queues < vsi->current_rss_size) &&
			   (!is_power_of_2(num_queues))) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
				num_queues, vsi->current_rss_size);
			return -EINVAL;
		}
	}

	if (!is_power_of_2(num_queues)) {
		/* Find the max num_queues configured for channel if channel
		 * exist.
		 * if channel exist, then enforce 'num_queues' to be more than
		 * max ever queues configured for channel.
		 */
		max_ch_queues = i40e_get_max_queues_for_channel(vsi);
		if (num_queues < max_ch_queues) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) < max queues configured for channel(%d)\n",
				num_queues, max_ch_queues);
			return -EINVAL;
		}
		*reconfig_rss = true;
	}

	return 0;
}
/**
 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
 * @vsi: the VSI being setup
 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
 *
 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
 **/
static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	struct i40e_hw *hw = &pf->hw;
	int local_rss_size;
	u8 *lut;
	int ret;

	if (!vsi->rss_size)
		return -EINVAL;

	if (rss_size > vsi->rss_size)
		return -EINVAL;

	local_rss_size = min_t(int, vsi->rss_size, rss_size);
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Ignoring user configured lut if there is one */
	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);

	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot set RSS lut, err %s aq_err %s\n",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		kfree(lut);
		return ret;
	}
	kfree(lut);

	/* Do the update w.r.t. storing rss_size */
	if (!vsi->orig_rss_size)
		vsi->orig_rss_size = vsi->rss_size;
	vsi->current_rss_size = local_rss_size;

	return ret;
}
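/* Example (illustrative): if the VSI runs 16 queues but a channel needs
 * RSS over only 12, i40e_fill_rss_lut() is given local_rss_size = 12 so
 * every LUT entry stays within queues 0..11 while the table itself keeps
 * its full vsi->rss_table_size length.
 */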
/**
 * i40e_channel_setup_queue_map - Setup a channel queue map
 * @pf: ptr to PF device
 * @ctxt: VSI context structure
 * @ch: ptr to channel structure
 *
 * Setup queue map for a specific channel
 **/
static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
					 struct i40e_vsi_context *ctxt,
					 struct i40e_channel *ch)
{
	u16 qcount, qmap, sections = 0;
	u8 offset = 0;
	int pow;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

	qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
	ch->num_queue_pairs = qcount;

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(qcount);
	if (!is_power_of_2(qcount))
		pow++;

	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);

	ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
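/* qmap example (illustrative): ch->num_queue_pairs = 6 gives
 * pow = ilog2(6) + 1 = 3 since 6 is not a power of two, so the TC[0]
 * mapping advertises 2^3 = 8 queue slots starting at offset 0, the
 * smallest power-of-two span that covers the 6 real queue pairs.
 */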
/**
 * i40e_add_channel - add a channel by adding VSI
 * @pf: ptr to PF device
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @ch: ptr to channel structure
 *
 * Add a channel (VSI) using add_vsi and queue_map
 **/
static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
			    struct i40e_channel *ch)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	u8 enabled_tc = 0x1; /* TC0 enabled */
	int ret;

	if (ch->type != I40E_VSI_VMDQ2) {
		dev_info(&pf->pdev->dev,
			 "add new vsi failed, ch->type %d\n", ch->type);
		return -EINVAL;
	}

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.pf_num = hw->pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = uplink_seid;
	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
	if (ch->type == I40E_VSI_VMDQ2)
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
		ctxt.info.valid_sections |=
		     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
		ctxt.info.switch_id =
		   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
	}

	/* Set queue map for a given VSI context */
	i40e_channel_setup_queue_map(pf, &ctxt, ch);

	/* Now time to create VSI */
	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "add new vsi failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw,
				     pf->hw.aq.asq_last_status));
		return -ENOENT;
	}

	/* Success, update channel */
	ch->enabled_tc = enabled_tc;
	ch->seid = ctxt.seid;
	ch->vsi_number = ctxt.vsi_number;
	ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);

	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	ch->info.mapping_flags = ctxt.info.mapping_flags;
	memcpy(&ch->info.queue_mapping,
	       &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
	memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
	       sizeof(ctxt.info.tc_mapping));

	return 0;
}
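/**
 * i40e_channel_config_bw - configure per-TC BW shares for a channel VSI
 * @vsi: the parent VSI
 * @ch: ptr to channel structure
 * @bw_share: BW shared credits per TC
 **/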
static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
				  u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status ret;
	int i;

	bw_data.tc_valid_bits = ch->enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
				       &bw_data, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
			 vsi->back->hw.aq.asq_last_status, ch->seid);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		ch->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}
/**
 * i40e_channel_config_tx_ring - config TX ring associated with new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Configure TX rings associated with channel (VSI) since queues are being
 * from parent VSI.
 **/
static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
				       struct i40e_vsi *vsi,
				       struct i40e_channel *ch)
{
	i40e_status ret;
	int i;
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (ch->enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	/* configure BW for new VSI */
	ret = i40e_channel_config_bw(vsi, ch, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for channel (seid %u)\n",
			 ch->enabled_tc, ch->seid);
		return ret;
	}

	for (i = 0; i < ch->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring, *rx_ring;
		u16 pf_q;

		pf_q = ch->base_queue + i;

		/* Get to TX ring ptr of main VSI, for re-setup TX queue
		 * context
		 */
		tx_ring = vsi->tx_rings[pf_q];
		tx_ring->ch = ch;

		/* Get the RX ring ptr */
		rx_ring = vsi->rx_rings[pf_q];
		rx_ring->ch = ch;
	}

	return 0;
}
/**
 * i40e_setup_hw_channel - setup new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @type: type of channel to be created (VMDq2/VF)
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and configures TX rings accordingly
 **/
static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
					struct i40e_vsi *vsi,
					struct i40e_channel *ch,
					u16 uplink_seid, u8 type)
{
	int ret;

	ch->initialized = false;
	ch->base_queue = vsi->next_base_queue;
	ch->type = type;

	/* Proceed with creation of channel (VMDq2) VSI */
	ret = i40e_add_channel(pf, uplink_seid, ch);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to add_channel using uplink_seid %u\n",
			 uplink_seid);
		return ret;
	}

	/* Mark the successful creation of channel */
	ch->initialized = true;

	/* Reconfigure TX queues using QTX_CTL register */
	ret = i40e_channel_config_tx_ring(pf, vsi, ch);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to configure TX rings for channel %u\n",
			 ch->seid);
		return ret;
	}

	/* update 'next_base_queue' */
	vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
	dev_dbg(&pf->pdev->dev,
		"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
		ch->seid, ch->vsi_number, ch->stat_counter_idx,
		ch->num_queue_pairs,
		vsi->next_base_queue);
	return ret;
}
/**
 * i40e_setup_channel - setup new channel using uplink element
 * @pf: ptr to PF device
 * @vsi: the parent VSI
 * @ch: ptr to channel structure
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and uplink switching element (uplink_seid)
 **/
static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
			       struct i40e_channel *ch)
{
	u8 vsi_type;
	u16 seid;
	int ret;

	if (vsi->type == I40E_VSI_MAIN) {
		vsi_type = I40E_VSI_VMDQ2;
	} else {
		dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
			vsi->type);
		return false;
	}

	/* underlying switching element */
	seid = pf->vsi[pf->lan_vsi]->uplink_seid;

	/* create channel (VSI), configure TX rings */
	ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
	if (ret) {
		dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
		return false;
	}

	return ch->initialized ? true : false;
}
/**
 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
 * @vsi: ptr to VSI which has PF backing
 *
 * Sets up switch mode correctly if it needs to be changed and perform
 * what are allowed modes.
 **/
static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
{
	u8 mode;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
	if (ret)
		return -EINVAL;

	if (hw->dev_caps.switch_mode) {
		/* if switch mode is set, support mode2 (non-tunneled for
		 * cloud filter) for now
		 */
		u32 switch_mode = hw->dev_caps.switch_mode &
				  I40E_SWITCH_MODE_MASK;
		if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
			if (switch_mode == I40E_CLOUD_FILTER_MODE2)
				return 0;
			dev_err(&pf->pdev->dev,
				"Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
				hw->dev_caps.switch_mode);
			return -EINVAL;
		}
	}

	/* Set Bit 7 to be valid */
	mode = I40E_AQ_SET_SWITCH_BIT7_VALID;

	/* Set L4type to both TCP and UDP support */
	mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;

	/* Set cloud filter mode */
	mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;

	/* Prep mode field for set_switch_config */
	ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
					pf->last_sw_conf_valid_flags,
					mode, NULL);
	if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
		dev_err(&pf->pdev->dev,
			"couldn't set switch config bits, err %s aq_err %s\n",
			i40e_stat_str(hw, ret),
			i40e_aq_str(hw,
				    hw->aq.asq_last_status));

	return ret;
}

/**
 * i40e_create_queue_channel - function to create channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (it contains channel specific params)
 *
 * This function creates channel (VSI) using num_queues specified by user,
 * reconfigs RSS if needed.
 **/
int i40e_create_queue_channel(struct i40e_vsi *vsi,
			      struct i40e_channel *ch)
{
	struct i40e_pf *pf = vsi->back;
	bool reconfig_rss;
	int err;

	if (!ch)
		return -EINVAL;

	if (!ch->num_queue_pairs) {
		dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
			ch->num_queue_pairs);
		return -EINVAL;
	}

	/* validate user requested num_queues for channel */
	err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
				       &reconfig_rss);
	if (err) {
		dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
			 ch->num_queue_pairs);
		return -EINVAL;
	}

	/* By default we are in VEPA mode, if this is the first VF/VMDq
	 * VSI to be added switch to VEB mode.
	 */
	if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
	    (!i40e_is_any_channel(vsi))) {
		if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
			dev_dbg(&pf->pdev->dev,
				"Failed to create channel. Override queues (%u) not power of 2\n",
				vsi->tc_config.tc_info[0].qcount);
			return -EINVAL;
		}

		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;

			if (vsi->type == I40E_VSI_MAIN) {
				if (pf->flags & I40E_FLAG_TC_MQPRIO)
					i40e_do_reset(pf, I40E_PF_RESET_FLAG,
						      true);
				else
					i40e_do_reset_safe(pf,
							   I40E_PF_RESET_FLAG);
			}
		}
		/* now onwards for main VSI, number of queues will be value
		 * of TC0's queue count
		 */
	}

	/* By this time, vsi->cnt_q_avail shall be set to non-zero and
	 * it should be more than num_queues
	 */
	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
		dev_dbg(&pf->pdev->dev,
			"Error: cnt_q_avail (%u) less than num_queues %d\n",
			vsi->cnt_q_avail, ch->num_queue_pairs);
		return -EINVAL;
	}

	/* reconfig_rss only if vsi type is MAIN_VSI */
	if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
		err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "Error: unable to reconfig rss for num_queues (%u)\n",
				 ch->num_queue_pairs);
			return -EINVAL;
		}
	}

	if (!i40e_setup_channel(pf, vsi, ch)) {
		dev_info(&pf->pdev->dev, "Failed to setup channel\n");
		return -EINVAL;
	}

	dev_info(&pf->pdev->dev,
		 "Setup channel (id:%u) utilizing num_queues %d\n",
		 ch->seid, ch->num_queue_pairs);

	/* configure VSI for BW limit */
	if (ch->max_tx_rate) {
		u64 credits = ch->max_tx_rate;

		if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
			return -EINVAL;

		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&pf->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			ch->max_tx_rate, credits, ch->seid);
	}

	/* in case of VF, this will be main SRIOV VSI */
	ch->parent_vsi = vsi;

	/* and update main_vsi's count for queue_available to use */
	vsi->cnt_q_avail -= ch->num_queue_pairs;

	return 0;
}
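
/* Rate-to-credits arithmetic used above, worked through: max_tx_rate is
 * expressed in Mbps while the scheduler is programmed in credits of
 * I40E_BW_CREDIT_DIVISOR (50 Mbps) each, so e.g. a requested
 * ch->max_tx_rate of 1000 Mbps becomes do_div(1000, 50) = 20 credits,
 * which is exactly the "count of 50Mbps" printed by the dev_dbg() above.
 */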

/**
 * i40e_configure_queue_channels - Add queue channel for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures queue channel mapping to the given TCs
 **/
static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch;
	u64 max_rate = 0;
	int ret = 0, i;

	/* Create app vsi with the TCs. Main VSI with TC0 is already set up */
	vsi->tc_seid_map[0] = vsi->seid;
	for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			ch = kzalloc(sizeof(*ch), GFP_KERNEL);
			if (!ch) {
				ret = -ENOMEM;
				goto err_free;
			}

			INIT_LIST_HEAD(&ch->list);
			ch->num_queue_pairs =
				vsi->tc_config.tc_info[i].qcount;
			ch->base_queue =
				vsi->tc_config.tc_info[i].qoffset;

			/* Bandwidth limit through tc interface is in bytes/s,
			 * change to Mbit/s
			 */
			max_rate = vsi->mqprio_qopt.max_rate[i];
			do_div(max_rate, I40E_BW_MBPS_DIVISOR);
			ch->max_tx_rate = max_rate;

			list_add_tail(&ch->list, &vsi->ch_list);

			ret = i40e_create_queue_channel(vsi, ch);
			if (ret) {
				dev_err(&vsi->back->pdev->dev,
					"Failed creating queue channel with TC%d: queues %d\n",
					i, ch->num_queue_pairs);
				goto err_free;
			}
			vsi->tc_seid_map[i] = ch->seid;
		}
	}
	return ret;

err_free:
	i40e_remove_queue_channels(vsi);
	return ret;
}
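
/* For reference, this path is driven from userspace by the mqprio qdisc
 * in channel hardware-offload mode; an illustrative invocation (the
 * device name and rates are examples, not taken from this file) that maps
 * two traffic classes onto two queue groups with per-TC rate limits:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 4@0 4@4 \
 *		hw 1 mode channel shaper bw_rlimit max_rate 1Gbit 2Gbit
 *
 * Each TC beyond TC0 then shows up here as one i40e_channel.
 */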

/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VEB bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return ret;
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller would have quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = I40E_DEFAULT_TRAFFIC_CLASS;

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}

/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure to
 * resume.
 **/
static int i40e_resume_port_tx(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_aq_resume_port_tx(hw, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Resume Port Tx failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	}

	return ret;
}

/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
	if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT)
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");
		} else {
			/* When status is not DISABLED then DCBX in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC
			 * or explicitly disable if only one TC
			 */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "Query for DCB configuration failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return err;
}
#endif /* CONFIG_I40E_DCB */

#define SPEED_SIZE 14
/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 */
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
	enum i40e_aq_link_speed new_speed;
	struct i40e_pf *pf = vsi->back;
	char *speed = "Unknown";
	char *fc = "Unknown";
	char *fec = "";
	char *req_fec = "";
	char *an = "";

	new_speed = pf->hw.phy.link_info.link_speed;

	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
		return;
	vsi->current_isup = isup;
	vsi->current_speed = new_speed;
	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	/* Warn user if link speed on NPAR enabled partition is not at
	 * least 10GB
	 */
	if (pf->hw.func_caps.npar_enable &&
	    (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
	     pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
		netdev_warn(vsi->netdev,
			    "The partition detected link speed that is less than 10Gbps\n");

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case I40E_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case I40E_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case I40E_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case I40E_LINK_SPEED_1GB:
		speed = "1000 M";
		break;
	case I40E_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		break;
	}

	switch (pf->hw.fc.current_mode) {
	case I40E_FC_FULL:
		fc = "RX/TX";
		break;
	case I40E_FC_TX_PAUSE:
		fc = "TX";
		break;
	case I40E_FC_RX_PAUSE:
		fc = "RX";
		break;
	default:
		fc = "None";
		break;
	}

	if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
		req_fec = ", Requested FEC: None";
		fec = ", FEC: None";
		an = ", Autoneg: False";

		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
			an = ", Autoneg: True";

		if (pf->hw.phy.link_info.fec_info &
		    I40E_AQ_CONFIG_FEC_KR_ENA)
			fec = ", FEC: CL74 FC-FEC/BASE-R";
		else if (pf->hw.phy.link_info.fec_info &
			 I40E_AQ_CONFIG_FEC_RS_ENA)
			fec = ", FEC: CL108 RS-FEC";

		/* 'CL108 RS-FEC' should be displayed when RS is requested, or
		 * both RS and FC are requested
		 */
		if (vsi->back->hw.phy.link_info.req_fec_info &
		    (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
			if (vsi->back->hw.phy.link_info.req_fec_info &
			    I40E_AQ_REQUEST_FEC_RS)
				req_fec = ", Requested FEC: CL108 RS-FEC";
			else
				req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
		}
	}

	netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
		    speed, req_fec, fec, an, fc);
}
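
/* Example of a resulting log line, given the format string above (the
 * device and PCI address shown are illustrative); at non-25G speeds the
 * FEC and autoneg fields stay empty:
 *
 *	i40e 0000:02:00.0 eth0: NIC Link is Up, 40 Gbps Full Duplex, Flow Control: None
 */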

/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_start_rings(vsi);
	if (err)
		return err;

	clear_bit(__I40E_VSI_DOWN, vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = 0;
		pf->fd_atr_cnt = 0;
		i40e_fdir_filter_restore(vsi);
	}

	/* On the next run of the service_task, notify any clients of the new
	 * opened netdev
	 */
	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
	i40e_service_event_schedule(pf);

	return 0;
}

/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	WARN_ON(in_interrupt());
	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, pf->state);
}

/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err;

	err = i40e_vsi_configure(vsi);
	if (!err)
		err = i40e_up_complete(vsi);

	return err;
}

/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_VSI_DOWN bit.
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_stop_rings(vsi);
	i40e_napi_disable_all(vsi);

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(vsi->tx_rings[i]);
		if (i40e_enabled_xdp_vsi(vsi))
			i40e_clean_tx_ring(vsi->xdp_rings[i]);
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}
}
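
/* The teardown order above mirrors i40e_up_complete() in reverse: detach
 * the stack first (carrier off, Tx disabled), then interrupts, rings and
 * NAPI, and only then reclaim whatever is still queued on the Tx, XDP and
 * Rx rings.
 */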

/**
 * i40e_validate_mqprio_qopt - validate queue mapping info
 * @vsi: the VSI being configured
 * @mqprio_qopt: queue parameters
 **/
static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
				     struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 sum_max_rate = 0;
	u64 max_rate = 0;
	int i;

	if (mqprio_qopt->qopt.offset[0] != 0 ||
	    mqprio_qopt->qopt.num_tc < 1 ||
	    mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
		return -EINVAL;
	for (i = 0; ; i++) {
		if (!mqprio_qopt->qopt.count[i])
			return -EINVAL;
		if (mqprio_qopt->min_rate[i]) {
			dev_err(&vsi->back->pdev->dev,
				"Invalid min tx rate (greater than 0) specified\n");
			return -EINVAL;
		}
		max_rate = mqprio_qopt->max_rate[i];
		do_div(max_rate, I40E_BW_MBPS_DIVISOR);
		sum_max_rate += max_rate;

		if (i >= mqprio_qopt->qopt.num_tc - 1)
			break;
		if (mqprio_qopt->qopt.offset[i + 1] !=
		    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
			return -EINVAL;
	}
	if (vsi->num_queue_pairs <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
		return -EINVAL;
	}
	if (sum_max_rate > i40e_get_link_speed(vsi)) {
		dev_err(&vsi->back->pdev->dev,
			"Invalid max tx rate specified\n");
		return -EINVAL;
	}
	return 0;
}

/**
 * i40e_vsi_set_default_tc_config - set default values for tc configuration
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
{
	u16 qcount;
	int i;

	/* Only TC0 is enabled */
	vsi->tc_config.numtc = 1;
	vsi->tc_config.enabled_tc = 1;
	qcount = min_t(int, vsi->alloc_queue_pairs,
		       i40e_pf_get_max_q_per_tc(vsi->back));
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* For the TC that is not enabled set the offset to default
		 * queue and allocate one queue for the given TC.
		 */
		vsi->tc_config.tc_info[i].qoffset = 0;
		if (i == 0)
			vsi->tc_config.tc_info[i].qcount = qcount;
		else
			vsi->tc_config.tc_info[i].qcount = 1;
		vsi->tc_config.tc_info[i].netdev_tc = 0;
	}
}

/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: tc offload data
 **/
static int i40e_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0, num_tc, hw;
	bool need_reset = false;
	int ret = -EINVAL;
	u16 mode;
	int i;

	num_tc = mqprio_qopt->qopt.num_tc;
	hw = mqprio_qopt->qopt.hw;
	mode = mqprio_qopt->mode;
	if (!hw) {
		pf->flags &= ~I40E_FLAG_TC_MQPRIO;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		goto config_tc;
	}

	/* Check if MFP enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev,
			    "Configuring TC not supported in MFP mode\n");
		return ret;
	}
	switch (mode) {
	case TC_MQPRIO_MODE_DCB:
		pf->flags &= ~I40E_FLAG_TC_MQPRIO;

		/* Check if DCB enabled to continue */
		if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
			netdev_info(netdev,
				    "DCB is not enabled for adapter\n");
			return ret;
		}

		/* Check whether tc count is within enabled limit */
		if (num_tc > i40e_pf_get_num_tc(pf)) {
			netdev_info(netdev,
				    "TC count greater than enabled on link for adapter\n");
			return ret;
		}
		break;
	case TC_MQPRIO_MODE_CHANNEL:
		if (pf->flags & I40E_FLAG_DCB_ENABLED) {
			netdev_info(netdev,
				    "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
			return ret;
		}
		if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
			return ret;
		ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
		if (ret)
			return ret;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt,
		       sizeof(*mqprio_qopt));
		pf->flags |= I40E_FLAG_TC_MQPRIO;
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
		break;
	default:
		return ret;
	}

config_tc:
	/* Generate TC map for number of tc requested */
	for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);

	/* Requesting same TC configuration as already enabled */
	if (enabled_tc == vsi->tc_config.enabled_tc &&
	    mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
		i40e_remove_queue_channels(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		need_reset = true;
		goto exit;
	}

	if (pf->flags & I40E_FLAG_TC_MQPRIO) {
		if (vsi->mqprio_qopt.max_rate[0]) {
			u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];

			do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
			ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
			if (!ret) {
				u64 credits = max_tx_rate;

				do_div(credits, I40E_BW_CREDIT_DIVISOR);
				dev_dbg(&vsi->back->pdev->dev,
					"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
					max_tx_rate,
					credits,
					vsi->seid);
			} else {
				need_reset = true;
				goto exit;
			}
		}
		ret = i40e_configure_queue_channels(vsi);
		if (ret) {
			netdev_info(netdev,
				    "Failed configuring queue channels\n");
			need_reset = true;
			goto exit;
		}
	}

exit:
	/* Reset the configuration data to defaults, only TC0 is enabled */
	if (need_reset) {
		i40e_vsi_set_default_tc_config(vsi);
		need_reset = false;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);
	return ret;
}

/**
 * i40e_set_cld_element - sets cloud filter element data
 * @filter: cloud filter rule
 * @cld: ptr to cloud filter element data
 *
 * This is helper function to copy data into cloud filter element
 **/
static inline void
i40e_set_cld_element(struct i40e_cloud_filter *filter,
		     struct i40e_aqc_cloud_filters_element_data *cld)
{
	int i, j;
	u32 ipa;

	memset(cld, 0, sizeof(*cld));
	ether_addr_copy(cld->outer_mac, filter->dst_mac);
	ether_addr_copy(cld->inner_mac, filter->src_mac);

	if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
		return;

	if (filter->n_proto == ETH_P_IPV6) {
#define IPV6_MAX_INDEX	(ARRAY_SIZE(filter->dst_ipv6) - 1)
		/* copy the IPv6 address out word-reversed, converting each
		 * word from big endian to little endian on the way
		 */
		for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
		     i++, j += 2) {
			ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
			ipa = cpu_to_le32(ipa);
			memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
		}
	} else {
		ipa = be32_to_cpu(filter->dst_ipv4);
		memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
	}

	cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));

	/* tenant_id is not supported by FW now, once the support is enabled
	 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
	 */
	if (filter->tenant_id)
		return;
}

/**
 * i40e_add_del_cloud_filter - Add/del cloud filter
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add, if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec.
 * Returns 0 if the filter was successfully added.
 **/
static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
				     struct i40e_cloud_filter *filter, bool add)
{
	struct i40e_aqc_cloud_filters_element_data cld_filter;
	struct i40e_pf *pf = vsi->back;
	int ret;
	static const u16 flag_table[128] = {
		[I40E_CLOUD_FILTER_FLAGS_OMAC]  =
			I40E_AQC_ADD_CLOUD_FILTER_OMAC,
		[I40E_CLOUD_FILTER_FLAGS_IMAC]  =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC,
		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN]  =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
		[I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
		[I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
			I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
		[I40E_CLOUD_FILTER_FLAGS_IIP] =
			I40E_AQC_ADD_CLOUD_FILTER_IIP,
	};

	if (filter->flags >= ARRAY_SIZE(flag_table))
		return I40E_ERR_CONFIG;

	/* copy element needed to add cloud filter from filter */
	i40e_set_cld_element(filter, &cld_filter);

	if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
		cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
					     I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);

	if (filter->n_proto == ETH_P_IPV6)
		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
						I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
	else
		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
						I40E_AQC_ADD_CLOUD_FLAGS_IPV4);

	if (add)
		ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
						&cld_filter, 1);
	else
		ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
						&cld_filter, 1);
	if (ret)
		dev_dbg(&pf->pdev->dev,
			"Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
			add ? "add" : "delete", filter->dst_port, ret,
			pf->hw.aq.asq_last_status);
	else
		dev_info(&pf->pdev->dev,
			 "%s cloud filter for VSI: %d\n",
			 add ? "Added" : "Deleted", filter->seid);
	return ret;
}
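
/* The flag_table above is a sparse, designated-initializer lookup: the
 * driver's I40E_CLOUD_FILTER_FLAGS_* value doubles as the array index and
 * maps straight to the admin-queue encoding, e.g.
 * flag_table[I40E_CLOUD_FILTER_FLAGS_IMAC] yields
 * I40E_AQC_ADD_CLOUD_FILTER_IMAC. Any index with no entry stays 0, which
 * is why filter->flags is bounds-checked against ARRAY_SIZE(flag_table)
 * before use.
 */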

/**
 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add, if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec using big buffer.
 * Returns 0 if the filter was successfully added.
 **/
static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
					     struct i40e_cloud_filter *filter,
					     bool add)
{
	struct i40e_aqc_cloud_filters_element_bb cld_filter;
	struct i40e_pf *pf = vsi->back;
	int ret;

	/* Both (src/dst) valid mac_addr are not supported */
	if ((is_valid_ether_addr(filter->dst_mac) &&
	     is_valid_ether_addr(filter->src_mac)) ||
	    (is_multicast_ether_addr(filter->dst_mac) &&
	     is_multicast_ether_addr(filter->src_mac)))
		return -EOPNOTSUPP;

	/* Make sure port is specified, otherwise bail out, for channel
	 * specific cloud filter needs 'L4 port' to be non-zero
	 */
	if (!filter->dst_port)
		return -EINVAL;

	/* adding filter using src_port/src_ip is not supported at this stage */
	if (filter->src_port || filter->src_ipv4 ||
	    !ipv6_addr_any(&filter->ip.v6.src_ip6))
		return -EOPNOTSUPP;

	/* copy element needed to add cloud filter from filter */
	i40e_set_cld_element(filter, &cld_filter.element);

	if (is_valid_ether_addr(filter->dst_mac) ||
	    is_valid_ether_addr(filter->src_mac) ||
	    is_multicast_ether_addr(filter->dst_mac) ||
	    is_multicast_ether_addr(filter->src_mac)) {
		/* MAC + IP : unsupported mode */
		if (filter->dst_ipv4)
			return -EOPNOTSUPP;

		/* since we validated that L4 port must be valid before
		 * we get here, start with respective "flags" value
		 * and update if vlan is present or not
		 */
		cld_filter.element.flags =
			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);

		if (filter->vlan_id) {
			cld_filter.element.flags =
			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
		}

	} else if (filter->dst_ipv4 ||
		   !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
		cld_filter.element.flags =
				cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
		if (filter->n_proto == ETH_P_IPV6)
			cld_filter.element.flags |=
				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
		else
			cld_filter.element.flags |=
				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
	} else {
		dev_err(&pf->pdev->dev,
			"either mac or ip has to be valid for cloud filter\n");
		return -EINVAL;
	}

	/* Now copy L4 port in Byte 6..7 in general fields */
	cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
						be16_to_cpu(filter->dst_port);

	if (add) {
		/* Validate current device switch mode, change if necessary */
		ret = i40e_validate_and_set_switch_mode(vsi);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"failed to set switch mode, ret %d\n",
				ret);
			return ret;
		}

		ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
						   &cld_filter, 1);
	} else {
		ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
						   &cld_filter, 1);
	}

	if (ret)
		dev_dbg(&pf->pdev->dev,
			"Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
			add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
	else
		dev_info(&pf->pdev->dev,
			 "%s cloud filter for VSI: %d, L4 port: %d\n",
			 add ? "add" : "delete", filter->seid,
			 ntohs(filter->dst_port));
	return ret;
}

/**
 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
 * @vsi: Pointer to VSI
 * @f: Pointer to struct tc_cls_flower_offload
 * @filter: Pointer to cloud filter structure
 *
 **/
static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
				 struct tc_cls_flower_offload *f,
				 struct i40e_cloud_filter *filter)
{
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct i40e_pf *pf = vsi->back;
	u8 field_flags = 0;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
		dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
			f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);

		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);

		if (mask->keyid != 0)
			field_flags |= I40E_CLOUD_FIELD_TEN_ID;

		filter->tenant_id = be32_to_cpu(key->keyid);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);

		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);

		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		filter->n_proto = n_proto_key & n_proto_mask;
		filter->ip_proto = key->ip_proto;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);

		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		/* use is_broadcast and is_zero to check for all 0xf or 0 */
		if (!is_zero_ether_addr(mask->dst)) {
			if (is_broadcast_ether_addr(mask->dst)) {
				field_flags |= I40E_CLOUD_FIELD_OMAC;
			} else {
				dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
					mask->dst);
				return I40E_ERR_CONFIG;
			}
		}

		if (!is_zero_ether_addr(mask->src)) {
			if (is_broadcast_ether_addr(mask->src)) {
				field_flags |= I40E_CLOUD_FIELD_IMAC;
			} else {
				dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
					mask->src);
				return I40E_ERR_CONFIG;
			}
		}
		ether_addr_copy(filter->dst_mac, key->dst);
		ether_addr_copy(filter->src_mac, key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);

		if (mask->vlan_id) {
			if (mask->vlan_id == VLAN_VID_MASK) {
				field_flags |= I40E_CLOUD_FIELD_IVLAN;

			} else {
				dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
					mask->vlan_id);
				return I40E_ERR_CONFIG;
			}
		}

		filter->vlan_id = cpu_to_be16(key->vlan_id);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		addr_type = key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		if (mask->dst) {
			if (mask->dst == cpu_to_be32(0xffffffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				mask->dst = be32_to_cpu(mask->dst);
				dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4\n",
					&mask->dst);
				return I40E_ERR_CONFIG;
			}
		}

		if (mask->src) {
			if (mask->src == cpu_to_be32(0xffffffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				mask->src = be32_to_cpu(mask->src);
				dev_err(&pf->pdev->dev, "Bad ip src mask %pI4\n",
					&mask->src);
				return I40E_ERR_CONFIG;
			}
		}

		if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
			dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
			return I40E_ERR_CONFIG;
		}
		filter->dst_ipv4 = key->dst;
		filter->src_ipv4 = key->src;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		/* src and dest IPV6 address should not be LOOPBACK
		 * (0:0:0:0:0:0:0:1), which can be represented as ::1
		 */
		if (ipv6_addr_loopback(&key->dst) ||
		    ipv6_addr_loopback(&key->src)) {
			dev_err(&pf->pdev->dev,
				"Bad ipv6, addr is LOOPBACK\n");
			return I40E_ERR_CONFIG;
		}
		if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
			field_flags |= I40E_CLOUD_FIELD_IIP;

		memcpy(&filter->src_ipv6, &key->src.s6_addr32,
		       sizeof(filter->src_ipv6));
		memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
		       sizeof(filter->dst_ipv6));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);

		if (mask->src) {
			if (mask->src == cpu_to_be16(0xffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
					be16_to_cpu(mask->src));
				return I40E_ERR_CONFIG;
			}
		}

		if (mask->dst) {
			if (mask->dst == cpu_to_be16(0xffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
					be16_to_cpu(mask->dst));
				return I40E_ERR_CONFIG;
			}
		}

		filter->dst_port = key->dst;
		filter->src_port = key->src;

		switch (filter->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			dev_err(&pf->pdev->dev,
				"Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}
	}
	filter->flags = field_flags;
	return 0;
}
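
/* Summary of the mask handling above: the hardware cloud filters only
 * support exact matches, so every supplied mask must be either all-zeroes
 * (field ignored) or all-ones (0xffffffff for IPv4 addresses, 0xffff for
 * ports, broadcast for MAC addresses, VLAN_VID_MASK for VLAN IDs);
 * anything in between is rejected with I40E_ERR_CONFIG.
 */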

/**
 * i40e_handle_tclass: Forward to a traffic class on the device
 * @vsi: Pointer to VSI
 * @tc: traffic class index on the device
 * @filter: Pointer to cloud filter structure
 *
 **/
static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
			      struct i40e_cloud_filter *filter)
{
	struct i40e_channel *ch, *ch_tmp;

	/* direct to a traffic class on the same device */
	if (tc == 0) {
		filter->seid = vsi->seid;
		return 0;
	} else if (vsi->tc_config.enabled_tc & BIT(tc)) {
		if (!filter->dst_port) {
			dev_err(&vsi->back->pdev->dev,
				"Specify destination port to direct to traffic class that is not default\n");
			return -EINVAL;
		}
		if (list_empty(&vsi->ch_list))
			return -EINVAL;
		list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
					 list) {
			if (ch->seid == vsi->tc_seid_map[tc])
				filter->seid = ch->seid;
		}
		return 0;
	}
	dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
	return -EINVAL;
}

/**
 * i40e_configure_clsflower - Configure tc flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 *
 **/
static int i40e_configure_clsflower(struct i40e_vsi *vsi,
				    struct tc_cls_flower_offload *cls_flower)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
	struct i40e_cloud_filter *filter = NULL;
	struct i40e_pf *pf = vsi->back;
	int err = 0;

	if (tc < 0) {
		dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
		return -EOPNOTSUPP;
	}

	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
	    test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
		return -EBUSY;

	if (pf->fdir_pf_active_filters ||
	    (!hlist_empty(&pf->fdir_filter_list))) {
		dev_err(&vsi->back->pdev->dev,
			"Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
		return -EINVAL;
	}

	if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
		dev_err(&vsi->back->pdev->dev,
			"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
		vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
	}

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	filter->cookie = cls_flower->cookie;

	err = i40e_parse_cls_flower(vsi, cls_flower, filter);
	if (err < 0)
		goto err;

	err = i40e_handle_tclass(vsi, tc, filter);
	if (err < 0)
		goto err;

	/* Add cloud filter */
	if (filter->dst_port)
		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
	else
		err = i40e_add_del_cloud_filter(vsi, filter, true);

	if (err) {
		dev_err(&pf->pdev->dev,
			"Failed to add cloud filter, err %s\n",
			i40e_stat_str(&pf->hw, err));
		err = i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
		goto err;
	}

	/* add filter to the ordered list */
	INIT_HLIST_NODE(&filter->cloud_node);

	hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);

	pf->num_cloud_filters++;

	return err;
err:
	kfree(filter);
	return err;
}

/**
 * i40e_find_cloud_filter - Find the cloud filter in the list
 * @vsi: Pointer to VSI
 * @cookie: filter specific cookie
 *
 **/
static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
							unsigned long *cookie)
{
	struct i40e_cloud_filter *filter = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &vsi->back->cloud_filter_list, cloud_node)
		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
			return filter;
	return NULL;
}

/**
 * i40e_delete_clsflower - Remove tc flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 *
 **/
static int i40e_delete_clsflower(struct i40e_vsi *vsi,
				 struct tc_cls_flower_offload *cls_flower)
{
	struct i40e_cloud_filter *filter = NULL;
	struct i40e_pf *pf = vsi->back;
	int err = 0;

	filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);

	if (!filter)
		return -EINVAL;

	hash_del(&filter->cloud_node);

	if (filter->dst_port)
		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
	else
		err = i40e_add_del_cloud_filter(vsi, filter, false);

	kfree(filter);
	if (err) {
		dev_err(&pf->pdev->dev,
			"Failed to delete cloud filter, err %s\n",
			i40e_stat_str(&pf->hw, err));
		return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
	}

	pf->num_cloud_filters--;
	if (!pf->num_cloud_filters)
		if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
		    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
		}
	return 0;
}

/**
 * i40e_setup_tc_cls_flower - flower classifier offloads
 * @np: net device private structure
 * @cls_flower: offload data
 **/
static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
				    struct tc_cls_flower_offload *cls_flower)
{
	struct i40e_vsi *vsi = np->vsi;

	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return i40e_configure_clsflower(vsi, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return i40e_delete_clsflower(vsi, cls_flower);
	case TC_CLSFLOWER_STATS:
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}
}

static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct i40e_netdev_priv *np = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return i40e_setup_tc_cls_flower(np, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int i40e_setup_tc_block(struct net_device *dev,
			       struct tc_block_offload *f)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
					     np, np);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			   void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return i40e_setup_tc(netdev, type_data);
	case TC_SETUP_BLOCK:
		return i40e_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
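
/* __i40e_setup_tc() is wired up as the driver's .ndo_setup_tc callback:
 * mqprio offload requests arrive as TC_SETUP_QDISC_MQPRIO, while
 * tc-flower filters arrive via TC_SETUP_BLOCK, whose registered block
 * callback then dispatches TC_SETUP_CLSFLOWER to the cls_flower handlers
 * above.
 */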

/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

	udp_tunnel_get_rx_info(netdev);

	return 0;
}

/**
 * i40e_vsi_open -
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Returns 0 on success, negative value on failure
 *
 * Note: expects to be called while under rtnl_lock()
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[I40E_INT_NAME_STR_LEN];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

		err = netif_set_real_num_rx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

	} else if (vsi->type == I40E_VSI_FDIR) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);

	} else {
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);

	return err;
}

/**
 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 * @pf: Pointer to PF
 *
 * This function destroys the hlist where all the Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_exit(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	struct i40e_flex_pit *pit_entry, *tmp;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &pf->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}

	list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
		list_del(&pit_entry->list);
		kfree(pit_entry);
	}
	INIT_LIST_HEAD(&pf->l3_flex_pit_list);

	list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
		list_del(&pit_entry->list);
		kfree(pit_entry);
	}
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);

	pf->fdir_pf_active_filters = 0;
	pf->fd_tcp4_filter_cnt = 0;
	pf->fd_udp4_filter_cnt = 0;
	pf->fd_sctp4_filter_cnt = 0;
	pf->fd_ip4_filter_cnt = 0;

	/* Reprogram the default input set for TCP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for UDP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for SCTP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for Other/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}

/**
 * i40e_cloud_filter_exit - Cleans up the cloud filters
 * @pf: Pointer to PF
 *
 * This function destroys the hlist where all the cloud filters
 * were saved.
 **/
static void i40e_cloud_filter_exit(struct i40e_pf *pf)
{
	struct i40e_cloud_filter *cfilter;
	struct hlist_node *node;

	hlist_for_each_entry_safe(cfilter, node,
				  &pf->cloud_filter_list, cloud_node) {
		hlist_del(&cfilter->cloud_node);
		kfree(cfilter);
	}
	pf->num_cloud_filters = 0;

	if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
	    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
		pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
	}
}

/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * this netdev interface is disabled.
 *
 * Returns 0, this is not allowed to fail
 **/
int i40e_close(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	i40e_vsi_close(vsi);

	return 0;
}

/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
	u32 val;

	WARN_ON(in_interrupt());

	/* do the biggest reset indicated */
	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor. Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & I40E_PF_RESET_FLAG) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf, lock_acquired);

	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
					       vsi->state))
				i40e_vsi_reinit_locked(pf->vsi[v]);
		}
	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
		int v;

		/* Find the VSI(s) that needs to be brought down */
		dev_info(&pf->pdev->dev, "VSI down requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
					       vsi->state)) {
				set_bit(__I40E_VSI_DOWN, vsi->state);
				i40e_down(vsi);
			}
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
	}
}
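
/* The branches above are ordered from most to least disruptive: a Global
 * Reset takes down every PF on the device including MAC/PHY, a Core Reset
 * does the same minus MAC/PHY, and a PF Reset touches only this
 * function's registers; the VSI reinit and down cases do not reset
 * hardware at all.
 */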

#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
 * @pf: board private structure
 * @old_cfg: current DCB config
 * @new_cfg: new DCB config
 **/
bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
			    struct i40e_dcbx_config *old_cfg,
			    struct i40e_dcbx_config *new_cfg)
{
	bool need_reconfig = false;

	/* Check if ETS configuration has changed */
	if (memcmp(&new_cfg->etscfg,
		   &old_cfg->etscfg,
		   sizeof(new_cfg->etscfg))) {
		/* If Priority Table has changed reconfig is needed */
		if (memcmp(&new_cfg->etscfg.prioritytable,
			   &old_cfg->etscfg.prioritytable,
			   sizeof(new_cfg->etscfg.prioritytable))) {
			need_reconfig = true;
			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
		}

		if (memcmp(&new_cfg->etscfg.tcbwtable,
			   &old_cfg->etscfg.tcbwtable,
			   sizeof(new_cfg->etscfg.tcbwtable)))
			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");

		if (memcmp(&new_cfg->etscfg.tsatable,
			   &old_cfg->etscfg.tsatable,
			   sizeof(new_cfg->etscfg.tsatable)))
			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
	}

	/* Check if PFC configuration has changed */
	if (memcmp(&new_cfg->pfc,
		   &old_cfg->pfc,
		   sizeof(new_cfg->pfc))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
	}

	/* Check if APP Table has changed */
	if (memcmp(&new_cfg->app,
		   &old_cfg->app,
		   sizeof(new_cfg->app))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
	}

	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
	return need_reconfig;
}

/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	dev_dbg(&pf->pdev->dev,
		"LLDP event mib type %s\n", type ? "remote" : "local");
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	/* Store the old configuration */
	tmp_dcbx_cfg = hw->local_dcbx_config;

	/* Reset the old DCBx configuration data */
	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
	/* Get updated DCBX data from firmware */
	ret = i40e_get_dcb_config(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
		    sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
					       &hw->local_dcbx_config);

	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

	if (!need_reconfig)
		goto exit;

	/* Enable DCB tagging only when more than one TC */
	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;

	set_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* Reconfiguration needed quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);

	ret = i40e_resume_port_tx(pf);

	clear_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* In case of error no point in resuming VSIs */
	if (ret)
		goto exit;

	/* Wait for the PF's queues to be disabled */
	ret = i40e_pf_wait_queues_disabled(pf);
	if (ret) {
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	} else {
		i40e_pf_unquiesce_all_vsi(pf);
		pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
			      I40E_FLAG_CLIENT_L2_CHANGE);
	}

exit:
	return ret;
}
#endif /* CONFIG_I40E_DCB */

/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags, true);
	rtnl_unlock();
}

/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VFs.
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			      >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}

/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}

/**
 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
 * @pf: board private structure
 **/
u32 i40e_get_current_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_get_global_fd_count - Get total FD filters programmed on device
 * @pf: board private structure
 **/
u32 i40e_get_global_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
	return fcnt_prog;
}
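
/* Both counters above come out of a single 32-bit stat register: the
 * guaranteed-filter count sits in the low bits
 * (I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) and the best-effort count in a
 * higher field that must be shifted down by
 * I40E_PFQF_FDSTAT_BEST_CNT_SHIFT, so the per-PF total is simply the sum
 * of the two extracted fields.
 */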

/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		return;

	/* Check if we have enough room to re-enable FDir SB capability. */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
		if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
			pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}

	/* We should wait for even more space before re-enabling ATR.
	 * Additionally, we cannot enable ATR as long as we still have TCP SB
	 * rules active.
	 */
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
	    (pf->fd_tcp4_filter_cnt == 0)) {
		if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
		}
	}

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node) {
			if (filter->fd_id == pf->fd_inv) {
				hlist_del(&filter->fdir_node);
				kfree(filter);
				pf->fdir_pf_active_filters--;
			}
		}
	}
}

#define I40E_MIN_FD_FLUSH_INTERVAL 10
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	if (!time_after(jiffies, pf->fd_flush_timestamp +
				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quick and we have mostly SB rules we
	 * should not re-enable ATR for some time.
	 */
	min_flush_time = pf->fd_flush_timestamp +
			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

	if (!(time_after(jiffies, min_flush_time)) &&
	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}

	pf->fd_flush_timestamp = jiffies;
	pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	i40e_flush(&pf->hw);
	pf->fd_flush_cnt++;
	pf->fd_add_err = 0;
	do {
		/* Check FD flush status every 5-6msec */
		usleep_range(5000, 6000);
		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	} while (flush_wait_retry--);
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr && !pf->fd_tcp4_filter_cnt)
			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
		clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}
}

/**
 * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}

/* We can see up to 256 filter programming desc in transit if the filters are
 * being applied really fast; before we see the first
 * filter miss error on Rx queue 0. Accumulating enough error messages before
 * reacting will make sure we don't cause flush too often.
 */
#define I40E_MAX_FD_PROGRAM_ERROR 256

/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state))
		return;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		i40e_fdir_flush_and_replay(pf);

	i40e_fdir_check_and_reenable(pf);
}

/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_IWARP:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}

/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
	struct i40e_pf *pf;
	int i;

	if (!veb || !veb->pf)
		return;
	pf = veb->pf;

	/* depth first... */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
			i40e_veb_link_event(pf->veb[i], link_up);

	/* ... now the local VSIs */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
			i40e_vsi_link_event(pf->vsi[i], link_up);
}

/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 new_link_speed, old_link_speed;
	i40e_status status;
	bool new_link, old_link;

	/* save off old link status information */
	pf->hw.phy.link_info_old = pf->hw.phy.link_info;

	/* set this to force the get_link_status call to refresh state */
	pf->hw.phy.get_link_info = true;

	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);

	status = i40e_get_link_status(&pf->hw, &new_link);

	/* On success, disable temp link polling */
	if (status == I40E_SUCCESS) {
		if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
			pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
	} else {
		/* Enable link polling temporarily until i40e_get_link_status
		 * returns I40E_SUCCESS
		 */
		pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
			status);
		return;
	}

	old_link_speed = pf->hw.phy.link_info_old.link_speed;
	new_link_speed = pf->hw.phy.link_info.link_speed;

	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
	    (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;

	i40e_print_link_message(vsi, new_link);

	/* Notify the base of the switch tree connected to
	 * the link.  Floating VEBs are not notified.
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(vsi, new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}
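
/* i40e_link_event() deliberately bails out early when neither the link
 * state nor the link speed changed and the netdev carrier already agrees,
 * so the polling paths do not spam the log with duplicate link messages.
 */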

/**
 * i40e_watchdog_subtask - periodic checks not using event driven response
 * @pf: board private structure
 **/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies, (pf->service_timer_previous +
				  pf->service_timer_period)))
		return;
	pf->service_timer_previous = jiffies;

	if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
	    (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
		i40e_link_event(pf);

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			i40e_update_stats(pf->vsi[i]);

	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
		/* Update the stats for the active switching components */
		for (i = 0; i < I40E_MAX_VEB; i++)
			if (pf->veb[i])
				i40e_update_veb_stats(pf->veb[i]);
	}

	i40e_ptp_rx_hang(pf);
	i40e_ptp_tx_hang(pf);
}

/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

	if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
		clear_bit(__I40E_DOWN_REQUESTED, pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
		i40e_prep_for_reset(pf, false);
		i40e_reset(pf);
		i40e_rebuild(pf, false, false);
	}

	/* If we're already down or resetting, just bail */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
		i40e_do_reset(pf, reset_flags, false);
	}
}
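
/* An in-flight firmware-signalled recovery (__I40E_RESET_INTR_RECEIVED) is
 * serviced first with the prep/reset/rebuild sequence above; any driver
 * requested resets accumulated in reset_flags are issued only afterwards,
 * and are skipped entirely while the device is down or busy reconfiguring.
 */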

/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static void i40e_handle_link_event(struct i40e_pf *pf,
				   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_get_link_status *status =
		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Do a new status request to re-enable LSE reporting
	 * and load new status information into the hw struct
	 * This completely ignores any state information
	 * in the ARQ event info, instead choosing to always
	 * issue the AQ update link status command.
	 */
	i40e_link_event(pf);

	/* Check if module meets thermal requirements */
	if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
		dev_err(&pf->pdev->dev,
			"Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
		dev_err(&pf->pdev->dev,
			"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
	} else {
		/* check for unqualified module, if link is down, suppress
		 * the message if link was forced to be down.
		 */
		if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
		    (!(status->link_info & I40E_AQ_LINK_UP)) &&
		    (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
			dev_err(&pf->pdev->dev,
				"Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
			dev_err(&pf->pdev->dev,
				"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		}
	}
}

/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	/* Do not run clean AQ when PF reset fails */
	if (test_bit(__I40E_RESET_FAILED, pf->state))
		return;

	/* check for error indications */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
		pf->arq_overflows++;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		case i40e_aqc_opc_nvm_erase:
		case i40e_aqc_opc_nvm_update:
		case i40e_aqc_opc_oem_post_update:
			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
				   "ARQ NVM operation 0x%04x completed\n",
				   opcode);
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ: Unknown event 0x%04x ignored\n",
				 opcode);
			break;
		}
	} while (i++ < pf->adminq_work_limit);

	if (i < pf->adminq_work_limit)
		clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);

	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	kfree(event.msg_buf);
}

/**
 * i40e_verify_eeprom - make sure eeprom is good to use
 * @pf: board private structure
 **/
static void i40e_verify_eeprom(struct i40e_pf *pf)
{
	int err;

	err = i40e_diag_eeprom_test(&pf->hw);
	if (err) {
		/* retry in case of garbage read */
		err = i40e_diag_eeprom_test(&pf->hw);
		if (err) {
			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
				 err);
			set_bit(__I40E_BAD_EEPROM, pf->state);
		}
	}

	if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
		clear_bit(__I40E_BAD_EEPROM, pf->state);
	}
}

/**
 * i40e_enable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * enable switch loop back or die - no point in a return value
 **/
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}

/**
 * i40e_disable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * disable switch loop back or die - no point in a return value
 **/
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}

/**
 * i40e_config_bridge_mode - Configure the HW bridge mode
 * @veb: pointer to the bridge instance
 *
 * Configure the loop back mode for the LAN VSI that is downlink to the
 * specified HW bridge instance. It is expected this function is called
 * when a new HW bridge is instantiated.
 **/
static void i40e_config_bridge_mode(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;

	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		i40e_disable_pf_switch_lb(pf);
	else
		i40e_enable_pf_switch_lb(pf);
}
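
/* Bridge mode maps directly onto PF switch loopback: VEPA hairpins local
 * traffic through the adjacent external switch, so internal loopback is
 * disabled; VEB reflects frames internally and therefore needs the
 * ALLOW_LB switch flag set.
 */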

/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of veb_idx %d owner VSI failed: %d\n",
			 veb->idx, ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		veb->bridge_mode = BRIDGE_MODE_VEB;
	else
		veb->bridge_mode = BRIDGE_MODE_VEPA;
	i40e_config_bridge_mode(veb);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];

			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}
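
/* Rebuild order here matters: the owner VSI must exist before the VEB can
 * be created in the switch, the VEB must exist before member VSIs can
 * attach to its seid, and child VEBs are recursed into last, once this VEB
 * has a valid uplink seid to offer them.
 */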

/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 * @list_type: AQ capability to be queried
 **/
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
						    &data_size, list_type,
						    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENODEV;
		}
	} while (err);

	if (pf->hw.debug_mask & I40E_DEBUG_USER) {
		if (list_type == i40e_aqc_opc_list_func_capabilities) {
			dev_info(&pf->pdev->dev,
				 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
				 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
				 pf->hw.func_caps.num_msix_vectors,
				 pf->hw.func_caps.num_msix_vectors_vf,
				 pf->hw.func_caps.fd_filters_guaranteed,
				 pf->hw.func_caps.fd_filters_best_effort,
				 pf->hw.func_caps.num_tx_qp,
				 pf->hw.func_caps.num_vsis);
		} else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
			dev_info(&pf->pdev->dev,
				 "switch_mode=0x%04x, function_valid=0x%08x\n",
				 pf->hw.dev_caps.switch_mode,
				 pf->hw.dev_caps.valid_functions);
			dev_info(&pf->pdev->dev,
				 "SR-IOV=%d, num_vfs for all function=%u\n",
				 pf->hw.dev_caps.sr_iov_1_1,
				 pf->hw.dev_caps.num_vfs);
			dev_info(&pf->pdev->dev,
				 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
				 pf->hw.dev_caps.num_vsis,
				 pf->hw.dev_caps.num_rx_qp,
				 pf->hw.dev_caps.num_tx_qp);
		}
	}
	if (list_type == i40e_aqc_opc_list_func_capabilities) {
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		       + pf->hw.func_caps.num_vfs)
		if (pf->hw.revision_id == 0 &&
		    pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
			dev_info(&pf->pdev->dev,
				 "got num_vsis %d, setting num_vsis to %d\n",
				 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
			pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
		}
	}
	return 0;
}
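
/* Capability discovery uses a two-pass AQ pattern: a 40-element buffer is
 * tried first, and if firmware answers I40E_AQ_RC_ENOMEM the loop retries
 * with the exact size the AQ reported back in data_size.
 */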

static int i40e_vsi_clear(struct i40e_vsi *vsi);

/**
 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
 * @pf: board private structure
 **/
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	/* quick workaround for an NVM issue that leaves a critical register
	 * uninitialized
	 */
	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
		static const u32 hkey[] = {
			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
			0x95b3a76d};
		int i;

		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
	}

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* find existing VSI and see if it needs configuring */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);

	/* create a new VSI if none exists */
	if (!vsi) {
		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
				     pf->vsi[pf->lan_vsi]->seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			return;
		}
	}

	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}

/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/
static void i40e_fdir_teardown(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	i40e_fdir_filter_exit(pf);
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (vsi)
		i40e_vsi_release(vsi);
}

/**
 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
 * @vsi: PF main vsi
 * @seid: seid of main or channel VSIs
 *
 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
 * existed before reset
 **/
static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
{
	struct i40e_cloud_filter *cfilter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;
	i40e_status ret;

	/* Add cloud filters back if they exist */
	hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
				  cloud_node) {
		if (cfilter->seid != seid)
			continue;

		if (cfilter->dst_port)
			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
								true);
		else
			ret = i40e_add_del_cloud_filter(vsi, cfilter, true);

		if (ret) {
			dev_dbg(&pf->pdev->dev,
				"Failed to rebuild cloud filter, err %s aq_err %s\n",
				i40e_stat_str(&pf->hw, ret),
				i40e_aq_str(&pf->hw,
					    pf->hw.aq.asq_last_status));
			return ret;
		}
	}
	return 0;
}

/**
 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
 * @vsi: PF main vsi
 *
 * Rebuilds channel VSIs if they existed before reset
 **/
static int i40e_rebuild_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	i40e_status ret;

	if (list_empty(&vsi->ch_list))
		return 0;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (!ch->initialized)
			break;
		/* Proceed with creation of channel (VMDq2) VSI */
		ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "failed to rebuild channels using uplink_seid %u\n",
				 vsi->uplink_seid);
			return ret;
		}
		if (ch->max_tx_rate) {
			u64 credits = ch->max_tx_rate;

			if (i40e_set_bw_limit(vsi, ch->seid,
					      ch->max_tx_rate))
				return -EINVAL;

			do_div(credits, I40E_BW_CREDIT_DIVISOR);
			dev_dbg(&vsi->back->pdev->dev,
				"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
				ch->max_tx_rate,
				credits,
				ch->seid);
		}
		ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
		if (ret) {
			dev_dbg(&vsi->back->pdev->dev,
				"Failed to rebuild cloud filters for channel VSI %u\n",
				ch->seid);
			return ret;
		}
	}
	return 0;
}

/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	/* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
	if (!lock_acquired)
		rtnl_lock();
	i40e_pf_quiesce_all_vsi(pf);
	if (!lock_acquired)
		rtnl_unlock();

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
}

/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 **/
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}

/**
 * i40e_get_oem_version - get OEM specific version information
 * @hw: pointer to the hardware structure
 **/
static void i40e_get_oem_version(struct i40e_hw *hw)
{
	u16 block_offset = 0xffff;
	u16 block_length = 0;
	u16 capabilities = 0;
	u16 gen_snap = 0;
	u16 release = 0;

#define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
#define I40E_NVM_OEM_LENGTH_OFFSET		0x00
#define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
#define I40E_NVM_OEM_GEN_OFFSET			0x02
#define I40E_NVM_OEM_RELEASE_OFFSET		0x03
#define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
#define I40E_NVM_OEM_LENGTH			3

	/* Check if pointer to OEM version block is valid. */
	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
	if (block_offset == 0xffff)
		return;

	/* Check if OEM version block has correct length. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
			   &block_length);
	if (block_length < I40E_NVM_OEM_LENGTH)
		return;

	/* Check if OEM version format is as expected. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
			   &capabilities);
	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
		return;

	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
			   &gen_snap);
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
			   &release);
	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
}
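
/* The OEM version block in the NVM shadow RAM is reached through the
 * pointer word at I40E_SR_NVM_OEM_VERSION_PTR and laid out as consecutive
 * words (length, capabilities, gen/snap, release); any layout mismatch
 * above returns early and leaves hw->nvm untouched.
 */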

/**
 * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
 * @pf: board private structure
 **/
static int i40e_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;

	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, pf->state);
		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	} else {
		pf->pfr_count++;
	}
	return ret;
}

/**
 * i40e_rebuild - rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 val;
	int v;

	if (test_bit(__I40E_DOWN, pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}
	i40e_get_oem_version(&pf->hw);

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
		i40e_verify_eeprom(pf);

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
	/* do basic switch setup */
	if (!lock_acquired)
		rtnl_lock();
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_unlock;

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (vsi->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					vsi->uplink_seid = pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (vsi->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(vsi);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_unlock;
		}
	}

	if (vsi->mqprio_qopt.max_rate[0]) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 credits = 0;

		do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
		if (ret)
			goto end_unlock;

		credits = max_tx_rate;
		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&vsi->back->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			max_tx_rate,
			credits,
			vsi->seid);
	}

	ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
	if (ret)
		goto end_unlock;

	/* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs
	 * for this main VSI if they exist
	 */
	ret = i40e_rebuild_channels(vsi);
	if (ret)
		goto end_unlock;

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
#define I40E_REG_MSS          0x000E64DC
#define I40E_REG_MSS_MIN_MASK 0x3FF0000
#define I40E_64BYTE_MSS       0x400000
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* Release the RTNL lock before we start resetting VFs */
	if (!lock_acquired)
		rtnl_unlock();

	/* Restore promiscuous settings */
	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
	if (ret)
		dev_warn(&pf->pdev->dev,
			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
			 pf->cur_promisc ? "on" : "off",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	i40e_reset_all_vfs(pf, true);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* We've already released the lock, so don't do it again */
	goto end_core_reset;

end_unlock:
	if (!lock_acquired)
		rtnl_unlock();
end_core_reset:
	clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
}
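
/* The rebuild above follows strict bring-up order: AdminQ first (nothing
 * else can talk to firmware without it), then capabilities and HMC, then
 * the software switch model, and only at the very end are VSIs unquiesced,
 * VFs reset, and the driver version resent to firmware.
 */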

/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired)
{
	int ret;
	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_reset(pf);
	if (!ret)
		i40e_rebuild(pf, reinit, lock_acquired);
}

/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
	i40e_prep_for_reset(pf, lock_acquired);
	i40e_reset_and_rebuild(pf, false, lock_acquired);
}

/**
 * i40e_handle_mdd_event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious vfs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
			i40e_service_event_schedule(pf);
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}
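
/* MDD handling reads three layers of registers: the global I40E_GL_MDET_*
 * registers say an event fired and where, the per-PF I40E_PF_MDET_*
 * registers confirm the PF's own queues were the offender (triggering a PF
 * reset), and the per-VF I40E_VP_MDET_* registers identify misbehaving
 * VFs, which are disabled after too many events.
 */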

static const char *i40e_tunnel_name(struct i40e_udp_port_config *port)
{
	switch (port->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		return "vxlan";
	case UDP_TUNNEL_TYPE_GENEVE:
		return "geneve";
	default:
		return "unknown";
	}
}

/**
 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters(struct i40e_pf *pf)
{
	int i;

	/* loop through and set pending bit for all active UDP filters */
	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->udp_ports[i].port)
			pf->pending_udp_bitmap |= BIT_ULL(i);
	}

	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
}

/**
 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u16 port;
	int i;

	if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
		return;

	pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->pending_udp_bitmap & BIT_ULL(i)) {
			pf->pending_udp_bitmap &= ~BIT_ULL(i);
			port = pf->udp_ports[i].port;
			if (port)
				ret = i40e_aq_add_udp_tunnel(hw, port,
							pf->udp_ports[i].type,
							NULL, NULL);
			else
				ret = i40e_aq_del_udp_tunnel(hw, i, NULL);

			if (ret) {
				dev_info(&pf->pdev->dev,
					 "%s %s port %d, index %d failed, err %s aq_err %s\n",
					 i40e_tunnel_name(&pf->udp_ports[i]),
					 port ? "add" : "delete",
					 port, i,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				pf->udp_ports[i].port = 0;
			}
		}
	}
}

/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;

	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
		return;

	i40e_detect_recover_hung(pf);
	i40e_sync_filters_subtask(pf);
	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	if (pf->flags & I40E_FLAG_CLIENT_RESET) {
		/* Client subtask will reopen next time through. */
		i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
		pf->flags &= ~I40E_FLAG_CLIENT_RESET;
	} else {
		i40e_client_subtask(pf);
		if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
			i40e_notify_client_of_l2_param_changes(
							pf->vsi[pf->lan_vsi]);
			pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
		}
	}
	i40e_sync_filters_subtask(pf);
	i40e_sync_udp_filters_subtask(pf);
	i40e_clean_adminq_subtask(pf);

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		i40e_service_event_schedule(pf);
}

/**
 * i40e_service_timer - timer callback
 * @t: pointer to the timer_list containing our PF struct
 **/
static void i40e_service_timer(struct timer_list *t)
{
	struct i40e_pf *pf = from_timer(pf, t, service_timer);

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}

/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fdsb_msix;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}

/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: the VSI being configured
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	struct i40e_ring **next_rings;
	int size;
	int ret = 0;

	/* allocate memory for both Tx, XDP Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
	       (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
	if (i40e_enabled_xdp_vsi(vsi)) {
		vsi->xdp_rings = next_rings;
		next_rings += vsi->alloc_queue_pairs;
	}
	vsi->rx_rings = next_rings;

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}
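
/* Note the single-allocation layout used above: one kzalloc holds all ring
 * pointers back to back as [tx_rings | xdp_rings | rx_rings], which is why
 * only vsi->tx_rings is ever passed to kfree() when the arrays are freed.
 */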

/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_VSI_DOWN, vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->int_rate_limit = 0;
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	hash_init(vsi->mac_filter_hash);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_hash_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: the VSI being cleaned
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
	vsi->xdp_rings = NULL;
}

/**
 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
 * and lookup table
 * @vsi: Pointer to VSI structure
 **/
static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
{
	if (!vsi)
		return;

	kfree(vsi->rss_hkey_user);
	vsi->rss_hkey_user = NULL;

	kfree(vsi->rss_lut_user);
	vsi->rss_lut_user = NULL;
}

/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);
	i40e_clear_rss_config_user(vsi);

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}

/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/
static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings && vsi->tx_rings[0]) {
		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			vsi->tx_rings[i] = NULL;
			vsi->rx_rings[i] = NULL;
			if (vsi->xdp_rings)
				vsi->xdp_rings[i] = NULL;
		}
	}
}

/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		/* allocate space for both Tx and Rx in one shot */
		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		ring->tx_itr_setting = pf->tx_itr_default;
		vsi->tx_rings[i] = ring++;

		if (!i40e_enabled_xdp_vsi(vsi))
			goto setup_rx;

		ring->queue_index = vsi->alloc_queue_pairs + i;
		ring->reg_idx = vsi->base_queue + ring->queue_index;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = NULL;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		set_ring_xdp(ring);
		ring->tx_itr_setting = pf->tx_itr_default;
		vsi->xdp_rings[i] = ring++;

setup_rx:
		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		ring->rx_itr_setting = pf->rx_itr_default;
		vsi->rx_rings[i] = ring;
	}

	return 0;

err_out:
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}
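
/* Each queue pair gets one kcalloc of two or three ring structs (Tx, an
 * optional XDP Tx, then Rx) and the 'ring' pointer is simply incremented
 * to walk them, so freeing tx_rings[i] via kfree_rcu() releases the Rx and
 * XDP rings of the same pair as well.
 */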

/**
 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
 * @pf: board private structure
 * @vectors: the number of MSI-X vectors to request
 *
 * Returns the number of vectors reserved, or error
 **/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					I40E_MIN_MSIX, vectors);
	if (vectors < 0) {
		dev_info(&pf->pdev->dev,
			 "MSI-X vector reservation failed: %d\n", vectors);
		vectors = 0;
	}

	return vectors;
}

/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns the number of vectors reserved or negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int cpus, extra_vectors;
	int vectors_left;
	int v_budget, i;
	int v_actual;
	int iwarp_requested = 0;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *	- Queues being used for RSS.
	 *		We don't need as many as max_rss_size vectors.
	 *		use rss_size instead in the calculation since that
	 *		is governed by number of cpus in the system.
	 *	- assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
	 *   - The CPU count within the NUMA node if iWARP is enabled
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}

	/* reserve some vectors for the main PF traffic queues. Initially we
	 * only reserve at most 50% of the available vectors, in the case that
	 * the number of online CPUs is large. This ensures that we can enable
	 * extra features as well. Once we've enabled the other features, we
	 * will use any remaining vectors to reach as close as we can to the
	 * number of online CPUs.
	 */
	cpus = num_online_cpus();
	pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
	vectors_left -= pf->num_lan_msix;

	/* reserve one vector for sideband flow director */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (vectors_left) {
			pf->num_fdsb_msix = 1;
			v_budget++;
			vectors_left--;
		} else {
			pf->num_fdsb_msix = 0;
		}
	}

	/* can we reserve enough for iWARP? */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		iwarp_requested = pf->num_iwarp_msix;

		if (!vectors_left)
			pf->num_iwarp_msix = 0;
		else if (vectors_left < pf->num_iwarp_msix)
			pf->num_iwarp_msix = 1;
		v_budget += pf->num_iwarp_msix;
		vectors_left -= pf->num_iwarp_msix;
	}

	/* any vectors left over go for VMDq support */
	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
		int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
		int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);

		if (!vectors_left) {
			pf->num_vmdq_msix = 0;
			pf->num_vmdq_qps = 0;
		} else {
			/* if we're short on vectors for what's desired, we limit
			 * the queues per vmdq.  If this is still more than are
			 * available, the user will need to change the number of
			 * queues/vectors used by the PF later with the ethtool
			 * channels command
			 */
			if (vmdq_vecs < vmdq_vecs_wanted)
				pf->num_vmdq_qps = 1;
			pf->num_vmdq_msix = pf->num_vmdq_qps;

			v_budget += vmdq_vecs;
			vectors_left -= vmdq_vecs;
		}
	}

	/* On systems with a large number of SMP cores, we previously limited
	 * the number of vectors for num_lan_msix to be at most 50% of the
	 * available vectors, to allow for other features. Now, we add back
	 * the remaining vectors. However, we ensure that the total
	 * num_lan_msix will not exceed num_online_cpus(). To do this, we
	 * calculate the number of vectors we can add without going over the
	 * cap of CPUs. For systems with a small number of CPUs this will be
	 * zero.
	 */
	extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
	pf->num_lan_msix += extra_vectors;
	vectors_left -= extra_vectors;

	WARN(vectors_left < 0,
	     "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");

	v_budget += pf->num_lan_msix;
	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	v_actual = i40e_reserve_msix_vectors(pf, v_budget);

	if (v_actual < I40E_MIN_MSIX) {
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		pci_disable_msix(pf->pdev);
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (v_actual != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
		int vec;

		dev_info(&pf->pdev->dev,
			 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
			 v_actual, v_budget);
		/* reserve the misc vector */
		vec = v_actual - 1;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;
		pf->num_vmdq_qps = 1;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_iwarp_msix = 1;
			} else {
				pf->num_lan_msix = 2;
			}
			break;
		default:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_iwarp_msix = min_t(int, (vec / 3),
						 iwarp_requested);
				pf->num_vmdq_vsis = min_t(int, (vec / 3),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			} else {
				pf->num_vmdq_vsis = min_t(int, (vec / 2),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			}
			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
				pf->num_fdsb_msix = 1;
				vec--;
			}
			pf->num_lan_msix = min_t(int,
			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
							      pf->num_lan_msix);
			pf->num_lan_qps = pf->num_lan_msix;
			break;
		}
	}

	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
	    (pf->num_fdsb_msix == 0)) {
		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	}
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (pf->num_iwarp_msix == 0)) {
		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
	}
	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
		   pf->num_lan_msix,
		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
		   pf->num_fdsb_msix,
		   pf->num_iwarp_msix);

	return v_actual;
}
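
/* Vector budgeting above is best effort: the misc vector comes first, LAN
 * queues get at most half the pool initially, then sideband flow director,
 * iWARP and VMDq take their shares, and any remainder is handed back to
 * LAN up to the CPU count. Features whose reservation ends up at zero are
 * disabled rather than failing the probe.
 */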

/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the vsi struct
 * @cpu: cpu to be used on affinity_mask
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
{
	struct i40e_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);

	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi,
			       i40e_napi_poll, NAPI_POLL_WEIGHT);

	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	q_vector->tx.latency_range = I40E_LOW_LATENCY;

	/* tie q_vector and vsi together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}
/**
 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err, v_idx, num_q_vectors, current_cpu;

	/* if not MSIX, give the one vector only to the LAN VSI */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_q_vectors = vsi->num_q_vectors;
	else if (vsi == pf->vsi[pf->lan_vsi])
		num_q_vectors = 1;
	else
		return -EINVAL;

	current_cpu = cpumask_first(cpu_online_mask);

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
		if (err)
			goto err_out;
		current_cpu = cpumask_next(current_cpu, cpu_online_mask);
		if (unlikely(current_cpu >= nr_cpu_ids))
			current_cpu = cpumask_first(cpu_online_mask);
	}

	return 0;

err_out:
	while (v_idx--)
		i40e_free_q_vector(vsi, v_idx);

	return err;
}
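/* Note: the CPU handed to i40e_vsi_alloc_q_vector() above only seeds the
 * vector's affinity hint; vectors are spread round-robin across the CPUs
 * that are online at allocation time, wrapping back to the first online
 * CPU once the end of the mask is reached.
 */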
/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 **/
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
				       I40E_FLAG_IWARP_ENABLED	|
				       I40E_FLAG_RSS_ENABLED	|
				       I40E_FLAG_DCB_CAPABLE	|
				       I40E_FLAG_DCB_ENABLED	|
				       I40E_FLAG_SRIOV_ENABLED	|
				       I40E_FLAG_FD_SB_ENABLED	|
				       I40E_FLAG_FD_ATR_ENABLED	|
				       I40E_FLAG_VMDQ_ENABLED);
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		vectors = 1;  /* one MSI or Legacy vector */
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile) {
		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
		return -ENOMEM;
	}
	pf->irq_pile->num_entries = vectors;
	pf->irq_pile->search_hint = 0;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}
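/* Interrupt scheme fallback order: MSI-X is tried first; if it fails,
 * every feature that depends on multiple vectors is turned off and MSI
 * is tried; if MSI also fails the driver runs on a single legacy IRQ.
 * Either way, irq_pile tracks exactly "vectors" entries, with entry 0
 * reserved above for the misc/AdminQ interrupt.
 */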
/**
 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
 * @pf: private board data structure
 *
 * Restore the interrupt scheme that was cleared when we suspended the
 * device. This should be called during resume to re-allocate the q_vectors
 * and reacquire IRQs.
 */
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
{
	int err, i;

	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
	 * scheme. We need to re-enable them here in order to attempt to
	 * re-acquire the MSI or MSI-X vectors
	 */
	pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);

	err = i40e_init_interrupt_scheme(pf);
	if (err)
		return err;

	/* Now that we've re-acquired IRQs, we need to remap the vectors and
	 * rings together again.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
			if (err)
				goto err_unwind;
			i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
		}
	}

	err = i40e_setup_misc_vector(pf);
	if (err)
		goto err_unwind;

	return 0;

err_unwind:
	while (i--) {
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	}

	return err;
}
/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the IRQ once, the first time through. */
	if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}
/**
 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Return 0 on success, negative on failure
 */
static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			   u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		ret = i40e_aq_get_rss_key(hw, vsi->id,
			(struct i40e_aqc_get_set_rss_key_data *)seed);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS key, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;

		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	return ret;
}
/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
			       const u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	/* Fill out hash function seed */
	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		if (vsi->type == I40E_VSI_MAIN) {
			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
		}
	}

	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (vsi->type == I40E_VSI_MAIN) {
			if (lut_size != I40E_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
		}
	}
	i40e_flush(hw);

	return 0;
}
/**
 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
			    u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 i;

	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
	}
	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (lut_size != I40E_HLUT_ARRAY_SIZE)
			return -EINVAL;
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
	}

	return 0;
}
/**
 * i40e_config_rss - Configure RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
}
/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}
/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: Pointer to board private structure
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
		       u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}
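/* The default LUT simply round-robins queue indices: with
 * rss_table_size = 8 and rss_size = 3 the table becomes
 * 0 1 2 0 1 2 0 1, spreading hash buckets evenly over the
 * first rss_size queues.
 */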
/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 */
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;
	int ret;

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Determine the RSS table size based on the hardware capabilities */
	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);

	/* Determine the RSS size of the VSI */
	if (!vsi->rss_size) {
		u16 qcount;

		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
	}
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use user configured lut if there is one, otherwise use default */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}
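/* At this point the hash enable (HENA) bits, the LUT size bit in
 * PFQF_CTL_0 (128 vs 512 entries), the lookup table and the hash key
 * are all programmed, either from user-supplied values or from the
 * defaults computed above.
 */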
/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
 * Note: expects to be called while under rtnl_lock()
 */
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	new_rss_size = min_t(int, queue_count, pf->rss_size_max);

	if (queue_count != vsi->num_queue_pairs) {
		u16 qcount;

		vsi->req_queue_pairs = queue_count;
		i40e_prep_for_reset(pf, true);

		pf->alloc_rss_size = new_rss_size;

		i40e_reset_and_rebuild(pf, true, true);

		/* Discard the user configured hash keys and lut, if fewer
		 * queues are enabled.
		 */
		if (queue_count < vsi->rss_size) {
			i40e_clear_rss_config_user(vsi);
			dev_dbg(&pf->pdev->dev,
				"discard user configured hash keys and lut\n");
		}

		/* Reset vsi->rss_size, as number of enabled queues changed */
		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);

		i40e_pf_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
		 vsi->req_queue_pairs, pf->rss_size_max);
	return pf->alloc_rss_size;
}
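/* Callers (e.g. the ethtool channel-count path) treat the non-zero
 * return as the RSS queue count actually in effect, which may have been
 * clamped to rss_size_max rather than the count that was requested.
 */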
/**
 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
 * @pf: board private structure
 */
i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
{
	i40e_status status;
	bool min_valid, max_valid;
	u32 max_bw, min_bw;

	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
					   &min_valid, &max_valid);

	if (!status) {
		if (min_valid)
			pf->min_bw = min_bw;
		if (max_valid)
			pf->max_bw = max_bw;
	}

	return status;
}
/**
 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
 * @pf: board private structure
 */
i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
{
	struct i40e_aqc_configure_partition_bw_data bw_data;
	i40e_status status;

	/* Set the valid bit for this PF */
	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
	bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
	bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;

	/* Set the new bandwidths */
	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);

	return status;
}
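/* The bandwidth values written above only live in the alternate RAM until
 * i40e_commit_partition_bw_setting() below copies them into the NVM, so
 * on their own they would not survive a power cycle.
 */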
/**
 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 */
i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;
	u16 nvm_word;

	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = I40E_NOT_SUPPORTED;
		goto bw_commit_out;
	}

	/* Acquire NVM for read access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Read word 0x10 of NVM - SW compatibility word 1 */
	ret = i40e_aq_read_nvm(&pf->hw,
			       I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Wait a bit for NVM release to complete */
	msleep(50);

	/* Acquire NVM for write access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
	/* Write it back out unchanged to initiate update NVM,
	 * which will force a write of the shadow (alt) RAM to
	 * the NVM - thus storing the bandwidth values permanently.
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
				 I40E_SR_NVM_CONTROL_WORD,
				 0x10, sizeof(nvm_word),
				 &nvm_word, true, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

	return ret;
}
/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED     |
		    I40E_FLAG_MSIX_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
	pf->alloc_rss_size = 1;
	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
					   num_online_cpus());
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
		if (i40e_get_partition_bw_setting(pf)) {
			dev_warn(&pf->pdev->dev,
				 "Could not get partition bw settings\n");
		} else {
			dev_info(&pf->pdev->dev,
				 "Partition BW Min = %8.8x, Max = %8.8x\n",
				 pf->min_bw, pf->max_bw);

			/* nudge the Tx scheduler */
			i40e_set_partition_bw_setting(pf);
		}
	}

	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
		    pf->hw.num_partitions > 1)
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		else
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (pf->hw.mac.type == I40E_MAC_X722) {
		pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
				    I40E_HW_128_QP_RSS_CAPABLE |
				    I40E_HW_ATR_EVICT_CAPABLE |
				    I40E_HW_WB_ON_ITR_CAPABLE |
				    I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
				    I40E_HW_NO_PCI_LINK_CHECK |
				    I40E_HW_USE_SET_LLDP_MIB |
				    I40E_HW_GENEVE_OFFLOAD_CAPABLE |
				    I40E_HW_PTP_L4_CAPABLE |
				    I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
				    I40E_HW_OUTER_UDP_CSUM_CAPABLE);

#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
		if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
		    I40E_FDEVICT_PCTYPE_DEFAULT) {
			dev_warn(&pf->pdev->dev,
				 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
			pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
		}
	} else if ((pf->hw.aq.api_maj_ver > 1) ||
		   ((pf->hw.aq.api_maj_ver == 1) &&
		    (pf->hw.aq.api_min_ver > 4))) {
		/* Supported in FW API version higher than 1.4 */
		pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
	}

	/* Enable HW ATR eviction if possible */
	if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
		pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;

	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	    (pf->hw.aq.fw_maj_ver < 4))) {
		pf->hw_features |= I40E_HW_RESTART_AUTONEG;
		/* No DCB support for FW < v4.33 */
		pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
	}

	/* Disable FW LLDP if FW < v4.3 */
	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)))
		pf->hw_features |= I40E_HW_STOP_FW_LLDP;

	/* Use the FW Set LLDP MIB API if FW > v4.40 */
	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
	    (pf->hw.aq.fw_maj_ver >= 5)))
		pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;

	/* Enable PTP L4 if FW > v6.0 */
	if (pf->hw.mac.type == I40E_MAC_XL710 &&
	    pf->hw.aq.fw_maj_ver >= 6)
		pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;

	if (pf->hw.func_caps.vmdq) {
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
	}

	if (pf->hw.func_caps.iwarp) {
		pf->flags |= I40E_FLAG_IWARP_ENABLED;
		/* IWARP needs one extra vector for CQP just like MISC.*/
		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
	}

#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* By default FW has this off for performance reasons */
	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;

	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	pf->tx_timeout_recovery_level = 1;

	mutex_init(&pf->switch_mutex);

sw_init_done:
	return err;
}
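/* Note the split maintained above: pf->hw_features records what this
 * MAC/firmware combination is capable of, while pf->flags tracks which
 * of those capabilities the driver has actually enabled.
 */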
/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * returns a bool to indicate if reset needs to happen
 **/
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
{
	bool need_reset = false;

	/* Check if Flow Director n-tuple support was enabled or disabled. If
	 * the state changed, we need to reset.
	 */
	if (features & NETIF_F_NTUPLE) {
		/* Enable filters and mark for reset */
		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
			need_reset = true;
		/* enable FD_SB only if there is MSI-X vector and no cloud
		 * filters exist
		 */
		if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
		}
	} else {
		/* turn off filters, mark for reset and clear SW filter list */
		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
			need_reset = true;
			i40e_fdir_filter_exit(pf);
		}
		pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_SB_AUTO_DISABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;

		/* reset fd counters */
		pf->fd_add_err = 0;
		pf->fd_atr_cnt = 0;
		/* if ATR was auto disabled it can be re-enabled. */
		if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
		}
	}
	return need_reset;
}
/**
 * i40e_clear_rss_lut - clear the rx hash lookup table
 * @vsi: the VSI being configured
 */
static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	if (vsi->type == I40E_VSI_MAIN) {
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			wr32(hw, I40E_PFQF_HLUT(i), 0);
	} else if (vsi->type == I40E_VSI_SRIOV) {
		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
	} else {
		dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
	}
}
/**
 * i40e_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 * Note: expects to be called while under rtnl_lock()
 */
static int i40e_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	bool need_reset;

	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
		i40e_pf_config_rss(pf);
	else if (!(features & NETIF_F_RXHASH) &&
		 netdev->features & NETIF_F_RXHASH)
		i40e_clear_rss_lut(vsi);

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);

	if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
		dev_err(&pf->pdev->dev,
			"Offloaded tc filters active, can't turn hw_tc_offload off");
		return -EINVAL;
	}

	need_reset = i40e_set_ntuple(pf, features);

	if (need_reset)
		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);

	return 0;
}
/**
 * i40e_get_udp_port_idx - Look up a possibly offloaded Rx UDP port
 * @pf: board private structure
 * @port: The UDP port to look up
 *
 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
 */
static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
{
	u8 i;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->udp_ports[i].port == port)
			return i;
	}

	return i;
}
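/* Because the loop index falls through when no slot matches, a miss
 * returns I40E_MAX_PF_UDP_OFFLOAD_PORTS, and looking up port 0 doubles
 * as a search for the first free slot (see i40e_udp_tunnel_add below).
 */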
/**
 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 */
static void i40e_udp_tunnel_add(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u16 port = ntohs(ti->port);
	u8 next_idx;
	u8 idx;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "port %d already offloaded\n", port);
		return;
	}

	/* Now check if there is space to add the new port */
	next_idx = i40e_get_udp_port_idx(pf, 0);

	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
			    port);
		return;
	}

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
			return;
		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
		break;
	default:
		return;
	}

	/* New port: add it and mark its index in the bitmap */
	pf->udp_ports[next_idx].port = port;
	pf->pending_udp_bitmap |= BIT_ULL(next_idx);
	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
}
/**
 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 */
static void i40e_udp_tunnel_del(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u16 port = ntohs(ti->port);
	u8 idx;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
		goto not_found;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
			goto not_found;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
			goto not_found;
		break;
	default:
		goto not_found;
	}

	/* if port exists, set it to 0 (mark for deletion)
	 * and make it pending
	 */
	pf->udp_ports[idx].port = 0;
	pf->pending_udp_bitmap |= BIT_ULL(idx);
	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;

	return;
not_found:
	netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
		    port);
}
static int i40e_get_phys_port_id(struct net_device *netdev,
				 struct netdev_phys_item_id *ppid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;

	if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);

	return 0;
}
/**
 * i40e_ndo_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @flags: instructions from stack about fdb operation
 */
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid,
			    u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_pf *pf = np->vsi->back;
	int err = 0;

	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return -EINVAL;
	}

	/* Hardware does not support aging addresses so if a
	 * ndm_state is given only allow permanent addresses
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
/**
 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 *
 * Inserts a new hardware bridge if not already created and
 * enables the bridging mode requested (VEB or VEPA). If the
 * hardware bridge has already been inserted and the request
 * is to change the mode then that requires a PF reset to
 * allow rebuild of the components with required hardware
 * bridge mode enabled.
 *
 * Note: expects to be called while under rtnl_lock()
 **/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
				   struct nlmsghdr *nlh,
				   u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	struct nlattr *attr, *br_spec;
	int i, rem;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if ((mode != BRIDGE_MODE_VEPA) &&
		    (mode != BRIDGE_MODE_VEB))
			return -EINVAL;

		/* Insert a new HW bridge */
		if (!veb) {
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
			if (veb) {
				veb->bridge_mode = mode;
				i40e_config_bridge_mode(veb);
			} else {
				/* No Bridge HW offload available */
				return -ENOENT;
			}
			break;
		} else if (mode != veb->bridge_mode) {
			/* Existing HW bridge but different mode needs reset */
			veb->bridge_mode = mode;
			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
			if (mode == BRIDGE_MODE_VEB)
				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
			break;
		}
	}

	return 0;
}
/**
 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process id
 * @seq: RTNL message seq #
 * @dev: the netdev being configured
 * @filter_mask: unused
 * @nlflags: netlink flags passed in
 *
 * Return the mode (VEB or VEPA) in which the hardware bridge is operating.
 **/
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev,
				   u32 __always_unused filter_mask,
				   int nlflags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	int i;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for the PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	if (!veb)
		return 0;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
				       0, 0, nlflags, filter_mask, NULL);
}
/**
 * i40e_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
static netdev_features_t i40e_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame.  We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes.  If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	/* MACLEN can support at most 63 words */
	len = skb_network_header(skb) - skb->data;
	if (len & ~(63 * 2))
		goto out_err;

	/* IPLEN and EIPLEN can support at most 127 dwords */
	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(127 * 4))
		goto out_err;

	if (skb->encapsulation) {
		/* L4TUNLEN can support 127 words */
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(127 * 2))
			goto out_err;

		/* IPLEN can support at most 127 dwords */
		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(127 * 4))
			goto out_err;
	}

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;
out_err:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
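/* The header-length checks above mirror the Tx descriptor field widths:
 * MACLEN is expressed in words (at most 63 * 2 = 126 bytes of L2 header)
 * and the IP/tunnel lengths in dwords (at most 127 * 4 = 508 bytes), so
 * any longer or misaligned header cannot be described to the hardware
 * and the frame falls back to software checksum/GSO.
 */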
/**
 * i40e_xdp_setup - add/remove an XDP program
 * @vsi: VSI to be changed
 * @prog: XDP program
 **/
static int i40e_xdp_setup(struct i40e_vsi *vsi,
			  struct bpf_prog *prog)
{
	int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct i40e_pf *pf = vsi->back;
	struct bpf_prog *old_prog;
	bool need_reset;
	int i;

	/* Don't allow frames that span over multiple buffers */
	if (frame_size > vsi->rx_buf_len)
		return -EINVAL;

	if (!i40e_enabled_xdp_vsi(vsi) && !prog)
		return 0;

	/* When turning XDP on->off/off->on we reset and rebuild the rings. */
	need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);

	if (need_reset)
		i40e_prep_for_reset(pf, true);

	old_prog = xchg(&vsi->xdp_prog, prog);

	if (need_reset)
		i40e_reset_and_rebuild(pf, true, true);

	for (i = 0; i < vsi->num_queue_pairs; i++)
		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);

	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}
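/* Example of the frame-size check above: with a 1500 byte MTU the
 * worst-case frame is 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
 * 4 (VLAN_HLEN) = 1522 bytes, which must fit in a single Rx buffer
 * because an XDP program sees exactly one contiguous buffer per frame.
 */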
/**
 * i40e_xdp - implements ndo_bpf for i40e
 * @dev: netdevice
 * @xdp: XDP command
 **/
static int i40e_xdp(struct net_device *dev,
		    struct netdev_bpf *xdp)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return i40e_xdp_setup(vsi, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
		xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
}
static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= __i40e_setup_tc,
	.ndo_set_features	= i40e_set_features,
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
	.ndo_udp_tunnel_add	= i40e_udp_tunnel_add,
	.ndo_udp_tunnel_del	= i40e_udp_tunnel_del,
	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
	.ndo_fdb_add		= i40e_ndo_fdb_add,
	.ndo_features_check	= i40e_features_check,
	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
	.ndo_bpf		= i40e_xdp,
};
/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 broadcast[ETH_ALEN];
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;
	netdev_features_t hw_enc_features;
	netdev_features_t hw_features;

	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	hw_enc_features = NETIF_F_SG			|
			  NETIF_F_IP_CSUM		|
			  NETIF_F_IPV6_CSUM		|
			  NETIF_F_HIGHDMA		|
			  NETIF_F_SOFT_FEATURES		|
			  NETIF_F_TSO			|
			  NETIF_F_TSO_ECN		|
			  NETIF_F_TSO6			|
			  NETIF_F_GSO_GRE		|
			  NETIF_F_GSO_GRE_CSUM		|
			  NETIF_F_GSO_PARTIAL		|
			  NETIF_F_GSO_UDP_TUNNEL	|
			  NETIF_F_GSO_UDP_TUNNEL_CSUM	|
			  NETIF_F_SCTP_CRC		|
			  NETIF_F_RXHASH		|
			  NETIF_F_RXCSUM		|
			  0;

	if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	netdev->hw_enc_features |= hw_enc_features;

	/* record features VLANs can make use of */
	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;

	hw_features = hw_enc_features		|
		      NETIF_F_HW_VLAN_CTAG_TX	|
		      NETIF_F_HW_VLAN_CTAG_RX;

	netdev->hw_features |= hw_features;

	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary for two reasons. First,
		 * some older NVM configurations load a default MAC-VLAN
		 * filter that will accept any tagged packet, and we want to
		 * replace this with a normal filter. Additionally, it is
		 * possible our MAC address was provided by the platform using
		 * Open Firmware or similar.
		 *
		 * Thus, we need to remove the default filter and install one
		 * specific to the MAC address.
		 */
		i40e_rm_default_mac_filter(vsi, mac_addr);
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		i40e_add_mac_filter(vsi, mac_addr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	} else {
		/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
		 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
		 * the end, which is 4 bytes long, so force truncation of the
		 * original name by IFNAMSIZ - 4
		 */
		snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
			 IFNAMSIZ - 4,
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		i40e_add_mac_filter(vsi, mac_addr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Add the broadcast filter so that we initially will receive
	 * broadcast packets. Note that when a new VLAN is first added the
	 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
	 * specific filters as part of transitioning into "vlan" operation.
	 * When more VLANs are added, the driver will copy each existing MAC
	 * filter and add it for the new VLAN.
	 *
	 * Broadcast filters are handled specially by
	 * i40e_sync_filters_subtask, as the driver must set the broadcast
	 * promiscuous bit instead of adding this directly as a MAC/VLAN
	 * filter. The subtask will update the correct broadcast promiscuous
	 * bits as VLANs become active or inactive.
	 */
	eth_broadcast_addr(broadcast);
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_add_mac_filter(vsi, broadcast);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);

	/* MTU range: 68 - 9706 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;

	return 0;
}
/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 */
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* remove default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}
/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
 **/
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
{
	struct i40e_veb *veb;
	struct i40e_pf *pf = vsi->back;

	/* Uplink is not a bridge so default to VEB */
	if (vsi->veb_idx == I40E_NO_VEB)
		return 1;

	veb = pf->veb[vsi->veb_idx];
	if (!veb) {
		dev_info(&pf->pdev->dev,
			 "There is no veb associated with the bridge\n");
		return -ENOENT;
	}

	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
		return 0;
	} else {
		/* Uplink is a bridge in VEB mode */
		return 1;
	}

	/* VEPA is now default bridge, so return 0 */
	return 0;
}
/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;

	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get PF vsi config, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENOENT;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* Source pruning is enabled by default, so the flag is
		 * negative logic - if it's set, we need to fiddle with
		 * the VSI to disable source pruning.
		 */
		if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
		}

		/* MFP mode setup queue map and update VSI */
		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * For MFP case the iSCSI PF would use this
			 * flow to enable LAN+iSCSI TC.
			 */
			ret = i40e_vsi_config_tc(vsi, enabled_tc);
			if (ret) {
				/* Single TC condition is not fatal,
				 * message and continue
				 */
				dev_info(&pf->pdev->dev,
					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
					 enabled_tc,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
			}
		}
		break;

	case I40E_VSI_FDIR:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
			ctxt.info.queueing_opt_flags |=
				(I40E_AQ_VSI_QUE_OPT_TCP_ENA |
				 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
		}

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_IWARP:
		/* send down message to iWARP */
		break;

	default:
		return -ENODEV;
	}

	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			ret = -ENOENT;
			goto err;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;
	}

	vsi->active_filters = 0;
	clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	/* If macvlan filters already exist, force them to get loaded */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		f->state = I40E_FILTER_NEW;
		f_count++;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (f_count) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		pf->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* Update VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* VSI is already added so not tearing that up */
		ret = 0;
	}

err:
	return ret;
}
/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n, bkt;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* clear the sync flag on all filters */
	if (vsi->netdev) {
		__dev_uc_unsync(vsi->netdev, NULL);
		__dev_mc_unsync(vsi->netdev, NULL);
	}

	/* make sure any remaining filters are marked for deletion */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	i40e_sync_vsi_filters(vsi);

	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet.  We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;      /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;     /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}
/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vsi->back;

	if (vsi->q_vectors[0]) {
		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			 vsi->seid);
		return -EEXIST;
	}

	if (vsi->base_vector) {
		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			 vsi->seid, vsi->base_vector);
		return -EEXIST;
	}

	ret = i40e_vsi_alloc_q_vectors(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
			 vsi->num_q_vectors, vsi->seid, ret);
		vsi->num_q_vectors = 0;
		goto vector_setup_out;
	}

	/* In Legacy mode, we do not have to get any other vector since we
	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return ret;
	if (vsi->num_q_vectors)
		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
						 vsi->num_q_vectors, vsi->idx);
	if (vsi->base_vector < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}

vector_setup_out:
	return ret;
}
/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	u16 alloc_queue_pairs;
	struct i40e_pf *pf;
	u8 enabled_tc;
	int ret;

	if (!vsi)
		return NULL;

	pf = vsi->back;

	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err %d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	if (vsi->type == I40E_VSI_MAIN)
		i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}
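/* Queue accounting doubles when XDP is on: each queue pair carries an
 * extra XDP Tx ring, which is why alloc_queue_pairs is multiplied by 2
 * before entries are reserved from the qp_pile.
 */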
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds a
 * VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	u16 alloc_queue_pairs;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
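
/* Typical callers of i40e_vsi_setup(): the main LAN VSI is created with
 * i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0) from
 * i40e_setup_pf_switch() below, while SR-IOV VSIs pass I40E_VSI_SRIOV with
 * the VF id in param1 so it can be stored in vsi->vf_id.
 */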
/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
					le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
	}

out:
	return ret;
}
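
/* The tc_bw_max unpacking above concatenates two little-endian 16-bit words
 * into one 32-bit value and then pulls a small per-TC field out of each
 * 4-bit nibble with (tc_bw_max >> (i * 4)) & 0x7. With eight traffic
 * classes this consumes exactly the 32 bits assembled from tc_bw_max[0]
 * and tc_bw_max[1].
 */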
/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
	int ret = -ENOENT;
	struct i40e_veb *veb;
	int i;

	/* Need to protect the allocation of switch elements at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VEB list may be fragmented if VEB creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot, looping back around if necessary
	 */
	i = 0;
	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
		i++;
	if (i >= I40E_MAX_VEB) {
		ret = -ENOMEM;
		goto err_alloc_veb;  /* out of VEB slots! */
	}

	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
	if (!veb) {
		ret = -ENOMEM;
		goto err_alloc_veb;
	}
	veb->pf = pf;
	veb->idx = i;
	veb->enabled_tc = 1;

	pf->veb[i] = veb;
	ret = i;
err_alloc_veb:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
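
/* pf->switch_mutex serializes allocation and teardown of the pf->veb[]
 * slots so that, for instance, a debugfs-triggered VEB add and a
 * reset-path release cannot race for the same slot; i40e_veb_clear()
 * below takes the same lock before NULLing the slot.
 */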
/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}
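
/* The recursion above is bounded: each nested call consumes one VEB from
 * the fixed pf->veb[] array and the switch topology is a tree, so the
 * depth can never exceed I40E_MAX_VEB levels and kernel stack usage stays
 * small.
 */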
/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/
static void i40e_veb_clear(struct i40e_veb *veb)
{
	if (!veb)
		return;

	if (veb->pf) {
		struct i40e_pf *pf = veb->pf;

		mutex_lock(&pf->switch_mutex);
		if (pf->veb[veb->idx] == veb)
			pf->veb[veb->idx] = NULL;
		mutex_unlock(&pf->switch_mutex);
	}

	kfree(veb);
}
/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}
/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
	int ret;

	/* get a VEB from the hardware */
	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, false,
			      &veb->seid, enable_stats, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}
/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
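
/* Usage note: per the kernel-doc above, passing both uplink_seid and
 * vsi_seid as zero requests a floating VEB (a relay attached to nothing),
 * e.g. veb = i40e_veb_setup(pf, 0, 0, 0, 0); any other mix of one zero
 * and one non-zero seid is rejected by the "floating relay" check at the
 * top of the function.
 */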
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}
/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}
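
/* The get_switch_config admin queue command is paged: each call fills
 * sw_config with up to I40E_AQ_LARGE_BUF worth of elements and hands back
 * a continuation cookie in next_seid. The do/while loop above keeps
 * re-issuing the command with the previous cookie until the firmware
 * reports next_seid == 0, meaning the whole switch tree has been walked.
 */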
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */
	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		pf->last_sw_conf_flags = flags;
	}

	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
						NULL);
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
		pf->last_sw_conf_valid_flags = valid_flags;
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_cloud_filter_exit(pf);
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	/* repopulate tunnel port filters */
	i40e_sync_udp_filters(pf);

	return ret;
}
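
/* The switch configuration programmed via i40e_aq_set_switch_config()
 * above is device-global (it governs promiscuous behavior for the whole
 * part, not just one function), which is presumably why only PF 0
 * (pf_id == 0) writes it; the other PFs simply inherit the setting.
 */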
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;
	int q_max;

	pf->num_lan_qps = 0;

	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big chunk of queues if needed
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_CAPABLE	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_SRIOV_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}

		/* limit lan qps to the smaller of qps, cpus or msix */
		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
		pf->num_lan_qps = q_max;

		queues_left -= pf->num_lan_qps;
	}

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
}
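
/* Worked example of the budget above, with hypothetical capabilities:
 * num_tx_qp = 128, rss_size_max = 8, 16 online CPUs, 64 MSI-X vectors.
 * Then q_max = min(min(max(8, 16), 128), 64) = 16, so the LAN VSI takes
 * 16 qps and queues_left = 112. Flow Director reserves one (111 left).
 * With num_req_vfs = 32 and num_vf_qps = 4, only 111 / 4 = 27 VFs fit,
 * consuming 108 (3 left). With num_vmdq_qps = 2, num_vmdq_vsis is clipped
 * to 3 / 2 = 1, leaving pf->queues_left = 1.
 */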
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow director,
 * ethertype and macvlan type filter settings for the pf.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}
#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf;
	int i;

	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!buf)
		return;

	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
	i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
		      pf->hw.func_caps.num_vsis,
		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " RSS");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		i += snprintf(&buf[i], REMAIN(i), " FD_SB");
		i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		i += snprintf(&buf[i], REMAIN(i), " DCB");
	i += snprintf(&buf[i], REMAIN(i), " VxLAN");
	i += snprintf(&buf[i], REMAIN(i), " Geneve");
	if (pf->flags & I40E_FLAG_PTP)
		i += snprintf(&buf[i], REMAIN(i), " PTP");
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += snprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}
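
/* Note on the accumulation pattern above: snprintf() returns the length
 * that *would* have been written even when output is truncated, so 'i'
 * can legitimately grow past INFO_STRING_LEN if too many feature strings
 * are appended; REMAIN(i) shrinks accordingly, and the WARN_ON() flags
 * that truncation occurred.
 */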
/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware, or arch
 * specific fallback. Otherwise, we'll default to the stored value in
 * firmware.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 val;
	u32 i;
	u8 set_fc_aq_fail;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	/* set up pci connections */
	err = pci_request_mem_regions(pdev, i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup.  This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, pf->state);

	hw = &pf->hw;
	hw->back = pf;

	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;
	pf->instance = pfs_found;

	/* Select something other than the 802.1ad ethertype for the
	 * switch to use internally and drop on ingress.
	 */
	hw->switch_tag = 0xffff;
	hw->first_tag = ETH_P_8021AD;
	hw->second_tag = ETH_P_8021Q;

	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	pf->msg_enable = netif_msg_init(debug,
					NETIF_MSG_DRV |
					NETIF_MSG_PROBE |
					NETIF_MSG_LINK);
	if (debug < -1)
		pf->hw.debug_mask = debug;

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);
		pf->corer_count++;

		i40e_clear_pxe_mode(hw);
	}

	/* Reset here to make sure all is clean and to define PF 'n' */
	i40e_clear_hw(hw);
	err = i40e_pf_reset(hw);
	if (err) {
		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
		goto err_pf_reset;
	}
	pf->pfr_count++;

	hw->aq.num_arq_entries = I40E_AQ_LEN;
	hw->aq.num_asq_entries = I40E_AQ_LEN;
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	err = i40e_init_adminq(hw);
	if (err) {
		if (err == I40E_ERR_FIRMWARE_API_VERSION)
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		else
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");

		goto err_pf_reset;
	}
	i40e_get_oem_version(hw);

	/* provide nvm, fw, api versions */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw));

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
		dev_info(&pdev->dev,
			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);
	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}

	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, NULL);
	}

	/* allow a platform config to override the HW addr */
	i40e_get_platform_mac_addr(pdev, pf);

	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->hw_features |= I40E_HW_PORT_ID_VALID;

	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);
#ifdef CONFIG_I40E_DCB
	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT (hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);

	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}
	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);

	/* Make sure flow control is set according to current settings */
	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_phy_cap\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on set_phy_config\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_link_info\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		err = i40e_lan_add_device(pf);
		if (err)
			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
				 err);
	}

#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strncpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strncpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strncpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strncpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}

	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
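
/* Note on the unwind above: the error labels release resources in strict
 * reverse order of acquisition, and each goto jumps to the label matching
 * the first step that was *not* completed, so every path frees exactly
 * what was set up before the failure (e.g. a configure_lan_hmc failure
 * shuts down the HMC but never touches the not-yet-allocated VSI array).
 */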
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	if (pf->service_timer.function)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	i40e_cloud_filter_exit(pf);

	/* remove attached clients */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		ret_code = i40e_lan_del_device(pf);
		if (ret_code)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		i40e_prep_for_reset(pf, false);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	u32 reg;
	int err;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_prep_for_reset(pf, false);
}

/**
 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_reset_and_rebuild(pf, false, false);
}
/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, pf->state))
		return;

	i40e_handle_reset_warning(pf, false);
}
/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 mac_addr[6];
	u16 flags = 0;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The FW expects the mac address write cmd to first be called with
	 * one of these flags before calling it again with the multicast
	 * enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;

	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
		return;
	}

	flags = I40E_AQC_MC_MAG_EN
			| I40E_AQC_WOL_PRESERVE_ON_PFR
			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf, true);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_cloud_filter_exit(pf);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
/**
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure service task will not be running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely hibernate even when there are a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	i40e_clear_interrupt_scheme(pf);

	return 0;
}
/**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err) {
		dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
			err);
	}

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, false);

	/* Clear suspended state last after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}
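
/* i40e_suspend() and i40e_resume() pair up through the __I40E_SUSPENDED
 * bit: suspend uses test_and_set_bit() so repeated suspend calls are
 * no-ops, and resume bails out unless the bit is set, clearing it only
 * after the rebuild finishes so the service task cannot observe a
 * half-restored device.
 */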
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
	.resume = i40e_pci_error_resume,
};

static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);

static struct pci_driver i40e_driver = {
	.name     = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe    = i40e_probe,
	.remove   = i40e_remove,
	.driver   = {
		.pm = &i40e_pm_ops,
	},
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);