/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <netinet/in.h>
#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hash_crc.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"
/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
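
/*
 * Editor's note: a worked example of the units, following the comments
 * above. With IXGBE_FC_HI = 0x80, XOFF frames start once the Rx packet
 * buffer fill level reaches 0x80 * 1024 = 128 KB; with IXGBE_FC_LO = 0x40,
 * XON frames resume traffic once it drains back down to 0x40 * 1024 = 64 KB.
 */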
/* Default value of Max Rx Queue */
#define IXGBE_MAX_RX_QUEUE_NUM 128

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT        0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
#define IXGBE_MAX_RING_DESC           4096 /* replicated define from rxtx */
/*
 * Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH  32
#define IXGBE_DEFAULT_RX_PTHRESH      8
#define IXGBE_DEFAULT_RX_HTHRESH      8
#define IXGBE_DEFAULT_RX_WTHRESH      0

#define IXGBE_DEFAULT_TX_FREE_THRESH  32
#define IXGBE_DEFAULT_TX_PTHRESH      32
#define IXGBE_DEFAULT_TX_HTHRESH      0
#define IXGBE_DEFAULT_TX_WTHRESH      0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH  CHAR_BIT
#define IXGBE_8_BIT_MASK   UINT8_MAX

#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
/* Additional timesync values. */
#define NSEC_PER_SEC             1000000000L
#define IXGBE_INCVAL_10GB        0x66666666
#define IXGBE_INCVAL_1GB         0x40000000
#define IXGBE_INCVAL_100         0x50000000
#define IXGBE_INCVAL_SHIFT_10GB  28
#define IXGBE_INCVAL_SHIFT_1GB   24
#define IXGBE_INCVAL_SHIFT_100   21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24

#define IXGBE_CYCLECOUNTER_MASK  0xffffffffffffffffULL

#define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000
#define IXGBE_ETAG_ETYPE               0x00005084
#define IXGBE_ETAG_ETYPE_MASK          0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID         0x80000000
#define IXGBE_RAH_ADTYPE               0x40000000
#define IXGBE_RAL_ETAG_FILTER_MASK     0x00003fff
#define IXGBE_VMVIR_TAGA_MASK          0x18000000
#define IXGBE_VMVIR_TAGA_ETAG_INSERT   0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG            0x00000004
#define IXGBE_VTEICR_MASK              0x07

#define IXGBE_EXVET_VET_EXT_SHIFT      16
#define IXGBE_DMATXCTL_VT_MASK         0xFFFF0000
static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static int ixgbe_dev_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
				  struct rte_eth_xstat *xstats, unsigned n);
static int
ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	__rte_unused unsigned int size);
static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
					     uint16_t queue_id,
					     uint8_t stat_idx,
					     uint8_t is_rx);
static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
				size_t fw_size);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
				 struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
				 uint16_t vlan_id, int on);
static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
			       enum rte_vlan_type vlan_type,
			       uint16_t tpid_id);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
					   uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
				       int on);
static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
						  int mask);
static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
					struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void ixgbe_dev_setup_link_alarm_handler(void *param);

static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
			 uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
				      struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
				struct rte_pci_driver *drv);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
				   int wait_to_complete);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
				 struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
				   uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
					 uint16_t queue, int on);
static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					    uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					     uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
				 uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
static void ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);

/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
				   struct ether_addr *mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
				 struct rte_eth_mirror_conf *mirror_conf,
				 uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
				   uint8_t rule_id);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					  uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					   uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
			       uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);

static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
				struct ether_addr *mac_addr,
				uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
					struct ether_addr *mac_addr);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
				struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
				   enum rte_filter_op filter_op,
				   void *arg);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
				   struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
				       struct ixgbe_5tuple_filter *filter);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
				      enum rte_filter_op filter_op,
				      void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
				   struct rte_eth_ntuple_filter *filter);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
					 enum rte_filter_op filter_op,
					 void *arg);
static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
				      struct rte_eth_ethertype_filter *filter);
static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
				 enum rte_filter_type filter_type,
				 enum rte_filter_op filter_op,
				 void *arg);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
				  struct rte_eth_dcb_info *dcb_info);

static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
			  struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom);

static int ixgbe_get_module_info(struct rte_eth_dev *dev,
				 struct rte_eth_dev_module_info *modinfo);
static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
				   struct rte_dev_eeprom_info *info);

static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);

static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					    struct timespec *timestamp,
					    uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					    struct timespec *timestamp);
static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
				    struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
				     const struct timespec *timestamp);
static void ixgbevf_dev_interrupt_handler(void *param);

static int ixgbe_dev_l2_tunnel_eth_type_conf
	(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
static int ixgbe_dev_l2_tunnel_offload_set
	(struct rte_eth_dev *dev,
	 struct rte_eth_l2_tunnel_conf *l2_tunnel,
	 uint32_t mask,
	 uint8_t en);
static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
					     enum rte_filter_op filter_op,
					     void *arg);

static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					 struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					 struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_filter_restore(struct rte_eth_dev *dev);
static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
/*
 * Define VF Stats MACRO for Non "cleared on read" register
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
	uint32_t latest = IXGBE_READ_REG(hw, reg);              \
	cur += (latest - last) & UINT_MAX;                      \
	last = latest;                                          \
}

#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
	u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
	u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
	u64 latest = ((new_msb << 32) | new_lsb);                \
	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
	last = latest;                                           \
}
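
/*
 * Editor's note: because these VF counters are not cleared on read, each
 * update accumulates only the delta since the previous read, with masking
 * to survive hardware wrap-around. A worked 36-bit case: if last was
 * 0xFFFFFFFF0 and the register now reads 0x000000010, then
 * (0x1000000000 + 0x010 - 0xFFFFFFFF0) & 0xFFFFFFFFF = 0x20,
 * i.e. 32 units elapsed across the wrap.
 */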
#define IXGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)
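
/*
 * Editor's note: an illustration of the bitmap indexing, assuming 32-bit
 * bitmap words (sizeof((h)->bitmap[0]) * NBBY == 32). For queue q = 37:
 * idx = 37 / 32 = 1 and bit = 37 % 32 = 5, so IXGBE_SET_HWSTRIP sets bit 5
 * of bitmap[1], and IXGBE_GET_HWSTRIP reads that same bit back into r.
 */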
int ixgbe_logtype_init;
int ixgbe_logtype_driver;
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
	{ .vendor_id = 0, /* sentinel */ },
};
/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_TXD_ALIGN,
	.nb_seg_max = IXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};
static const struct eth_dev_ops ixgbe_eth_dev_ops = {
	.dev_configure        = ixgbe_dev_configure,
	.dev_start            = ixgbe_dev_start,
	.dev_stop             = ixgbe_dev_stop,
	.dev_set_link_up      = ixgbe_dev_set_link_up,
	.dev_set_link_down    = ixgbe_dev_set_link_down,
	.dev_close            = ixgbe_dev_close,
	.dev_reset            = ixgbe_dev_reset,
	.promiscuous_enable   = ixgbe_dev_promiscuous_enable,
	.promiscuous_disable  = ixgbe_dev_promiscuous_disable,
	.allmulticast_enable  = ixgbe_dev_allmulticast_enable,
	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
	.link_update          = ixgbe_dev_link_update,
	.stats_get            = ixgbe_dev_stats_get,
	.xstats_get           = ixgbe_dev_xstats_get,
	.xstats_get_by_id     = ixgbe_dev_xstats_get_by_id,
	.stats_reset          = ixgbe_dev_stats_reset,
	.xstats_reset         = ixgbe_dev_xstats_reset,
	.xstats_get_names     = ixgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
	.fw_version_get       = ixgbe_fw_version_get,
	.dev_infos_get        = ixgbe_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set              = ixgbe_dev_mtu_set,
	.vlan_filter_set      = ixgbe_vlan_filter_set,
	.vlan_tpid_set        = ixgbe_vlan_tpid_set,
	.vlan_offload_set     = ixgbe_vlan_offload_set,
	.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
	.rx_queue_start       = ixgbe_dev_rx_queue_start,
	.rx_queue_stop        = ixgbe_dev_rx_queue_stop,
	.tx_queue_start       = ixgbe_dev_tx_queue_start,
	.tx_queue_stop        = ixgbe_dev_tx_queue_stop,
	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
	.rx_queue_release     = ixgbe_dev_rx_queue_release,
	.rx_queue_count       = ixgbe_dev_rx_queue_count,
	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
	.rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
	.tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
	.tx_queue_release     = ixgbe_dev_tx_queue_release,
	.dev_led_on           = ixgbe_dev_led_on,
	.dev_led_off          = ixgbe_dev_led_off,
	.flow_ctrl_get        = ixgbe_flow_ctrl_get,
	.flow_ctrl_set        = ixgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
	.mac_addr_add         = ixgbe_add_rar,
	.mac_addr_remove      = ixgbe_remove_rar,
	.mac_addr_set         = ixgbe_set_default_mac_addr,
	.uc_hash_table_set    = ixgbe_uc_hash_table_set,
	.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
	.mirror_rule_set      = ixgbe_mirror_rule_set,
	.mirror_rule_reset    = ixgbe_mirror_rule_reset,
	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
	.reta_update          = ixgbe_dev_rss_reta_update,
	.reta_query           = ixgbe_dev_rss_reta_query,
	.rss_hash_update      = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
	.filter_ctrl          = ixgbe_dev_filter_ctrl,
	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get         = ixgbe_rxq_info_get,
	.txq_info_get         = ixgbe_txq_info_get,
	.timesync_enable      = ixgbe_timesync_enable,
	.timesync_disable     = ixgbe_timesync_disable,
	.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
	.get_reg              = ixgbe_get_regs,
	.get_eeprom_length    = ixgbe_get_eeprom_length,
	.get_eeprom           = ixgbe_get_eeprom,
	.set_eeprom           = ixgbe_set_eeprom,
	.get_module_info      = ixgbe_get_module_info,
	.get_module_eeprom    = ixgbe_get_module_eeprom,
	.get_dcb_info         = ixgbe_dev_get_dcb_info,
	.timesync_adjust_time = ixgbe_timesync_adjust_time,
	.timesync_read_time   = ixgbe_timesync_read_time,
	.timesync_write_time  = ixgbe_timesync_write_time,
	.l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
	.l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
	.udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
	.tm_ops_get           = ixgbe_tm_ops_get,
};
/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
	.dev_configure        = ixgbevf_dev_configure,
	.dev_start            = ixgbevf_dev_start,
	.dev_stop             = ixgbevf_dev_stop,
	.link_update          = ixgbevf_dev_link_update,
	.stats_get            = ixgbevf_dev_stats_get,
	.xstats_get           = ixgbevf_dev_xstats_get,
	.stats_reset          = ixgbevf_dev_stats_reset,
	.xstats_reset         = ixgbevf_dev_stats_reset,
	.xstats_get_names     = ixgbevf_dev_xstats_get_names,
	.dev_close            = ixgbevf_dev_close,
	.dev_reset            = ixgbevf_dev_reset,
	.promiscuous_enable   = ixgbevf_dev_promiscuous_enable,
	.promiscuous_disable  = ixgbevf_dev_promiscuous_disable,
	.allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
	.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
	.dev_infos_get        = ixgbevf_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set              = ixgbevf_dev_set_mtu,
	.vlan_filter_set      = ixgbevf_vlan_filter_set,
	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
	.vlan_offload_set     = ixgbevf_vlan_offload_set,
	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
	.rx_queue_release     = ixgbe_dev_rx_queue_release,
	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
	.rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
	.tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
	.tx_queue_release     = ixgbe_dev_tx_queue_release,
	.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
	.mac_addr_add         = ixgbevf_add_mac_addr,
	.mac_addr_remove      = ixgbevf_remove_mac_addr,
	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get         = ixgbe_rxq_info_get,
	.txq_info_get         = ixgbe_txq_info_get,
	.mac_addr_set         = ixgbevf_set_default_mac_addr,
	.get_reg              = ixgbevf_get_regs,
	.reta_update          = ixgbe_dev_rss_reta_update,
	.reta_query           = ixgbe_dev_rss_reta_query,
	.rss_hash_update      = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
};
/* store statistics names and its offset in stats structure */
struct rte_ixgbe_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
	{"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
	{"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
	{"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
	{"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
	{"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
	{"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
	{"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
	{"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
	{"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
	{"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
	{"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
	{"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
	{"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
	{"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
	{"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
	{"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
	{"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
	{"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
	{"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
	{"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
	{"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

	{"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
		fdirustat_add)},
	{"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
		fdirustat_remove)},
	{"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
		fdirfstat_fadd)},
	{"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
		fdirfstat_fremove)},
	{"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
		fdirmatch)},
	{"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
		fdirmiss)},

	{"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
	{"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
	{"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
		fclast)},
	{"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
	{"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
	{"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
	{"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
	{"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
		fcoe_noddp)},
	{"rx_fcoe_no_direct_data_placement_ext_buff",
		offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

	{"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
		lxontxc)},
	{"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
		lxonrxc)},
	{"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
		lxofftxc)},
	{"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
		lxoffrxc)},
	{"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
			   sizeof(rte_ixgbe_stats_strings[0]))
/* MACsec statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
		out_pkts_untagged)},
	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
		out_pkts_encrypted)},
	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
		out_pkts_protected)},
	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
		out_octets_encrypted)},
	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
		out_octets_protected)},
	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
		in_pkts_untagged)},
	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
		in_pkts_badtag)},
	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
		in_pkts_nosci)},
	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unknownsci)},
	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
		in_octets_decrypted)},
	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
		in_octets_validated)},
	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unchecked)},
	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
		in_pkts_delayed)},
	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
		in_pkts_late)},
	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
		in_pkts_ok)},
	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
		in_pkts_invalid)},
	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
		in_pkts_notvalid)},
	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unusedsa)},
	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
		in_pkts_notusingsa)},
};

#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
			   sizeof(rte_ixgbe_macsec_strings[0]))
/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
	{"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
};

#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
			   sizeof(rte_ixgbe_rxq_strings[0]))
#define IXGBE_NB_RXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
	{"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
		pxon2offc)},
};

#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
			   sizeof(rte_ixgbe_txq_strings[0]))
#define IXGBE_NB_TXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};

#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
		sizeof(rte_ixgbevf_stats_strings[0]))
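
/*
 * Editor's note: a minimal sketch (not compiled) of how these name/offset
 * tables are consumed. The recorded offset is applied to a raw
 * ixgbe_hw_stats block to pull out the matching 64-bit counter; 'hw_stats'
 * below is an assumed, already-populated pointer for illustration.
 */
#if 0
	unsigned int i;

	for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
		/* read the counter living at the table's byte offset */
		uint64_t value = *(uint64_t *)(((char *)hw_stats) +
				rte_ixgbe_stats_strings[i].offset);
		printf("%s: %" PRIu64 "\n",
		       rte_ixgbe_stats_strings[i].name, value);
	}
#endif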
/*
 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		return 1;
	default:
		return 0;
	}
}
static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = ixgbe_reset_hw(hw);

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	if (status == IXGBE_ERR_SFP_NOT_PRESENT)
		status = IXGBE_SUCCESS;
	return status;
}
static void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
	IXGBE_WRITE_FLUSH(hw);
}
/*
 * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(hw);
}
/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
	uint32_t i;

	for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
	}
}
static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if ((hw->mac.type != ixgbe_mac_82599EB) &&
	    (hw->mac.type != ixgbe_mac_X540) &&
	    (hw->mac.type != ixgbe_mac_X550) &&
	    (hw->mac.type != ixgbe_mac_X550EM_x) &&
	    (hw->mac.type != ixgbe_mac_X550EM_a))
		return -ENOSYS;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsmr[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsmr[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

	/* Now write the mapping in the appropriate register */
	if (is_rx) {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
			     stat_mappings->rqsmr[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
	} else {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
			     stat_mappings->tqsm[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
	}
	return 0;
}
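
/*
 * Editor's note: a worked example of the field packing above. Each
 * RQSMR/TQSM register holds four one-byte map fields, so for queue_id = 5
 * and stat_idx = 9: n = 5 / 4 = 1, offset = 5 % 4 = 1, and the function
 * writes (9 & 0x0f) << (8 * 1) = 0x900 into register 1 after clearing
 * byte 1 of its previous contents.
 */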
static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i;

	/* write whatever was in stat mapping table to the NIC */
	for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
		/* rx */
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);
		/* tx */
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
	}
}
static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
	uint8_t i;
	struct ixgbe_dcb_tc_config *tc;
	uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

	dcb_config->num_tcs.pg_tcs = dcb_max_tc;
	dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
	for (i = 0; i < dcb_max_tc; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
			(uint8_t)(100 / dcb_max_tc + (i & 1));
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
			(uint8_t)(100 / dcb_max_tc + (i & 1));
		tc->pfc = ixgbe_dcb_pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
	}
	dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities in 82599 */
	dcb_config->support.capabilities = 0xFF;

	/* we only support 4 Tcs for X540, X550 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x ||
	    hw->mac.type == ixgbe_mac_X550EM_a) {
		dcb_config->num_tcs.pg_tcs = 4;
		dcb_config->num_tcs.pfc_tcs = 4;
	}
}
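
/*
 * Editor's note: the bwg_percent arithmetic above spreads bandwidth nearly
 * evenly across traffic classes while still summing to exactly 100. With
 * the default of 8 TCs, 100/8 + (i & 1) yields 12 for even i and 13 for
 * odd i: 4 * 12 + 4 * 13 = 100.
 */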
/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
	uint16_t mask;

	/*
	 * Phy lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock. Release of common lock
	 * is done automatically by swfw_sync function.
	 */
	mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
	}
	ixgbe_release_swfw_semaphore(hw, mask);

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
	}
	ixgbe_release_swfw_semaphore(hw, mask);
}
/*
 * This function is based on code in ixgbe_attach() in base/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
	struct ixgbe_dcb_config *dcb_config =
		IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct ixgbe_bw_conf *bw_conf =
		IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
	uint32_t ctrl_ext;
	uint16_t csum;
	int diag, i;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ixgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not initialized by primary process
		 */
		if (eth_dev->data->tx_queues) {
			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
			ixgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		ixgbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Initialize the shared code (base driver) */
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	diag = ixgbe_bypass_init_shared_code(hw);
#else
	diag = ixgbe_init_shared_code(hw);
#endif /* RTE_LIBRTE_IXGBE_BYPASS */

	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
		PMD_INIT_LOG(ERR, "\nERROR: "
			"Firmware recovery mode detected. Limiting functionality.\n"
			"Refer to the Intel(R) Ethernet Adapters and Devices "
			"User Guide for details on firmware recovery mode.");
		return -EIO;
	}

	/* pick up the PCI bus settings for reporting later */
	ixgbe_get_bus_info(hw);

	/* Unlock any pending hardware semaphore */
	ixgbe_swfw_lock_reset(hw);

#ifdef RTE_LIBRTE_SECURITY
	/* Initialize security_ctx only for primary process*/
	if (ixgbe_ipsec_ctx_create(eth_dev))
		return -ENOMEM;
#endif

	/* Initialize DCB configuration*/
	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
	ixgbe_dcb_init(hw, dcb_config);
	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		hw->fc.low_water[i] = IXGBE_FC_LO;
		hw->fc.high_water[i] = IXGBE_FC_HI;
	}
	hw->fc.send_xon = 1;

	/* Make sure we have a good EEPROM before we read from it */
	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
		return -EIO;
	}

#ifdef RTE_LIBRTE_IXGBE_BYPASS
	diag = ixgbe_bypass_init_hw(hw);
#else
	diag = ixgbe_init_hw(hw);
#endif /* RTE_LIBRTE_IXGBE_BYPASS */

	/*
	 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
	 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
	 * also called. See ixgbe_identify_phy_82599(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the bios. A delay of 100ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
		rte_delay_ms(200);
		diag = ixgbe_init_hw(hw);
	}

	if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
		diag = IXGBE_SUCCESS;

	if (diag == IXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your Intel or hardware representative "
			     "who provided you with this hardware.");
	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
	if (diag) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
		return -EIO;
	}

	/* Reset the hw statistics */
	ixgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ixgbe_disable_intr(hw);

	/* reset mappings for queue statistics hw counters*/
	ixgbe_reset_qstat_mappings(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
						    IXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap*/
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ixgbe_pf_host_init(eth_dev);

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* let hardware know driver is loaded */
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int)hw->mac.type, (int)hw->phy.type,
			     (int)hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ixgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ixgbe_enable_intr(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
	       sizeof(struct ixgbe_filter_info));

	/* initialize 5tuple filter list */
	TAILQ_INIT(&filter_info->fivetuple_list);

	/* initialize flow director filter list & hash */
	ixgbe_fdir_filter_init(eth_dev);

	/* initialize l2 tunnel filter list & hash */
	ixgbe_l2_tn_filter_init(eth_dev);

	/* initialize flow filter lists */
	ixgbe_filterlist_init();

	/* initialize bandwidth configuration info */
	memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));

	/* initialize Traffic Manager configuration */
	ixgbe_tm_conf_init(eth_dev);

	return 0;
}
static int
eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (hw->adapter_stopped == 0)
		ixgbe_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* Unlock any pending hardware semaphore */
	ixgbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	do {
		ret = rte_intr_callback_unregister(intr_handle,
				ixgbe_dev_interrupt_handler, eth_dev);
		if (ret >= 0) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
			return ret;
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + IXGBE_LINK_UP_TIME));

	/* cancel the delay handler before remove dev */
	rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, eth_dev);

	/* uninitialize PF if max_vfs not zero */
	ixgbe_pf_host_uninit(eth_dev);

	/* remove all the fdir filters & hash */
	ixgbe_fdir_filter_uninit(eth_dev);

	/* remove all the L2 tunnel filters & hash */
	ixgbe_l2_tn_filter_uninit(eth_dev);

	/* Remove all ntuple filters of the device */
	ixgbe_ntuple_filter_uninit(eth_dev);

	/* clear all the filters list */
	ixgbe_filterlist_flush();

	/* Remove all Traffic Manager configuration */
	ixgbe_tm_conf_uninit(eth_dev);

#ifdef RTE_LIBRTE_SECURITY
	rte_free(eth_dev->security_ctx);
#endif

	return 0;
}
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct ixgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple,
			     entries);
		rte_free(p_5tuple);
	}
	memset(filter_info->fivetuple_mask, 0,
	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);

	return 0;
}
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
	struct ixgbe_fdir_filter *fdir_filter;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_handle)
		rte_hash_free(fdir_info->hash_handle);

	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
		TAILQ_REMOVE(&fdir_info->fdir_list,
			     fdir_filter,
			     entries);
		rte_free(fdir_filter);
	}

	return 0;
}
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
	struct ixgbe_l2_tn_filter *l2_tn_filter;

	if (l2_tn_info->hash_map)
		rte_free(l2_tn_info->hash_map);
	if (l2_tn_info->hash_handle)
		rte_hash_free(l2_tn_info->hash_handle);

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
			     l2_tn_filter,
			     entries);
		rte_free(l2_tn_filter);
	}

	return 0;
}
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = IXGBE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(union ixgbe_atr_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", eth_dev->device->name);
	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ixgbe",
					  sizeof(struct ixgbe_fdir_filter *) *
					  IXGBE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		return -ENOMEM;
	}
	fdir_info->mask_added = FALSE;

	return 0;
}
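
/*
 * Editor's note: a sketch (not compiled) of the lookup pattern this table
 * enables, assuming the usual rte_hash flow. The key is the packet
 * signature (union ixgbe_atr_input) and the slot returned by the hash
 * indexes hash_map, which holds the filter pointers; 'filter' below is an
 * assumed struct ixgbe_fdir_filter pointer for illustration.
 */
#if 0
	int pos = rte_hash_add_key(fdir_info->hash_handle,
				   &filter->ixgbe_fdir);
	if (pos >= 0)
		fdir_info->hash_map[pos] = filter; /* store by returned slot */
#endif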
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters l2_tn_hash_params = {
		.name = l2_tn_hash_name,
		.entries = IXGBE_MAX_L2_TN_FILTER_NUM,
		.key_len = sizeof(struct ixgbe_l2_tn_key),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	TAILQ_INIT(&l2_tn_info->l2_tn_list);
	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
		 "l2_tn_%s", eth_dev->device->name);
	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
	if (!l2_tn_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
		return -EINVAL;
	}
	l2_tn_info->hash_map = rte_zmalloc("ixgbe",
					   sizeof(struct ixgbe_l2_tn_filter *) *
					   IXGBE_MAX_L2_TN_FILTER_NUM,
					   0);
	if (!l2_tn_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for L2 TN hash map!");
		return -ENOMEM;
	}
	l2_tn_info->e_tag_en = FALSE;
	l2_tn_info->e_tag_fwd_en = FALSE;
	l2_tn_info->e_tag_ether_type = ETHER_TYPE_ETAG;

	return 0;
}
/*
 * Negotiate mailbox API version with the PF.
 * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
 * Then we try to negotiate starting with the most recent one.
 * If all negotiation attempts fail, then we will proceed with
 * the default one (ixgbe_mbox_api_10).
 */
static void
ixgbevf_negotiate_api(struct ixgbe_hw *hw)
{
	int32_t i;

	/* start with highest supported, proceed down */
	static const enum ixgbe_pfvf_api_rev sup_ver[] = {
		ixgbe_mbox_api_12,
		ixgbe_mbox_api_11,
		ixgbe_mbox_api_10,
	};

	for (i = 0;
			i != RTE_DIM(sup_ver) &&
			ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
			i++)
		;
}

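/*
 * The loop above stops at the first revision the PF acknowledges, so a PF
 * driver that predates a given mailbox API simply naks it and the VF settles
 * on an older revision, ultimately falling back to ixgbe_mbox_api_10. The
 * exact list of revisions tried depends on the driver generation.
 */
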
static void
generate_random_mac_addr(struct ether_addr *mac_addr)
{
	uint64_t random;

	/* Set Organizationally Unique Identifier (OUI) prefix. */
	mac_addr->addr_bytes[0] = 0x00;
	mac_addr->addr_bytes[1] = 0x09;
	mac_addr->addr_bytes[2] = 0xC0;
	/* Force indication of locally assigned MAC address. */
	mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
	/* Generate the last 3 bytes of the MAC address with a random number. */
	random = rte_rand();
	memcpy(&mac_addr->addr_bytes[3], &random, 3);
}

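/*
 * Worked example: with the 00:09:C0 prefix and ETHER_LOCAL_ADMIN_ADDR (0x02)
 * OR'd into the first byte, generated addresses take the form
 * 02:09:C0:xx:xx:xx, which marks them as locally administered rather than
 * vendor-assigned.
 */
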
/*
 * Virtual Function device init
 */
static int
eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
{
	int diag;
	uint32_t tc, tcs;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
	struct ether_addr *perm_addr = (struct ether_addr *)hw->mac.perm_addr;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ixgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized.
		 * Tx queue may not be initialized by primary process.
		 */
		if (eth_dev->data->tx_queues) {
			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
			ixgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE,
				     "No TX queues configured yet. Using default TX function.");
		}

		ixgbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* Initialize the shared code (base driver) */
	diag = ixgbe_init_shared_code(hw);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Reset the hw statistics */
	ixgbevf_dev_stats_reset(eth_dev);

	/* Disable the interrupts for VF */
	ixgbevf_intr_disable(eth_dev);

	hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
	diag = hw->mac.ops.reset_hw(hw);

	/*
	 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
	 * the underlying PF driver has not assigned a MAC address to the VF.
	 * In this case, assign a random MAC address.
	 */
	if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
		/*
		 * This error code will be propagated to the app by
		 * rte_eth_dev_reset, so use a public error code rather than
		 * the internal-only IXGBE_ERR_RESET_FAILED
		 */
		return -EAGAIN;
	}

	/* negotiate mailbox API version to use with the PF. */
	ixgbevf_negotiate_api(hw);

	/* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
	ixgbevf_get_queues(hw, &tcs, &tc);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Generate a random MAC address, if none was assigned by PF. */
	if (is_zero_ether_addr(perm_addr)) {
		generate_random_mac_addr(perm_addr);
		diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
		if (diag) {
			rte_free(eth_dev->data->mac_addrs);
			eth_dev->data->mac_addrs = NULL;
			return diag;
		}
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			     "%02x:%02x:%02x:%02x:%02x:%02x",
			     perm_addr->addr_bytes[0],
			     perm_addr->addr_bytes[1],
			     perm_addr->addr_bytes[2],
			     perm_addr->addr_bytes[3],
			     perm_addr->addr_bytes[4],
			     perm_addr->addr_bytes[5]);
	}

	/* Copy the permanent MAC address */
	ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);

	/* reset the hardware with the new settings */
	diag = hw->mac.ops.start_hw(hw);
	if (diag) {
		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
		return -EIO;
	}

	rte_intr_callback_register(intr_handle,
				   ixgbevf_dev_interrupt_handler, eth_dev);
	rte_intr_enable(intr_handle);
	ixgbevf_intr_enable(eth_dev);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");

	return 0;
}

/* Virtual Function device uninit */
static int
eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (hw->adapter_stopped == 0)
		ixgbevf_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* Disable the interrupts for VF */
	ixgbevf_intr_disable(eth_dev);

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ixgbevf_dev_interrupt_handler, eth_dev);

	return 0;
}

static int
eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		    struct rte_pci_device *pci_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_eth_dev *pf_ethdev;
	struct rte_eth_devargs eth_da;
	int i, retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					       &eth_da);
		if (retval)
			return retval;
	} else
		memset(&eth_da, 0, sizeof(eth_da));

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
		sizeof(struct ixgbe_adapter),
		eth_dev_pci_specific_init, pci_dev,
		eth_ixgbe_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (pf_ethdev == NULL)
		return -ENODEV;

	/* probe VF representor ports */
	for (i = 0; i < eth_da.nb_representor_ports; i++) {
		struct ixgbe_vf_info *vfinfo;
		struct ixgbe_vf_representor representor;

		vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
			pf_ethdev->data->dev_private);
		if (vfinfo == NULL) {
			PMD_DRV_LOG(ERR,
				"no virtual functions supported by PF");
			break;
		}

		representor.vf_id = eth_da.representor_ports[i];
		representor.switch_domain_id = vfinfo->switch_domain_id;
		representor.pf_ethdev = pf_ethdev;

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			 pci_dev->device.name,
			 eth_da.representor_ports[i]);

		retval = rte_eth_dev_create(&pci_dev->device, name,
			sizeof(struct ixgbe_vf_representor), NULL, NULL,
			ixgbe_vf_representor_init, &representor);

		if (retval)
			PMD_DRV_LOG(ERR, "failed to create ixgbe vf "
				"representor %s.", name);
	}

	return 0;
}

static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return -ENODEV;

	if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
		return rte_eth_dev_destroy(ethdev, ixgbe_vf_representor_uninit);

	return rte_eth_dev_destroy(ethdev, eth_ixgbe_dev_uninit);
}

static struct rte_pci_driver rte_ixgbe_pmd = {
	.id_table = pci_id_ixgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_ixgbe_pci_probe,
	.remove = eth_ixgbe_pci_remove,
};

static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
}

static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
}

/*
 * virtual function driver struct
 */
static struct rte_pci_driver rte_ixgbevf_pmd = {
	.id_table = pci_id_ixgbevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_ixgbevf_pci_probe,
	.remove = eth_ixgbevf_pci_remove,
};

static int
ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx));
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

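/*
 * Worked example: for vlan_id = 100,
 * vid_idx = (100 >> 5) & 0x7F = 3 and vid_bit = 1 << (100 & 0x1F) = 1 << 4,
 * so bit 4 of VFTA[3] controls whether VLAN 100 passes the filter.
 */
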
static void
ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	if (on)
		ixgbe_vlan_hw_strip_enable(dev, queue);
	else
		ixgbe_vlan_hw_strip_disable(dev, queue);
}

static int
ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;
	uint32_t reg;
	uint32_t qinq;

	qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
	qinq &= IXGBE_DMATXCTL_GDV;

	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		if (qinq) {
			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
			reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
			reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
				| ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
			IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR, "Inner type is not supported"
				    " by single VLAN");
		}
		break;
	case ETH_VLAN_TYPE_OUTER:
		if (qinq) {
			/* Only the high 16 bits are valid */
			IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
					IXGBE_EXVET_VET_EXT_SHIFT);
		} else {
			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
			reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
			reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
				| ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
			IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
		}
		break;
	default:
		ret = -EINVAL;
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		break;
	}

	return ret;
}

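/*
 * Worked example (hypothetical TPID): setting the outer type to 0x88A8 with
 * QinQ enabled writes 0x88A8 << IXGBE_EXVET_VET_EXT_SHIFT, i.e. 0x88A80000,
 * into EXVET; only the high 16 bits of that register carry the extended
 * VLAN ethertype.
 */
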
static void
ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vlnctrl;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Disable */
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl &= ~IXGBE_VLNCTRL_VFE;

	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
}

static void
ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vlnctrl;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	/* Filter Table Enable */
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
	vlnctrl |= IXGBE_VLNCTRL_VFE;

	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

	/* write whatever is in local vfta copy */
	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
}

static void
ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
	struct ixgbe_rx_queue *rxq;

	if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		IXGBE_SET_HWSTRIP(hwstrip, queue);
	else
		IXGBE_CLEAR_HWSTRIP(hwstrip, queue);

	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = PKT_RX_VLAN;
		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	}
}

static void
ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* No queue level support */
		PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
		return;
	}

	/* Other 10G NICs: the VLAN strip can be set up per queue in RXDCTL */
	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
	ctrl &= ~IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);

	/* record this setting for HW strip per queue */
	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}

static void
ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* No queue level support */
		PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
		return;
	}

	/* Other 10G NICs: the VLAN strip can be set up per queue in RXDCTL */
	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
	ctrl |= IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);

	/* record this setting for HW strip per queue */
	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}

static void
ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	/* DMATXCTRL: Generic Double VLAN Disable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
	ctrl &= ~IXGBE_DMATXCTL_GDV;
	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);

	/* CTRL_EXT: Global Double VLAN Disable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl &= ~IXGBE_EXTENDED_VLAN;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
}

static void
ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	/* DMATXCTRL: Generic Double VLAN Enable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
	ctrl |= IXGBE_DMATXCTL_GDV;
	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);

	/* CTRL_EXT: Global Double VLAN Enable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl |= IXGBE_EXTENDED_VLAN;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);

	/* Clear pooling mode of PFVTCTL. It's required by X550. */
	if (hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x ||
	    hw->mac.type == ixgbe_mac_X550EM_a) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
		ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
	}

	/*
	 * The VET EXT field in the EXVET register is 0x8100 by default,
	 * so no need to change it. The same applies to the VT field of
	 * the DMATXCTL register.
	 */
}

static void
ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	uint32_t ctrl;
	uint16_t i;
	struct ixgbe_rx_queue *rxq;
	bool on;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
			ctrl |= IXGBE_VLNCTRL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
		} else {
			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
			ctrl &= ~IXGBE_VLNCTRL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
		}
	} else {
		/*
		 * Other 10G NIC, the VLAN strip can be setup
		 * per queue in RXDCTL
		 */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
			if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
				ctrl |= IXGBE_RXDCTL_VME;
				on = TRUE;
			} else {
				ctrl &= ~IXGBE_RXDCTL_VME;
				on = FALSE;
			}
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);

			/* record those setting for HW strip per queue */
			ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
		}
	}
}

static void
ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
	uint16_t i;
	struct rte_eth_rxmode *rxmode;
	struct ixgbe_rx_queue *rxq;

	if (mask & ETH_VLAN_STRIP_MASK) {
		rxmode = &dev->data->dev_conf.rxmode;
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
			}
		else
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
			}
	}
}

static int
ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & ETH_VLAN_STRIP_MASK) {
		ixgbe_vlan_hw_strip_config(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			ixgbe_vlan_hw_filter_enable(dev);
		else
			ixgbe_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			ixgbe_vlan_hw_extend_enable(dev);
		else
			ixgbe_vlan_hw_extend_disable(dev);
	}

	return 0;
}

static int
ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	ixgbe_config_vlan_strip_on_all_queues(dev, mask);

	ixgbe_vlan_offload_config(dev, mask);

	return 0;
}

static void
ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
	uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);

	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
}

static int
ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	switch (nb_rx_q) {
	case 1:
	case 2:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
		break;
	case 4:
		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
		break;
	default:
		return -EINVAL;
	}

	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
		IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

	return 0;
}

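/*
 * Worked example: nb_rx_q = 4 selects ETH_32_POOLS, so nb_q_per_pool =
 * IXGBE_MAX_RX_QUEUE_NUM / 32 = 4; assuming max_vfs = 8, the PF's default
 * pool then starts at queue index 8 * 4 = 32.
 */
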
static int
ixgbe_check_mq_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
			break;
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
			PMD_INIT_LOG(ERR, "SRIOV active,"
					" unsupported mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		case ETH_MQ_RX_VMDQ_RSS:
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
				if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
					PMD_INIT_LOG(ERR, "SRIOV is active,"
						" invalid queue number"
						" for VMDQ RSS, allowed"
						" values are 1, 2 or 4.");
					return -EINVAL;
				}
			break;
		case ETH_MQ_RX_VMDQ_ONLY:
		case ETH_MQ_RX_NONE:
			/* if no mq mode is configured, use the default scheme */
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			break;
		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		}

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
			break;
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
			break;
		}

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" nb_rx_q=%d nb_tx_q=%d queue number"
					" must be less than or equal to %d.",
					nb_rx_q, nb_tx_q,
					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
					  " not supported.");
			return -EINVAL;
		}
		/* check configuration for vmdq+dcb mode */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
						IXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools must be %d or %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
						IXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			      conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools != %d and"
						" nb_queue_pools != %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			      conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}

		/*
		 * When DCB/VT is off, maximum number of queues changes,
		 * except for 82598EB, which remains constant.
		 */
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
		    hw->mac.type != ixgbe_mac_82598EB) {
			if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
				PMD_INIT_LOG(ERR,
					     "Neither VT nor DCB are enabled, "
					     "nb_tx_q > %d.",
					     IXGBE_NONE_MODE_TX_NB_QUEUES);
				return -EINVAL;
			}
		}
	}
	return 0;
}

static int
ixgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	int ret;

	PMD_INIT_FUNC_TRACE();
	/* multiple queue mode checking */
	ret = ixgbe_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	/* set flag to update link status after init */
	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
	 * allocation or vector Rx preconditions we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;
	adapter->rx_vec_allowed = true;

	return 0;
}

static void
ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	uint32_t gpie;

	/* only set it up on X550EM_X */
	if (hw->mac.type == ixgbe_mac_X550EM_x) {
		gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
		gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
	}
}

int
ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
			uint16_t tx_rate, uint64_t q_msk)
{
	struct ixgbe_hw *hw;
	struct ixgbe_vf_info *vfinfo;
	struct rte_eth_link link;
	uint8_t  nb_q_per_pool;
	uint32_t queue_stride;
	uint32_t queue_idx, idx = 0, vf_idx;
	uint32_t queue_end;
	uint16_t total_rate = 0;
	struct rte_pci_device *pci_dev;

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	rte_eth_link_get_nowait(dev->data->port_id, &link);

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (tx_rate > link.link_speed)
		return -EINVAL;

	if (q_msk == 0)
		return 0;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	queue_idx = vf * queue_stride;
	queue_end = queue_idx + nb_q_per_pool - 1;
	if (queue_end >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (vfinfo) {
		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
			if (vf_idx == vf)
				continue;
			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
				idx++)
				total_rate += vfinfo[vf_idx].tx_rate[idx];
		}
	} else {
		return -EINVAL;
	}

	/* Store tx_rate for this vf. */
	for (idx = 0; idx < nb_q_per_pool; idx++) {
		if (((uint64_t)0x1 << idx) & q_msk) {
			if (vfinfo[vf].tx_rate[idx] != tx_rate)
				vfinfo[vf].tx_rate[idx] = tx_rate;
			total_rate += tx_rate;
		}
	}

	if (total_rate > dev->data->dev_link.link_speed) {
		/* Reset stored TX rate of the VF if it causes exceeding
		 * of the link speed.
		 */
		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
		return -EINVAL;
	}

	/* Set RTTBCNRC of each queue/pool for vf X */
	for (; queue_idx <= queue_end; queue_idx++) {
		if (0x1 & q_msk)
			ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
		q_msk = q_msk >> 1;
	}

	return 0;
}

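/*
 * Usage sketch (hypothetical values): q_msk = 0x3 with tx_rate = 1000
 * requests a 1000 Mbps cap on the first two queues of the VF's pool; the
 * final loop walks the pool's queue range and programs RTTBCNRC only for
 * queues whose bit is set in q_msk.
 */
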
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ixgbe_dev_start(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err, link_up = 0, negotiate = 0;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint16_t vf, idx;
	uint32_t *link_speeds;
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* IXGBE devices don't support:
	 *    - half duplex (checked afterwards for valid speeds)
	 *    - fixed speed: TODO implement
	 */
	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
		"Invalid link_speeds for port %u, fixed speed not supported",
				dev->data->port_id);
		return -EINVAL;
	}

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;
	ixgbe_stop_adapter(hw);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	status = ixgbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.ops.start_hw(hw);
	hw->mac.get_link_status = true;

	/* configure PF module if SRIOV enabled */
	ixgbe_pf_host_configure(dev);

	ixgbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
					IXGBE_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until rx interrupt */
	ixgbe_configure_msix(dev);

	/* initialize transmission unit */
	ixgbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ixgbe_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK;
	err = ixgbe_vlan_offload_config(dev, mask);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable vlan filtering for VMDq */
		ixgbe_vmdq_vlan_hw_filter_enable(dev);
	}

	/* Configure DCB hw */
	ixgbe_configure_dcb(dev);

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		err = ixgbe_fdir_configure(dev);
		if (err)
			goto error;
	}

	/* Restore vf rate limit */
	if (vfinfo != NULL) {
		for (vf = 0; vf < pci_dev->max_vfs; vf++)
			for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
				if (vfinfo[vf].tx_rate[idx] != 0)
					ixgbe_set_vf_rate_limit(
						dev, vf,
						vfinfo[vf].tx_rate[idx],
						1 << idx);
	}

	ixgbe_restore_statistics_mapping(dev);

	err = ixgbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (dev->data->dev_conf.lpbk_mode != 0) {
		err = ixgbe_check_supported_loopback_mode(dev);
		if (err < 0) {
			PMD_INIT_LOG(ERR, "Unsupported loopback mode");
			goto error;
		} else {
			goto skip_link_setup;
		}
	}

	if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
		err = hw->mac.ops.setup_sfp(hw);
		if (err)
			goto error;
	}

	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
		/* Turn on the copper */
		ixgbe_set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		ixgbe_enable_tx_laser(hw);
	}

	err = ixgbe_check_link(hw, &speed, &link_up, 0);
	if (err)
		goto error;
	dev->data->dev_link.link_status = link_up;

	err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
	if (err)
		goto error;

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G |
			ETH_LINK_SPEED_10G;
		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
			allowed_speeds = ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
		break;
	default:
		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_10G;
	}

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			speed = IXGBE_LINK_SPEED_82598_AUTONEG;
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			speed = IXGBE_LINK_SPEED_82599_AUTONEG;
			break;
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			speed = IXGBE_LINK_SPEED_X550_AUTONEG;
			break;
		default:
			speed = IXGBE_LINK_SPEED_82599_AUTONEG;
		}
	} else {
		if (*link_speeds & ETH_LINK_SPEED_10G)
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_5G)
			speed |= IXGBE_LINK_SPEED_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_2_5G)
			speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_1G)
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_100M)
			speed |= IXGBE_LINK_SPEED_100_FULL;
		if (*link_speeds & ETH_LINK_SPEED_10M)
			speed |= IXGBE_LINK_SPEED_10_FULL;
	}

	err = ixgbe_setup_link(hw, speed, link_up);
	if (err)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
		ixgbe_dev_macsec_interrupt_setup(dev);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ixgbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ixgbe_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	ixgbe_enable_intr(dev);
	ixgbe_l2_tunnel_conf(dev);
	ixgbe_filter_restore(dev);

	if (tm_conf->root && !tm_conf->committed)
		PMD_DRV_LOG(WARNING,
			    "please call hierarchy_commit() "
			    "before starting the port");

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	ixgbe_dev_link_update(dev, 0);

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
	ixgbe_dev_clear_queues(dev);
	return -EIO;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
ixgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int vf;
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);

	/* disable interrupts */
	ixgbe_disable_intr(hw);

	/* reset the NIC */
	ixgbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	ixgbe_stop_adapter(hw);

	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
		/* Turn off the copper */
		ixgbe_set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		ixgbe_disable_tx_laser(hw);
	}

	ixgbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ixgbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	/* reset hierarchy commit */
	tm_conf->committed = false;

	adapter->rss_reta_updated = 0;
}

/*
 * Set device link up: enable tx.
 */
static int
ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (hw->mac.type == ixgbe_mac_82599EB) {
#ifdef RTE_LIBRTE_IXGBE_BYPASS
		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
			/* Not supported in bypass mode */
			PMD_INIT_LOG(ERR, "Set link up is not supported "
				     "by device id 0x%x", hw->device_id);
			return -ENOTSUP;
		}
#endif
	}

	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
		/* Turn on the copper */
		ixgbe_set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		ixgbe_enable_tx_laser(hw);
	}

	return 0;
}

/*
 * Set device link down: disable tx.
 */
static int
ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (hw->mac.type == ixgbe_mac_82599EB) {
#ifdef RTE_LIBRTE_IXGBE_BYPASS
		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
			/* Not supported in bypass mode */
			PMD_INIT_LOG(ERR, "Set link down is not supported "
				     "by device id 0x%x", hw->device_id);
			return -ENOTSUP;
		}
#endif
	}

	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
		/* Turn off the copper */
		ixgbe_set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		ixgbe_disable_tx_laser(hw);
	}

	return 0;
}

/*
 * Reset and stop device.
 */
static void
ixgbe_dev_close(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	ixgbe_pf_reset_hw(hw);

	ixgbe_dev_stop(dev);
	hw->adapter_stopped = 1;

	ixgbe_dev_free_queues(dev);

	ixgbe_disable_pcie_master(hw);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}

static int
ixgbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset a PF port, it should notify all
	 * its VFs so they can align with it. The detailed notification
	 * mechanism is PMD specific, and for the ixgbe PF it is rather
	 * complex. To avoid unexpected behavior in the VFs, reset of a PF
	 * with SR-IOV activated is currently not supported. It might be
	 * supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_ixgbe_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_ixgbe_dev_init(dev, NULL);

	return ret;
}

static void
ixgbe_read_stats_registers(struct ixgbe_hw *hw,
			   struct ixgbe_hw_stats *hw_stats,
			   struct ixgbe_macsec_stats *macsec_stats,
			   uint64_t *total_missed_rx, uint64_t *total_qbrc,
			   uint64_t *total_qprc, uint64_t *total_qprdc)
{
	uint32_t bprc, lxon, lxoff, total;
	uint32_t delta_gprc = 0;
	unsigned i;
	/* Workaround for RX byte count not including CRC bytes when CRC
	 * strip is enabled. CRC bytes are removed from counters when crc_strip
	 * is disabled.
	 */
	int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
			IXGBE_HLREG0_RXCRCSTRP);

	hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);

	for (i = 0; i < 8; i++) {
		uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));

		/* global total per queue */
		hw_stats->mpc[i] += mp;
		/* Running comprehensive total for stats display */
		*total_missed_rx += hw_stats->mpc[i];
		if (hw->mac.type == ixgbe_mac_82598EB) {
			hw_stats->rnbc[i] +=
			    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
			hw_stats->pxonrxc[i] +=
			    IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			hw_stats->pxoffrxc[i] +=
			    IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		} else {
			hw_stats->pxonrxc[i] +=
			    IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			hw_stats->pxoffrxc[i] +=
			    IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
			hw_stats->pxon2offc[i] +=
			    IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
		}
		hw_stats->pxontxc[i] +=
		    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		hw_stats->pxofftxc[i] +=
		    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
	}
	for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
		uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));

		delta_gprc += delta_qprc;

		hw_stats->qprc[i] += delta_qprc;
		hw_stats->qptc[i] += delta_qptc;

		hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
		hw_stats->qbrc[i] +=
		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
		if (crc_strip == 0)
			hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;

		hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
		hw_stats->qbtc[i] +=
		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);

		hw_stats->qprdc[i] += delta_qprdc;
		*total_qprdc += hw_stats->qprdc[i];

		*total_qprc += hw_stats->qprc[i];
		*total_qbrc += hw_stats->qbrc[i];
	}
	hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/*
	 * An errata states that gprc actually counts good + missed packets:
	 * Workaround to set gprc to the summated queue packet receives
	 */
	hw_stats->gprc = *total_qprc;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}
	uint64_t old_tpr = hw_stats->tpr;

	hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);

	if (crc_strip == 0)
		hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;

	uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
	hw_stats->gptc += delta_gptc;
	hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
	hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;

	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	hw_stats->bprc += bprc;
	hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		hw_stats->mprc -= bprc;

	hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	hw_stats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	hw_stats->lxofftxc += lxoff;
	total = lxon + lxoff;

	hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	hw_stats->gptc -= total;
	hw_stats->mptc -= total;
	hw_stats->ptc64 -= total;
	hw_stats->gotc -= total * ETHER_MIN_LEN;

	hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Flow Director Stats registers */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
		hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
		hw_stats->fdirustat_add += IXGBE_READ_REG(hw,
					   IXGBE_FDIRUSTAT) & 0xFFFF;
		hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw,
					      IXGBE_FDIRUSTAT) >> 16) & 0xFFFF;
		hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw,
					    IXGBE_FDIRFSTAT) & 0xFFFF;
		hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw,
					       IXGBE_FDIRFSTAT) >> 16) & 0xFFFF;
	}
	/* MACsec Stats registers */
	macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
	macsec_stats->out_pkts_encrypted +=
		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
	macsec_stats->out_pkts_protected +=
		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
	macsec_stats->out_octets_encrypted +=
		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
	macsec_stats->out_octets_protected +=
		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
	macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
	macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
	macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
	macsec_stats->in_pkts_unknownsci +=
		IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
	macsec_stats->in_octets_decrypted +=
		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
	macsec_stats->in_octets_validated +=
		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
	macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
	macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
	macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
	for (i = 0; i < 2; i++) {
		macsec_stats->in_pkts_ok +=
			IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
		macsec_stats->in_pkts_invalid +=
			IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
		macsec_stats->in_pkts_notvalid +=
			IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
	}
	macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
	macsec_stats->in_pkts_notusingsa +=
		IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
}

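/*
 * Worked example of the CRC adjustment above (hypothetical interval): if
 * CRC stripping is disabled (crc_strip == 0) and a queue received 1000
 * packets since the last read, the hardware byte counters include the
 * 4-byte CRC of each frame, so 1000 * ETHER_CRC_LEN = 4000 bytes are
 * subtracted to keep byte counts consistent with the stripped case.
 */
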
/*
 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
 */
static int
ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ixgbe_hw *hw =
			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_stats *hw_stats =
			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	struct ixgbe_macsec_stats *macsec_stats =
			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
				dev->data->dev_private);
	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
	unsigned i;

	total_missed_rx = 0;
	total_qbrc = 0;
	total_qprc = 0;
	total_qprdc = 0;

	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
			&total_qbrc, &total_qprc, &total_qprdc);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = total_qprc;
	stats->ibytes = total_qbrc;
	stats->opackets = hw_stats->gptc;
	stats->obytes = hw_stats->gotc;

	for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
		stats->q_ipackets[i] = hw_stats->qprc[i];
		stats->q_opackets[i] = hw_stats->qptc[i];
		stats->q_ibytes[i] = hw_stats->qbrc[i];
		stats->q_obytes[i] = hw_stats->qbtc[i];
		stats->q_errors[i] = hw_stats->qprdc[i];
	}

	/* Rx Errors */
	stats->imissed = total_missed_rx;
	stats->ierrors = hw_stats->crcerrs +
			 hw_stats->mspdc +
			 hw_stats->rlec +
			 hw_stats->ruc +
			 hw_stats->roc +
			 hw_stats->illerrc +
			 hw_stats->errbc +
			 hw_stats->rfc +
			 hw_stats->fccrc +
			 hw_stats->fclast;

	/* Tx Errors */
	stats->oerrors = 0;
	return 0;
}

static void
ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ixgbe_hw_stats *stats =
		IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* HW registers are cleared on read */
	ixgbe_dev_stats_get(dev, NULL);

	/* Reset software totals */
	memset(stats, 0, sizeof(*stats));
}

/* This function calculates the number of xstats based on the current config */
static unsigned
ixgbe_xstats_calc_num(void) {
	return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
		(IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
		(IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
}

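/*
 * Illustrative arithmetic (the actual counts are set by the stat-string
 * tables in this file): with, say, 8 per-priority RX counters over 8
 * priority values and 2 per-priority TX counters over 8 values, the
 * priority blocks alone would contribute 8 * 8 + 2 * 8 = 80 of the
 * reported xstats entries.
 */
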
static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
{
	const unsigned cnt_stats = ixgbe_xstats_calc_num();
	unsigned stat, i, count;

	if (xstats_names != NULL) {
		count = 0;

		/* Note: limit >= cnt_stats checked upstream
		 * in rte_eth_xstats_names()
		 */

		/* Extended stats from ixgbe_hw_stats */
		for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
			strlcpy(xstats_names[count].name,
				rte_ixgbe_stats_strings[i].name,
				sizeof(xstats_names[count].name));
			count++;
		}

		/* MACsec Stats */
		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
			strlcpy(xstats_names[count].name,
				rte_ixgbe_macsec_strings[i].name,
				sizeof(xstats_names[count].name));
			count++;
		}

		/* RX Priority Stats */
		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
				snprintf(xstats_names[count].name,
					sizeof(xstats_names[count].name),
					"rx_priority%u_%s", i,
					rte_ixgbe_rxq_strings[stat].name);
				count++;
			}
		}

		/* TX Priority Stats */
		for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
			for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
				snprintf(xstats_names[count].name,
					sizeof(xstats_names[count].name),
					"tx_priority%u_%s", i,
					rte_ixgbe_txq_strings[stat].name);
				count++;
			}
		}
	}
	return cnt_stats;
}

static int ixgbe_dev_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit)
{
	if (!ids) {
		const unsigned int cnt_stats = ixgbe_xstats_calc_num();
		unsigned int stat, i, count;

		if (xstats_names != NULL) {
			count = 0;

			/* Note: limit >= cnt_stats checked upstream
			 * in rte_eth_xstats_names()
			 */

			/* Extended stats from ixgbe_hw_stats */
			for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
				strlcpy(xstats_names[count].name,
					rte_ixgbe_stats_strings[i].name,
					sizeof(xstats_names[count].name));
				count++;
			}

			/* MACsec Stats */
			for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
				strlcpy(xstats_names[count].name,
					rte_ixgbe_macsec_strings[i].name,
					sizeof(xstats_names[count].name));
				count++;
			}

			/* RX Priority Stats */
			for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
				for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
					snprintf(xstats_names[count].name,
						sizeof(xstats_names[count].name),
						"rx_priority%u_%s", i,
						rte_ixgbe_rxq_strings[stat].name);
					count++;
				}
			}

			/* TX Priority Stats */
			for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
				for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
					snprintf(xstats_names[count].name,
						sizeof(xstats_names[count].name),
						"tx_priority%u_%s", i,
						rte_ixgbe_txq_strings[stat].name);
					count++;
				}
			}
		}
		return cnt_stats;
	}

	uint16_t i;
	uint16_t size = ixgbe_xstats_calc_num();
	struct rte_eth_xstat_name xstats_names_copy[size];

	ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
			size);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= size) {
			PMD_INIT_LOG(ERR, "id value isn't valid");
			return -1;
		}
		strcpy(xstats_names[i].name,
				xstats_names_copy[ids[i]].name);
	}
	return limit;
}

static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned limit)
{
	unsigned i;

	if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
		return -ENOMEM;

	if (xstats_names != NULL)
		for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
			strlcpy(xstats_names[i].name,
				rte_ixgbevf_stats_strings[i].name,
				sizeof(xstats_names[i].name));
	return IXGBEVF_NB_XSTATS;
}

static int
ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned n)
{
	struct ixgbe_hw *hw =
			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_stats *hw_stats =
			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	struct ixgbe_macsec_stats *macsec_stats =
			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
				dev->data->dev_private);
	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
	unsigned i, stat, count = 0;

	count = ixgbe_xstats_calc_num();

	if (n < count)
		return count;

	total_missed_rx = 0;
	total_qbrc = 0;
	total_qprc = 0;
	total_qprdc = 0;

	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
			&total_qbrc, &total_qprc, &total_qprdc);

	/* If this is a reset xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	if (!xstats)
		return 0;

	/* Extended stats from ixgbe_hw_stats */
	count = 0;
	for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
				rte_ixgbe_stats_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	/* MACsec Stats */
	for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
		xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
				rte_ixgbe_macsec_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	/* RX Priority Stats */
	for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
		for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
			xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
					rte_ixgbe_rxq_strings[stat].offset +
					(sizeof(uint64_t) * i));
			xstats[count].id = count;
			count++;
		}
	}

	/* TX Priority Stats */
	for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
		for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
			xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
					rte_ixgbe_txq_strings[stat].offset +
					(sizeof(uint64_t) * i));
			xstats[count].id = count;
			count++;
		}
	}
	return count;
}

static int
ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int n)
{
	if (!ids) {
		struct ixgbe_hw *hw =
				IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
		struct ixgbe_hw_stats *hw_stats =
				IXGBE_DEV_PRIVATE_TO_STATS(
						dev->data->dev_private);
		struct ixgbe_macsec_stats *macsec_stats =
				IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
					dev->data->dev_private);
		uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
		unsigned int i, stat, count = 0;

		count = ixgbe_xstats_calc_num();

		if (!ids && n < count)
			return count;

		total_missed_rx = 0;
		total_qbrc = 0;
		total_qprc = 0;
		total_qprdc = 0;

		ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
				&total_missed_rx, &total_qbrc, &total_qprc,
				&total_qprdc);

		/* If this is a reset xstats is NULL, and we have cleared the
		 * registers by reading them.
		 */
		if (!ids && !values)
			return 0;

		/* Extended stats from ixgbe_hw_stats */
		count = 0;
		for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
			values[count] = *(uint64_t *)(((char *)hw_stats) +
					rte_ixgbe_stats_strings[i].offset);
			count++;
		}

		/* MACsec Stats */
		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
			values[count] = *(uint64_t *)(((char *)macsec_stats) +
					rte_ixgbe_macsec_strings[i].offset);
			count++;
		}

		/* RX Priority Stats */
		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
				values[count] =
					*(uint64_t *)(((char *)hw_stats) +
					rte_ixgbe_rxq_strings[stat].offset +
					(sizeof(uint64_t) * i));
				count++;
			}
		}

		/* TX Priority Stats */
		for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
			for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
				values[count] =
					*(uint64_t *)(((char *)hw_stats) +
					rte_ixgbe_txq_strings[stat].offset +
					(sizeof(uint64_t) * i));
				count++;
			}
		}
		return count;
	}

	uint16_t i;
	uint16_t size = ixgbe_xstats_calc_num();
	uint64_t values_copy[size];

	ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);

	for (i = 0; i < n; i++) {
		if (ids[i] >= size) {
			PMD_INIT_LOG(ERR, "id value isn't valid");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}
	return n;
}

static void
ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct ixgbe_hw_stats *stats =
			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	struct ixgbe_macsec_stats *macsec_stats =
			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
				dev->data->dev_private);

	unsigned count = ixgbe_xstats_calc_num();

	/* HW registers are cleared on read */
	ixgbe_dev_xstats_get(dev, NULL, count);

	/* Reset software totals */
	memset(stats, 0, sizeof(*stats));
	memset(macsec_stats, 0, sizeof(*macsec_stats));
}
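/*
 * Refresh the software copies of the VF statistics. Note: the
 * UPDATE_VF_STAT and UPDATE_VF_STAT_36BIT helpers used here accumulate
 * the delta between the current register reading and the last saved
 * snapshot, so the soft counters keep growing across hardware counter
 * rollovers.
 */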
static void
ixgbevf_update_stats(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
			  IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Good Rx packets, include VF loopback */
	UPDATE_VF_STAT(IXGBE_VFGPRC,
	    hw_stats->last_vfgprc, hw_stats->vfgprc);

	/* Good Rx octets, include VF loopback */
	UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    hw_stats->last_vfgorc, hw_stats->vfgorc);

	/* Good Tx packets, include VF loopback */
	UPDATE_VF_STAT(IXGBE_VFGPTC,
	    hw_stats->last_vfgptc, hw_stats->vfgptc);

	/* Good Tx octets, include VF loopback */
	UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    hw_stats->last_vfgotc, hw_stats->vfgotc);

	/* Rx Multicast packets */
	UPDATE_VF_STAT(IXGBE_VFMPRC,
	    hw_stats->last_vfmprc, hw_stats->vfmprc);
}
static int
ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		       unsigned n)
{
	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	unsigned i;

	if (n < IXGBEVF_NB_XSTATS)
		return IXGBEVF_NB_XSTATS;

	ixgbevf_update_stats(dev);

	if (!xstats)
		return 0;

	/* Extended stats */
	for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
			rte_ixgbevf_stats_strings[i].offset);
	}

	return IXGBEVF_NB_XSTATS;
}
static int
ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
			  IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	ixgbevf_update_stats(dev);

	if (stats == NULL)
		return -EINVAL;

	stats->ipackets = hw_stats->vfgprc;
	stats->ibytes = hw_stats->vfgorc;
	stats->opackets = hw_stats->vfgptc;
	stats->obytes = hw_stats->vfgotc;
	return 0;
}
static void
ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Sync HW register to the last stats */
	ixgbevf_dev_stats_get(dev, NULL);

	/* reset HW current stats*/
	hw_stats->vfgprc = 0;
	hw_stats->vfgorc = 0;
	hw_stats->vfgptc = 0;
	hw_stats->vfgotc = 0;
}
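/*
 * Report the firmware "eTrack" identifier: the 32-bit ID is assembled
 * from two EEPROM words (high word at offset 0x2e, low word at 0x2d)
 * and printed as a hex string into the caller's buffer.
 */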
static int
ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u16 eeprom_verh, eeprom_verl;
	u32 etrack_id;
	int ret;

	ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
	ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);

	etrack_id = (eeprom_verh << 16) | eeprom_verl;
	ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);

	ret += 1; /* add the size of '\0' */
	if (fw_size < (u32)ret)
		return ret;
	else
		return 0;
}
static void
ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
		/*
		 * When DCB/VT is off, maximum number of queues changes,
		 * except for 82598EB, which remains constant.
		 */
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
				hw->mac.type != ixgbe_mac_82598EB)
			dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
	}
	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
	dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	if (hw->mac.type == ixgbe_mac_82598EB)
		dev_info->max_vmdq_pools = ETH_16_POOLS;
	else
		dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
	dev_info->min_mtu = ETHER_MIN_MTU;
	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
			.hthresh = IXGBE_DEFAULT_RX_HTHRESH,
			.wthresh = IXGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IXGBE_DEFAULT_TX_PTHRESH,
			.hthresh = IXGBE_DEFAULT_TX_HTHRESH,
			.wthresh = IXGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	/*
	 * According to 82599 and x540 specifications RS bit *must* be set on
	 * the last descriptor of *every* packet. Therefore we will not allow
	 * the tx_rs_thresh above 1 for all NICs newer than 82598. Since VFs
	 * are available only on devices starting from 82599, tx_rs_thresh
	 * should be set to 1 for ALL VF devices.
	 */
	if (hw->mac.type > ixgbe_mac_82598EB)
		dev_info->default_txconf.tx_rs_thresh = 1;

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X540_vf ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550_vf) {
		dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	}
	if (hw->mac.type == ixgbe_mac_X550) {
		dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
		dev_info->speed_capa |= ETH_LINK_SPEED_5G;
	}

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;
}
static const uint32_t *
ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* For non-vec functions,
		 * refers to ixgbe_rxd_pkt_info_to_pkt_type();
		 * for vec functions,
		 * refers to _recv_raw_pkts_vec().
		 */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
	    dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
	    dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
	    dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
		return ptypes;

#if defined(RTE_ARCH_X86)
	if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
	    dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
		return ptypes;
#endif
	return NULL;
}
static void
ixgbevf_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
	dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
	dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	if (hw->mac.type == ixgbe_mac_82598EB)
		dev_info->max_vmdq_pools = ETH_16_POOLS;
	else
		dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
			.hthresh = IXGBE_DEFAULT_RX_HTHRESH,
			.wthresh = IXGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IXGBE_DEFAULT_TX_PTHRESH,
			.hthresh = IXGBE_DEFAULT_TX_HTHRESH,
			.wthresh = IXGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;
}
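/*
 * Determine the VF link state. The VFLINKS register gives the raw
 * link/speed indication, and the PF mailbox is then probed (unless a
 * reset was detected) so that link is only reported up once the PF is
 * known to be responsive.
 */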
static int
ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
		   int *link_up, int wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	uint32_t links_reg, in_msg;
	int ret_val = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
		int i;

		for (i = 0; i < 5; i++) {
			rte_delay_us(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (hw->mac.type == ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_10_X550EM_A:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* Reserved in older MACs */
		if (hw->mac.type >= ixgbe_mac_X550)
			*speed = IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	/* if the read failed it could just be a mailbox collision, best wait
	 * until we are called again and don't report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1, 0))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			mac->get_link_status = false;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reinit */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}
static void
ixgbe_dev_setup_link_alarm_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	u32 speed;
	bool autoneg = false;

	speed = hw->phy.autoneg_advertised;
	if (!speed)
		ixgbe_get_link_capabilities(hw, &speed, &autoneg);

	ixgbe_setup_link(hw, speed, true);

	intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
}
/* return 0 means link status changed, -1 means not changed */
static int
ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
			    int wait_to_complete, int vf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	int link_up;
	int diag;
	int wait = 1;

	memset(&link, 0, sizeof(link));
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	hw->mac.get_link_status = true;

	if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	if (vf)
		diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
	else
		diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);

	if (diag != 0) {
		link.link_speed = ETH_SPEED_NUM_100M;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (link_up == 0) {
		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
			intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
			rte_eal_alarm_set(10,
				ixgbe_dev_setup_link_alarm_handler, dev);
		}
		return rte_eth_linkstatus_set(dev, &link);
	}

	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (link_speed) {
	default:
	case IXGBE_LINK_SPEED_UNKNOWN:
		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
			hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
			link.link_speed = ETH_SPEED_NUM_10M;
		else
			link.link_speed = ETH_SPEED_NUM_100M;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		break;

	case IXGBE_LINK_SPEED_100_FULL:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	case IXGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case IXGBE_LINK_SPEED_2_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;

	case IXGBE_LINK_SPEED_5GB_FULL:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;

	case IXGBE_LINK_SPEED_10GB_FULL:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static int
ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
}

static int
ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
}
static void
ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fctrl;

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

static void
ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fctrl;

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl &= (~IXGBE_FCTRL_UPE);
	if (dev->data->all_multicast == 1)
		fctrl |= IXGBE_FCTRL_MPE;
	else
		fctrl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

static void
ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fctrl;

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}

static void
ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t fctrl;

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
}
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	ixgbe_dev_link_status_print(dev);
	if (on)
		intr->mask |= IXGBE_EICR_LSC;
	else
		intr->mask &= ~IXGBE_EICR_LSC;

	return 0;
}

/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	intr->mask |= IXGBE_EICR_RTX_QUEUE;

	return 0;
}
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	intr->mask |= IXGBE_EICR_LINKSEC;

	return 0;
}
/*
 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	/* clear all cause mask */
	ixgbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	intr->flags = 0;

	/* set flag for async link update */
	if (eicr & IXGBE_EICR_LSC)
		intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & IXGBE_EICR_MAILBOX)
		intr->flags |= IXGBE_FLAG_MAILBOX;

	if (eicr & IXGBE_EICR_LINKSEC)
		intr->flags |= IXGBE_FLAG_MACSEC;

	if (hw->mac.type == ixgbe_mac_X550EM_x &&
	    hw->phy.type == ixgbe_phy_x550em_ext_t &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
		intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;

	return 0;
}
/*
 * It gets and then prints the link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static void
ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
/*
 * It executes link_update after knowing an interrupt occurred.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	int64_t timeout;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & IXGBE_FLAG_MAILBOX) {
		ixgbe_pf_mbx_process(dev);
		intr->flags &= ~IXGBE_FLAG_MAILBOX;
	}

	if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
		ixgbe_handle_lasi(hw);
		intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before link update, for predicting later */
		rte_eth_linkstatus_get(dev, &link);

		ixgbe_dev_link_update(dev, 0);

		/* likely to up */
		if (!link.link_status)
			/* handle it 1 sec later, wait it being stable */
			timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to down */
		else
			/* handle it 4 sec later, wait it being stable */
			timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;

		ixgbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      ixgbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0)
			PMD_DRV_LOG(ERR, "Error setting alarm");
		else {
			/* remember original mask */
			intr->mask_original = intr->mask;
			/* only disable lsc interrupt */
			intr->mask &= ~IXGBE_EIMS_LSC;
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	ixgbe_enable_intr(dev);

	return 0;
}
/*
 * Interrupt handler which shall be registered for alarm callback for delayed
 * handling specific interrupt to wait for the stable nic state. As the
 * NIC interrupt state is not stable for ixgbe after link is just down,
 * it needs to wait 4 seconds to get the stable status.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ixgbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t eicr;

	ixgbe_disable_intr(hw);

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (eicr & IXGBE_EICR_MAILBOX)
		ixgbe_pf_mbx_process(dev);

	if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
		ixgbe_handle_lasi(hw);
		intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		ixgbe_dev_link_update(dev, 0);
		intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
		ixgbe_dev_link_status_print(dev);
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}

	if (intr->flags & IXGBE_FLAG_MACSEC) {
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					      NULL);
		intr->flags &= ~IXGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask = intr->mask_original;
	intr->mask_original = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	ixgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);
}
/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ixgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	ixgbe_dev_interrupt_get_status(dev);
	ixgbe_dev_interrupt_action(dev);
}
static int
ixgbe_dev_led_on(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
}

static int
ixgbe_dev_led_off(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
}
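/*
 * Report the current flow-control configuration. The requested settings
 * come from the shared hw->fc state, while the actual rx/tx pause
 * status is read back from the MFLCN and FCCFG registers so the
 * reported mode reflects what the hardware is really doing.
 */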
static int
ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ixgbe_hw *hw;
	uint32_t mflcn_reg;
	uint32_t fccfg_reg;
	int rx_pause;
	int tx_pause;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water[0];
	fc_conf->low_water = hw->fc.low_water[0];
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = !hw->fc.disable_fc_autoneg;

	/*
	 * Return rx_pause status according to actual setting of
	 * MFLCN register.
	 */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
		rx_pause = 1;
	else
		rx_pause = 0;

	/*
	 * Return tx_pause status according to actual setting of
	 * FCCFG register.
	 */
	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
		tx_pause = 1;
	else
		tx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}
static int
ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct ixgbe_hw *hw;
	int err;
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t mflcn;
	enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
		ixgbe_fc_none,
		ixgbe_fc_rx_pause,
		ixgbe_fc_tx_pause,
		ixgbe_fc_full
	};

	PMD_INIT_FUNC_TRACE();

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/*
	 * At least reserve one Ethernet frame for watermark
	 * high_water/low_water in kilo bytes for ixgbe
	 */
	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
	if ((fc_conf->high_water > max_high_water) ||
		(fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
	hw->fc.pause_time     = fc_conf->pause_time;
	hw->fc.high_water[0]  = fc_conf->high_water;
	hw->fc.low_water[0]   = fc_conf->low_water;
	hw->fc.send_xon       = fc_conf->send_xon;
	hw->fc.disable_fc_autoneg = !fc_conf->autoneg;

	err = ixgbe_fc_enable(hw);

	/* Not negotiated is not an error case */
	if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {

		/* check if we want to forward MAC frames - driver doesn't have native
		 * capability to do that, so we'll write the registers ourselves */

		mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);

		/* set or clear MFLCN.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			mflcn |= IXGBE_MFLCN_PMCF;
		else
			mflcn &= ~IXGBE_MFLCN_PMCF;

		IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
		IXGBE_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
	return -EIO;
}
/**
 * ixgbe_pfc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 * @tc_num: traffic class number
 * Enable flow control according to the current settings.
 */
static int
ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
{
	int ret_val = 0;
	uint32_t mflcn_reg, fccfg_reg;
	uint32_t reg;
	uint32_t fcrtl, fcrth;
	uint8_t i;
	uint8_t nb_rx_en;

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
		/* High/Low water can not be 0 */
		if ((!hw->fc.high_water[tc_num]) ||
		    (!hw->fc.low_water[tc_num])) {
			PMD_INIT_LOG(ERR, "Invalid water mark configuration");
			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
			goto out;
		}

		if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
			PMD_INIT_LOG(ERR, "Invalid water mark configuration");
			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
			goto out;
		}
	}
	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE |
		       IXGBE_MFLCN_RPFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * If more than one RX Priority Flow control is enabled,
		 * the TX pause can not be disabled.
		 */
		nb_rx_en = 0;
		for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			if (reg & IXGBE_FCRTH_FCEN)
				nb_rx_en++;
		}
		if (nb_rx_en > 1)
			fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RPFCE;
		/*
		 * If more than one RX Priority Flow control is enabled,
		 * the TX pause can not be disabled.
		 */
		nb_rx_en = 0;
		for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			if (reg & IXGBE_FCRTH_FCEN)
				nb_rx_en++;
		}
		if (nb_rx_en > 1)
			fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RPFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
		break;
	default:
		PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		hw->fc.high_water[tc_num]) {
		fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
		fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
		/*
		 * In order to prevent Tx hangs when the internal Tx
		 * switch is enabled we must set the high water mark
		 * to the maximum FCRTH value. This allows the Tx
		 * switch to function even under heavy Rx workloads.
		 */
		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
static int
ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int32_t ret_val = IXGBE_NOT_IMPLEMENTED;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
	}
	return ret_val;
}
static int
ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
{
	int err;
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint8_t tc_num;
	uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_dcb_config *dcb_config =
		IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);

	enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
		ixgbe_fc_none,
		ixgbe_fc_rx_pause,
		ixgbe_fc_tx_pause,
		ixgbe_fc_full
	};

	PMD_INIT_FUNC_TRACE();

	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
	tc_num = map[pfc_conf->priority];
	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
	/*
	 * At least reserve one Ethernet frame for watermark
	 * high_water/low_water in kilo bytes for ixgbe
	 */
	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
	if ((pfc_conf->fc.high_water > max_high_water) ||
	    (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
		PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
	hw->fc.pause_time = pfc_conf->fc.pause_time;
	hw->fc.send_xon = pfc_conf->fc.send_xon;
	hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
	hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;

	err = ixgbe_dcb_pfc_enable(dev, tc_num);

	/* Not negotiated is not an error case */
	if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
		return 0;

	PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
	return -EIO;
}
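/*
 * Program the RSS redirection table. Each 32-bit RETA register holds
 * four 8-bit queue entries, so entries are processed in groups of
 * IXGBE_4_BIT_WIDTH (4) and the per-group mask selects which of the
 * four bytes are rewritten; unmasked bytes keep their current value
 * via a read-modify-write of the register.
 */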
int
ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size)
{
	uint16_t i, sp_reta_size;
	uint8_t j, mask;
	uint32_t reta, r;
	uint16_t idx, shift;
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reta_reg;

	PMD_INIT_FUNC_TRACE();

	if (!ixgbe_rss_update_sp(hw->mac.type)) {
		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
			"NIC.");
		return -ENOTSUP;
	}

	sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
	if (reta_size != sp_reta_size) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, sp_reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
						IXGBE_4_BIT_MASK);
		if (!mask)
			continue;
		reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
		if (mask == IXGBE_4_BIT_MASK)
			r = 0;
		else
			r = IXGBE_READ_REG(hw, reta_reg);
		for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
			if (mask & (0x1 << j))
				reta |= reta_conf[idx].reta[shift + j] <<
							(CHAR_BIT * j);
			else
				reta |= r & (IXGBE_8_BIT_MASK <<
						(CHAR_BIT * j));
		}
		IXGBE_WRITE_REG(hw, reta_reg, reta);
	}
	adapter->rss_reta_updated = 1;

	return 0;
}
int
ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	uint16_t i, sp_reta_size;
	uint8_t j, mask;
	uint32_t reta;
	uint16_t idx, shift;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reta_reg;

	PMD_INIT_FUNC_TRACE();
	sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
	if (reta_size != sp_reta_size) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)", reta_size, sp_reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
						IXGBE_4_BIT_MASK);
		if (!mask)
			continue;

		reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
		reta = IXGBE_READ_REG(hw, reta_reg);
		for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
			if (mask & (0x1 << j))
				reta_conf[idx].reta[shift + j] =
					((reta >> (CHAR_BIT * j)) &
						IXGBE_8_BIT_MASK);
		}
	}

	return 0;
}
static int
ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
				uint32_t index, uint32_t pool)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t enable_addr = 1;

	return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
			     pool, enable_addr);
}

static void
ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ixgbe_clear_rar(hw, index);
}

static int
ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	ixgbe_remove_rar(dev, 0);
	ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);

	return 0;
}
static bool
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
	if (strcmp(dev->device->driver->name, drv->driver.name))
		return false;

	return true;
}

bool
is_ixgbe_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_ixgbe_pmd);
}
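/*
 * Update the port MTU. The new frame size is validated against the
 * device limits, jumbo mode (HLREG0.JUMBOEN) is toggled around the
 * standard Ethernet frame length, and the upper 16 bits of MAXFRS are
 * reprogrammed with the new maximum frame size.
 */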
static int
ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	uint32_t hlreg0;
	uint32_t maxfrs;
	struct ixgbe_hw *hw;
	struct rte_eth_dev_info dev_info;
	uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD;
	struct rte_eth_dev_data *dev_data = dev->data;

	ixgbe_dev_info_get(dev, &dev_info);

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* If device is started, refuse mtu that requires the support of
	 * scattered packets when this feature has not been enabled before.
	 */
	if (dev_data->dev_started && !dev_data->scattered_rx &&
	    (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
		PMD_INIT_LOG(ERR, "Stop port first.");
		return -EINVAL;
	}

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);

	/* switch to jumbo mode if needed */
	if (frame_size > ETHER_MAX_LEN) {
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
	} else {
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
	}
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
	maxfrs &= 0x0000FFFF;
	maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
	IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);

	return 0;
}
/*
 * Virtual Function operations
 */
static void
ixgbevf_intr_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Clear interrupt mask to stop from interrupts being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	IXGBE_WRITE_FLUSH(hw);

	/* Clear mask value. */
	intr->mask = 0;
}

static void
ixgbevf_intr_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* VF enable interrupt autoclean */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);

	IXGBE_WRITE_FLUSH(hw);

	/* Save IXGBE_VTEIMS value to mask. */
	intr->mask = IXGBE_VF_IRQ_ENABLE_MASK;
}
static int
ixgbevf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *conf = &dev->data->dev_conf;
	struct ixgbe_adapter *adapter =
			(struct ixgbe_adapter *)dev->data->dev_private;

	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
		     dev->data->port_id);

	/*
	 * VF has no ability to enable/disable HW CRC
	 * Keep the persistent behavior the same as Host PF
	 */
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
	}
#else
	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
	}
#endif

	/*
	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
	 * allocation or vector Rx preconditions we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;
	adapter->rx_vec_allowed = true;

	return 0;
}
static int
ixgbevf_dev_start(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t intr_vector = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int err, mask = 0;

	PMD_INIT_FUNC_TRACE();

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
		return err;
	}
	hw->mac.get_link_status = true;

	/* negotiate mailbox API version to use with the PF. */
	ixgbevf_negotiate_api(hw);

	ixgbevf_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ixgbevf_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
		ixgbe_dev_clear_queues(dev);
		return err;
	}

	/* Set vfta */
	ixgbevf_set_vfta_all(dev, 1);

	/* Set HW strip */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK;
	err = ixgbevf_vlan_offload_config(dev, mask);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
		ixgbe_dev_clear_queues(dev);
		return err;
	}

	ixgbevf_dev_rxtx_start(dev);

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		/* According to datasheet, only vector 0/1/2 can be used,
		 * now only one vector is used for Rx queue
		 */
		intr_vector = 1;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}
	ixgbevf_configure_msix(dev);

	/* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
	 * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ).
	 * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( )
	 * is not cleared, it will fail when following rte_intr_enable( ) tries
	 * to map Rx queue interrupt to other VFIO vectors.
	 * So clear uio/vfio intr/eventfd first to avoid failure.
	 */
	rte_intr_disable(intr_handle);

	rte_intr_enable(intr_handle);

	/* Re-enable interrupt for VF */
	ixgbevf_intr_enable(dev);

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	ixgbevf_dev_link_update(dev, 0);

	return 0;
}
static void
ixgbevf_dev_stop(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);

	ixgbevf_intr_disable(dev);

	hw->adapter_stopped = 1;
	ixgbe_stop_adapter(hw);

	/*
	 * Clear what we set, but we still keep shadow_vfta to
	 * restore after device starts
	 */
	ixgbevf_set_vfta_all(dev, 0);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	ixgbe_dev_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	adapter->rss_reta_updated = 0;
}
static void
ixgbevf_dev_close(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	ixgbe_reset_hw(hw);

	ixgbevf_dev_stop(dev);

	ixgbe_dev_free_queues(dev);

	/*
	 * Remove the VF MAC address to ensure
	 * that the VF traffic goes to the PF
	 * after stop, close and detach of the VF
	 */
	ixgbevf_remove_mac_addr(dev, 0);
}
static int
ixgbevf_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = eth_ixgbevf_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_ixgbevf_dev_init(dev);

	return ret;
}
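/*
 * Re-apply (on != 0) or clear (on == 0) every VLAN filter recorded in
 * the shadow VFTA. Each of the IXGBE_VFTA_SIZE shadow words covers 32
 * VLAN IDs, so bit j of word i corresponds to VLAN ID (i << 5) + j.
 */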
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	int i = 0, j = 0, vfta = 0, mask = 1;

	for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
		vfta = shadow_vfta->vfta[i];
		if (vfta) {
			mask = 1;
			for (j = 0; j < 32; j++) {
				if (vfta & mask)
					ixgbe_set_vfta(hw, (i<<5)+j, 0,
						       on, false);
				mask <<= 1;
			}
		}
	}
}
static int
ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vid_idx = 0;
	uint32_t vid_bit = 0;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	/* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
	ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set VF vlan");
		return ret;
	}
	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));

	/* Save what we set and restore it after device reset */
	if (on)
		shadow_vfta->vfta[vid_idx] |= vid_bit;
	else
		shadow_vfta->vfta[vid_idx] &= ~vid_bit;

	return 0;
}
static void
ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	if (queue >= hw->mac.max_rx_queues)
		return;

	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
	if (on)
		ctrl |= IXGBE_RXDCTL_VME;
	else
		ctrl &= ~IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);

	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
}
static int
ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct ixgbe_rx_queue *rxq;
	uint16_t i;
	int on = 0;

	/* The VF only supports the hw strip feature; other offloads are not supported */
	if (mask & ETH_VLAN_STRIP_MASK) {
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
			ixgbevf_vlan_strip_queue_set(dev, i, on);
		}
	}

	return 0;
}

static int
ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	ixgbe_config_vlan_strip_on_all_queues(dev, mask);

	ixgbevf_vlan_offload_config(dev, mask);

	return 0;
}
int
ixgbe_vt_check(struct ixgbe_hw *hw)
{
	uint32_t reg_val;

	/* if Virtualization Technology is enabled */
	reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
		PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
		return -1;
	}

	return 0;
}
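/*
 * Derive the 12-bit Unicast Table Array hash vector from a MAC address.
 * The filter type chooses which 12 bits of the last two address bytes
 * are used; e.g. with mc_filter_type 0 and an address ending in
 * ...:ab:cd, the vector is (0xab >> 4) | (0xcd << 4) = 0xcda.
 */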
static uint32_t
ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
{
	uint32_t vector = 0;

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 4) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 3) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((uc_addr->addr_bytes[4] >> 2) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((uc_addr->addr_bytes[4]) |
			(((uint16_t)uc_addr->addr_bytes[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
static int
ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
			uint8_t on)
{
	uint32_t vector;
	uint32_t uta_idx;
	uint32_t reg_val;
	uint32_t uta_shift;
	uint32_t rc;
	const uint32_t ixgbe_uta_idx_mask = 0x7F;
	const uint32_t ixgbe_uta_bit_shift = 5;
	const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
	const uint32_t bit1 = 0x1;

	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_uta_info *uta_info =
		IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);

	/* The UTA table only exists on 82599 hardware and newer */
	if (hw->mac.type < ixgbe_mac_82599EB)
		return -ENOTSUP;

	vector = ixgbe_uta_vector(hw, mac_addr);
	uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
	uta_shift = vector & ixgbe_uta_bit_mask;

	rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
	if (rc == on)
		return 0;

	reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
	if (on) {
		uta_info->uta_in_use++;
		reg_val |= (bit1 << uta_shift);
		uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
	} else {
		uta_info->uta_in_use--;
		reg_val &= ~(bit1 << uta_shift);
		uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
	}

	IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);

	if (uta_info->uta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
	else
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	return 0;
}
static int
ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
{
	int i;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_uta_info *uta_info =
		IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);

	/* The UTA table only exists on 82599 hardware and newer */
	if (hw->mac.type < ixgbe_mac_82599EB)
		return -ENOTSUP;

	if (on) {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = ~0;
			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
		}
	} else {
		for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
			uta_info->uta_shadow[i] = 0;
			IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
		}
	}
	return 0;
}
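/*
 * Translate the generic ETH_VMDQ_ACCEPT_* rx-mode flags into the
 * matching bits of a per-pool VMOLR register value, preserving any
 * bits already set in orig_val.
 */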
static uint32_t
ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
{
	uint32_t new_val = orig_val;

	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
		new_val |= IXGBE_VMOLR_AUPE;
	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
		new_val |= IXGBE_VMOLR_ROMPE;
	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
		new_val |= IXGBE_VMOLR_ROPE;
	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
		new_val |= IXGBE_VMOLR_BAM;
	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
		new_val |= IXGBE_VMOLR_MPE;

	return new_val;
}
#define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
#define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
#define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
#define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
	((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
	ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
static int
ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
		      struct rte_eth_mirror_conf *mirror_conf,
		      uint8_t rule_id, uint8_t on)
{
	uint32_t mr_ctl, vlvf;
	uint32_t mp_lsb = 0;
	uint32_t mv_msb = 0;
	uint32_t mv_lsb = 0;
	uint32_t mp_msb = 0;
	uint8_t i = 0;
	int reg_index = 0;
	uint64_t vlan_mask = 0;

	const uint8_t pool_mask_offset = 32;
	const uint8_t vlan_mask_offset = 32;
	const uint8_t dst_pool_offset = 8;
	const uint8_t rule_mr_offset = 4;
	const uint8_t mirror_rule_mask = 0x0F;

	struct ixgbe_mirror_info *mr_info =
			(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint8_t mirror_type = 0;

	if (ixgbe_vt_check(hw) < 0)
		return -ENOTSUP;

	if (rule_id >= IXGBE_MAX_MIRROR_RULES)
		return -EINVAL;

	if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
		PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
			    mirror_conf->rule_type);
		return -EINVAL;
	}

	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
		mirror_type |= IXGBE_MRCTL_VLME;
		/* Check if vlan id is valid and find corresponding VLAN ID
		 * index in VLVF
		 */
		for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
			if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
				/* search vlan id related pool vlan filter
				 * index
				 */
				reg_index = ixgbe_find_vlvf_slot(
						hw,
						mirror_conf->vlan.vlan_id[i],
						false);
				if (reg_index < 0)
					return -EINVAL;
				vlvf = IXGBE_READ_REG(hw,
						IXGBE_VLVF(reg_index));
				if ((vlvf & IXGBE_VLVF_VIEN) &&
				    ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
				      mirror_conf->vlan.vlan_id[i]))
					vlan_mask |= (1ULL << reg_index);
				else
					return -EINVAL;
			}
		}

		if (on) {
			mv_lsb = vlan_mask & 0xFFFFFFFF;
			mv_msb = vlan_mask >> vlan_mask_offset;

			mr_info->mr_conf[rule_id].vlan.vlan_mask =
						mirror_conf->vlan.vlan_mask;
			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
				if (mirror_conf->vlan.vlan_mask & (1ULL << i))
					mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
						mirror_conf->vlan.vlan_id[i];
			}
		} else {
			mv_lsb = 0;
			mv_msb = 0;
			mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
			for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
				mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
		}
	}

	/*
	 * if enable pool mirror, write related pool mask register, if disable
	 * pool mirror, clear PFMRVM register
	 */
	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
		mirror_type |= IXGBE_MRCTL_VPME;
		if (on) {
			mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
			mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
			mr_info->mr_conf[rule_id].pool_mask =
					mirror_conf->pool_mask;
		} else {
			mp_lsb = 0;
			mp_msb = 0;
			mr_info->mr_conf[rule_id].pool_mask = 0;
		}
	}
	if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
		mirror_type |= IXGBE_MRCTL_UPME;
	if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
		mirror_type |= IXGBE_MRCTL_DPME;

	/* read mirror control register and recalculate it */
	mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));

	if (on) {
		mr_ctl |= mirror_type;
		mr_ctl &= mirror_rule_mask;
		mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
	} else {
		mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
	}

	mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
	mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;

	/* write mirror control register */
	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);

	/* write pool mirror control register */
	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
				mp_msb);
	}

	/* write VLAN mirror control register */
	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
				mv_msb);
	}

	return 0;
}
static int
ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
{
	int mr_ctl = 0;
	uint32_t lsb_val = 0;
	uint32_t msb_val = 0;
	const uint8_t rule_mr_offset = 4;

	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_mirror_info *mr_info =
		(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));

	if (ixgbe_vt_check(hw) < 0)
		return -ENOTSUP;

	if (rule_id >= IXGBE_MAX_MIRROR_RULES)
		return -EINVAL;

	memset(&mr_info->mr_conf[rule_id], 0,
	       sizeof(struct rte_eth_mirror_conf));

	/* clear PFVMCTL register */
	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);

	/* clear pool mask register */
	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
	IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);

	/* clear vlan mask register */
	IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
	IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);

	return 0;
}
static int
ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vec = IXGBE_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = IXGBE_RX_VEC_START;
	intr->mask |= (1 << vec);
	RTE_SET_USED(queue_id);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);

	rte_intr_enable(intr_handle);

	return 0;
}

static int
ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vec = IXGBE_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = IXGBE_RX_VEC_START;
	intr->mask &= ~(1 << vec);
	RTE_SET_USED(queue_id);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);

	return 0;
}
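/*
 * Illustrative sketch (vector values are an assumption, not taken from
 * the code above): when rte_intr_allow_others() is true the Rx vectors
 * start after the miscellaneous vector, so vec = IXGBE_RX_VEC_START.
 * Enable then does  intr->mask |=  (1 << vec)  -> VTEIMS bit unmasked,
 * disable does      intr->mask &= ~(1 << vec)  -> VTEIMS bit masked.
 * The VF shares one Rx vector across queues, which is why queue_id is
 * deliberately unused in both paths.
 */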
static int
ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t mask;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	if (queue_id < 16) {
		ixgbe_disable_intr(hw);
		intr->mask |= (1 << queue_id);
		ixgbe_enable_intr(dev);
	} else if (queue_id < 32) {
		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
		mask &= (1 << queue_id);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
	} else if (queue_id < 64) {
		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
		mask &= (1 << (queue_id - 32));
		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
	rte_intr_enable(intr_handle);

	return 0;
}

static int
ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	uint32_t mask;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	if (queue_id < 16) {
		ixgbe_disable_intr(hw);
		intr->mask &= ~(1 << queue_id);
		ixgbe_enable_intr(dev);
	} else if (queue_id < 32) {
		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
		mask &= ~(1 << queue_id);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
	} else if (queue_id < 64) {
		mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
		mask &= ~(1 << (queue_id - 32));
		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}

	return 0;
}
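/*
 * Worked example of the register banking above: queue interrupt enable
 * bits are spread over EIMS (queues 0-15, handled via intr->mask),
 * EIMS_EX(0) (bits for causes 0-31, used here for queues 16-31) and
 * EIMS_EX(1) (causes 32-63). Disabling queue 40 therefore clears bit
 * (40 - 32) = 8 of EIMS_EX(1).
 */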
static void
ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
		     uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		tmp &= ~0xFF;
		tmp |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
	} else {
		/* rx or tx cause */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		idx = ((16 * (queue & 1)) + (8 * direction));
		tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		tmp &= ~(0xFF << idx);
		tmp |= (msix_vector << idx);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
	}
}

/**
 * set the IVAR registers, mapping interrupt causes to vectors
 * @param hw
 *  pointer to ixgbe_hw struct
 * @param direction
 *  0 for Rx, 1 for Tx, -1 for other causes
 * @param queue
 *  queue to map the corresponding interrupt to
 * @param msix_vector
 *  the vector to map to the corresponding queue
 */
static void
ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
		   uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp, idx;

	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (direction == -1)
			direction = 0;
		idx = (((direction * 64) + queue) >> 2) & 0x1F;
		tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
		tmp &= ~(0xFF << (8 * (queue & 0x3)));
		tmp |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
	} else if ((hw->mac.type == ixgbe_mac_82599EB) ||
			(hw->mac.type == ixgbe_mac_X540) ||
			(hw->mac.type == ixgbe_mac_X550)) {
		if (direction == -1) {
			/* other causes */
			idx = ((queue & 1) * 8);
			tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			tmp &= ~(0xFF << idx);
			tmp |= (msix_vector << idx);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
		} else {
			/* rx or tx causes */
			idx = ((16 * (queue & 1)) + (8 * direction));
			tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			tmp &= ~(0xFF << idx);
			tmp |= (msix_vector << idx);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
		}
	}
}
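/*
 * Worked example of the non-82598 IVAR layout above: each 32-bit IVAR
 * register carries four 8-bit allocation entries covering two queues
 * (one Rx and one Tx cause each). Mapping the Rx cause (direction = 0)
 * of queue 5:
 *   idx = 16 * (5 & 1) + 8 * 0 = 16
 * so the vector byte lands in bits 23:16 of IVAR(5 >> 1) = IVAR(2),
 * with IXGBE_IVAR_ALLOC_VAL marking the entry valid.
 */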
static void
ixgbevf_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t q_idx;
	uint32_t vector_idx = IXGBE_MISC_VEC_ID;
	uint32_t base = IXGBE_MISC_VEC_ID;

	/* Configure VF other cause ivar */
	ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd.
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = IXGBE_RX_VEC_START;
		vector_idx = IXGBE_RX_VEC_START;
	}

	/* Configure all RX queues of VF */
	for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
		/* Force all queues to use vector 0,
		 * as the VF supports at most one MSI-X vector.
		 */
		ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
		intr_handle->intr_vec[q_idx] = vector_idx;
		if (vector_idx < base + intr_handle->nb_efd - 1)
			vector_idx++;
	}

	/* As the RX queue setting above shows, all queues use vector 0.
	 * Set only the ITR value of IXGBE_MISC_VEC_ID.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID),
			IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
			| IXGBE_EITR_CNT_WDIS);
}
/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @hw
 *  board private structure
 */
static void
ixgbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
	uint32_t vec = IXGBE_MISC_VEC_ID;
	uint32_t mask;
	uint32_t gpie;

	/* Won't configure the MSI-X register if no mapping is done
	 * between intr vector and event fd,
	 * but if MSI-X has been enabled already, we need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
		return;

	if (rte_intr_allow_others(intr_handle))
		vec = base = IXGBE_RX_VEC_START;

	/* setup GPIE for MSI-x mode */
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
		IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
	/* auto clearing and auto setting corresponding bits in EIMS
	 * when MSI-X interrupt is triggered
	 */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
	}
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping */
			ixgbe_set_ivar_map(hw, 0, queue_id, vec);
			intr_handle->intr_vec[queue_id] = vec;
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			ixgbe_set_ivar_map(hw, -1,
					   IXGBE_IVAR_OTHER_CAUSES_INDEX,
					   IXGBE_MISC_VEC_ID);
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
			ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
			break;
		default:
			break;
		}
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
			IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
			| IXGBE_EITR_CNT_WDIS);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER |
		  IXGBE_EIMS_MAILBOX |
		  IXGBE_EIMS_LSC);

	IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
}
int
ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
			   uint16_t queue_idx, uint16_t tx_rate)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_rxmode *rxmode;
	uint32_t rf_dec, rf_int;
	uint32_t bcnrc_val = 0;
	uint16_t link_speed = dev->data->dev_link.link_speed;

	if (queue_idx >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
		rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
		rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;

		bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
				IXGBE_RTTBCNRC_RF_INT_MASK_M);
		bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	rxmode = &dev->data->dev_conf.rxmode;
	/*
	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
	 * set as 0x4.
	 */
	if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
	    (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
			IXGBE_MMW_SIZE_JUMBO_FRAME);
	else
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
			IXGBE_MMW_SIZE_DEFAULT);

	/* Set RTTBCNRC of queue X */
	IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
	IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
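/*
 * Worked example of the RTTBCNRC fixed-point rate factor (values chosen
 * for illustration, with IXGBE_RTTBCNRC_RF_INT_SHIFT = 14): for
 * link_speed = 10000 Mb/s and tx_rate = 3000 Mb/s:
 *   rf_int = 10000 / 3000 = 3
 *   rf_dec = ((10000 % 3000) << 14) / 3000 = (1000 << 14) / 3000 = 5461
 * encoding the ratio 10000/3000 = 3.333 as 3 + 5461/2^14 with a 14-bit
 * fractional part.
 */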
static int
ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		     __attribute__((unused)) uint32_t index,
		     __attribute__((unused)) uint32_t pool)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int diag;

	/*
	 * On a 82599 VF, adding again the same MAC addr is not an idempotent
	 * operation. Trap this case to avoid exhausting the [very limited]
	 * set of PF resources used to store VF MAC addresses.
	 */
	if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
		return -1;
	diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
	if (diag != 0)
		PMD_DRV_LOG(ERR, "Unable to add MAC address "
			    "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
			    mac_addr->addr_bytes[0],
			    mac_addr->addr_bytes[1],
			    mac_addr->addr_bytes[2],
			    mac_addr->addr_bytes[3],
			    mac_addr->addr_bytes[4],
			    mac_addr->addr_bytes[5],
			    diag);
	return diag;
}

static void
ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ether_addr *perm_addr = (struct ether_addr *)hw->mac.perm_addr;
	struct ether_addr *mac_addr;
	uint32_t i;
	int diag;

	/*
	 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
	 * not support the deletion of a given MAC address.
	 * Instead, it requires deleting all MAC addresses, then adding
	 * back all MAC addresses except the one to be deleted.
	 */
	(void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);

	/*
	 * Add again all MAC addresses, with the exception of the deleted one
	 * and of the permanent MAC address.
	 */
	for (i = 0, mac_addr = dev->data->mac_addrs;
	     i < hw->mac.num_rar_entries; i++, mac_addr++) {
		/* Skip the deleted MAC address */
		if (i == index)
			continue;
		/* Skip NULL MAC addresses */
		if (is_zero_ether_addr(mac_addr))
			continue;
		/* Skip the permanent MAC address */
		if (memcmp(perm_addr, mac_addr,
				sizeof(struct ether_addr)) == 0)
			continue;
		diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
		if (diag != 0)
			PMD_DRV_LOG(ERR,
				    "Adding again MAC address "
				    "%02x:%02x:%02x:%02x:%02x:%02x failed "
				    "diag=%d",
				    mac_addr->addr_bytes[0],
				    mac_addr->addr_bytes[1],
				    mac_addr->addr_bytes[2],
				    mac_addr->addr_bytes[3],
				    mac_addr->addr_bytes[4],
				    mac_addr->addr_bytes[5],
				    diag);
	}
}

static int
ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);

	return 0;
}
static int
ixgbe_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t syn_info;
	uint32_t synqf;

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
		return -EINVAL;

	syn_info = filter_info->syn_info;

	if (add) {
		if (syn_info & IXGBE_SYN_FILTER_ENABLE)
			return -EINVAL;
		synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
			IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);

		if (filter->hig_pri)
			synqf |= IXGBE_SYN_FILTER_SYNQFP;
		else
			synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
	} else {
		synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
		if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
			return -ENOENT;
		synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
	}

	filter_info->syn_info = synqf;
	IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
	IXGBE_WRITE_FLUSH(hw);
	return 0;
}
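/*
 * Illustrative encoding (queue shift assumed to be 1, consistent with
 * the ">> 1" used by ixgbe_syn_filter_get() below): directing TCP SYN
 * packets to queue 3 with high priority yields
 *   synqf = (3 << IXGBE_SYN_FILTER_QUEUE_SHIFT)
 *           | IXGBE_SYN_FILTER_ENABLE | IXGBE_SYN_FILTER_SYNQFP
 * i.e. the queue number sits just above the enable bit, and SYNQFP
 * selects high-priority treatment.
 */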
static int
ixgbe_syn_filter_get(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);

	if (synqf & IXGBE_SYN_FILTER_ENABLE) {
		filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
		filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
		return 0;
	}

	return -ENOENT;
}

static int
ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
			    filter_op);
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = ixgbe_syn_filter_set(dev,
				(struct rte_eth_syn_filter *)arg,
				TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = ixgbe_syn_filter_set(dev,
				(struct rte_eth_syn_filter *)arg,
				FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = ixgbe_syn_filter_get(dev,
				(struct rte_eth_syn_filter *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static inline enum ixgbe_5tuple_protocol
convert_protocol_type(uint8_t protocol_value)
{
	if (protocol_value == IPPROTO_TCP)
		return IXGBE_FILTER_PROTOCOL_TCP;
	else if (protocol_value == IPPROTO_UDP)
		return IXGBE_FILTER_PROTOCOL_UDP;
	else if (protocol_value == IPPROTO_SCTP)
		return IXGBE_FILTER_PROTOCOL_SCTP;
	else
		return IXGBE_FILTER_PROTOCOL_NONE;
}

/* inject a 5-tuple filter to HW */
static inline void
ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
			   struct ixgbe_5tuple_filter *filter)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i = filter->index;
	uint32_t ftqf, sdpqf;
	uint32_t l34timir = 0;
	uint8_t mask = 0xff;

	sdpqf = (uint32_t)(filter->filter_info.dst_port <<
				IXGBE_SDPQF_DSTPORT_SHIFT);
	sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);

	ftqf = (uint32_t)(filter->filter_info.proto &
		IXGBE_FTQF_PROTOCOL_MASK);
	ftqf |= (uint32_t)((filter->filter_info.priority &
		IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
	if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
		mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
	if (filter->filter_info.dst_ip_mask == 0)
		mask &= IXGBE_FTQF_DEST_ADDR_MASK;
	if (filter->filter_info.src_port_mask == 0)
		mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
	if (filter->filter_info.dst_port_mask == 0)
		mask &= IXGBE_FTQF_DEST_PORT_MASK;
	if (filter->filter_info.proto_mask == 0)
		mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
	ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
	ftqf |= IXGBE_FTQF_POOL_MASK_EN;
	ftqf |= IXGBE_FTQF_QUEUE_ENABLE;

	IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
	IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
	IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);

	l34timir |= IXGBE_L34T_IMIR_RESERVE;
	l34timir |= (uint32_t)(filter->queue <<
				IXGBE_L34T_IMIR_QUEUE_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
}
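/*
 * Worked sketch of the FTQF mask byte built above: each
 * IXGBE_FTQF_*_MASK constant is an AND pattern that clears one
 * "ignore this field" bit. The mask starts as 0xff (ignore all five
 * tuple fields); every field whose *_mask is 0 ("compare") has its
 * ignore bit cleared, and the resulting byte is shifted into FTQF by
 * IXGBE_FTQF_5TUPLE_MASK_SHIFT. A filter matching only protocol and
 * dst_port thus keeps the other three ignore bits set.
 */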
/*
 * add a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * index: the index the filter allocates.
 * filter: pointer to the filter that will be added.
 * rx_queue: the queue id the filter assigned to.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
			struct ixgbe_5tuple_filter *filter)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i, idx, shift;

	/*
	 * look for an unused 5tuple filter index,
	 * and insert the filter to list.
	 */
	for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
		idx = i / (sizeof(uint32_t) * NBBY);
		shift = i % (sizeof(uint32_t) * NBBY);
		if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
			filter_info->fivetuple_mask[idx] |= 1 << shift;
			filter->index = i;
			TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
					  filter,
					  entries);
			break;
		}
	}

	if (i >= IXGBE_MAX_FTQF_FILTERS) {
		PMD_DRV_LOG(ERR, "5tuple filters are full.");
		return -ENOSYS;
	}

	ixgbe_inject_5tuple_filter(dev, filter);

	return 0;
}
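/*
 * Worked example of the free-slot bitmap above: with 32 bits per
 * uint32_t (NBBY = 8), filter index i = 70 maps to
 *   idx   = 70 / 32 = 2
 *   shift = 70 % 32 = 6
 * i.e. bit 6 of fivetuple_mask[2]. With IXGBE_MAX_FTQF_FILTERS = 128,
 * the bitmap spans four 32-bit words.
 */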
/*
 * remove a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * filter: pointer to the filter that will be removed.
 */
static void
ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
			struct ixgbe_5tuple_filter *filter)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint16_t index = filter->index;

	filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
		~(1 << (index % (sizeof(uint32_t) * NBBY)));
	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
	rte_free(filter);

	IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
}

static int
ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ixgbe_hw *hw;
	uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD;
	struct rte_eth_dev_data *dev_data = dev->data;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
		return -EINVAL;

	/* If device is started, refuse mtu that requires the support of
	 * scattered packets when this feature has not been enabled before.
	 */
	if (dev_data->dev_started && !dev_data->scattered_rx &&
	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
		PMD_INIT_LOG(ERR, "Stop port first.");
		return -EINVAL;
	}

	/*
	 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
	 * request of the version 2.0 of the mailbox API.
	 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
	 * of the mailbox API.
	 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
	 * prior to 3.11.33 which contains the following change:
	 * "ixgbe: Enable jumbo frames support w/ SR-IOV"
	 */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
	return 0;
}
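/*
 * Worked example (assuming IXGBE_ETH_OVERHEAD is the Ethernet header
 * plus CRC, 14 + 4 = 18 bytes): a requested mtu of 1500 gives
 *   max_frame = 1500 + 18 = 1518
 * which is what ixgbevf_rlpml_set_vf() programs as the VF's maximum
 * receive packet length through the mailbox.
 */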
static inline struct ixgbe_5tuple_filter *
ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
			struct ixgbe_5tuple_filter_info *key)
{
	struct ixgbe_5tuple_filter *it;

	TAILQ_FOREACH(it, filter_list, entries) {
		if (memcmp(key, &it->filter_info,
			sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
			return it;
		}
	}
	return NULL;
}

/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
static inline int
ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
			struct ixgbe_5tuple_filter_info *filter_info)
{
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
		filter->priority > IXGBE_5TUPLE_MAX_PRI ||
		filter->priority < IXGBE_5TUPLE_MIN_PRI)
		return -EINVAL;

	switch (filter->dst_ip_mask) {
	case UINT32_MAX:
		filter_info->dst_ip_mask = 0;
		filter_info->dst_ip = filter->dst_ip;
		break;
	case 0:
		filter_info->dst_ip_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
		return -EINVAL;
	}

	switch (filter->src_ip_mask) {
	case UINT32_MAX:
		filter_info->src_ip_mask = 0;
		filter_info->src_ip = filter->src_ip;
		break;
	case 0:
		filter_info->src_ip_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
		return -EINVAL;
	}

	switch (filter->dst_port_mask) {
	case UINT16_MAX:
		filter_info->dst_port_mask = 0;
		filter_info->dst_port = filter->dst_port;
		break;
	case 0:
		filter_info->dst_port_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
		return -EINVAL;
	}

	switch (filter->src_port_mask) {
	case UINT16_MAX:
		filter_info->src_port_mask = 0;
		filter_info->src_port = filter->src_port;
		break;
	case 0:
		filter_info->src_port_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid src_port mask.");
		return -EINVAL;
	}

	switch (filter->proto_mask) {
	case UINT8_MAX:
		filter_info->proto_mask = 0;
		filter_info->proto =
			convert_protocol_type(filter->proto);
		break;
	case 0:
		filter_info->proto_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid protocol mask.");
		return -EINVAL;
	}

	filter_info->priority = (uint8_t)filter->priority;
	return 0;
}
/*
 * add or delete a ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 * add: if true, add filter, if false, remove filter
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
int
ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter,
			bool add)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_5tuple_filter_info filter_5tuple;
	struct ixgbe_5tuple_filter *filter;
	int ret;

	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
		return -EINVAL;
	}

	memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
	if (ret < 0)
		return ret;

	filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
					    &filter_5tuple);
	if (filter != NULL && add) {
		PMD_DRV_LOG(ERR, "filter exists.");
		return -EEXIST;
	}
	if (filter == NULL && !add) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");
		return -ENOENT;
	}

	if (add) {
		filter = rte_zmalloc("ixgbe_5tuple_filter",
				sizeof(struct ixgbe_5tuple_filter), 0);
		if (filter == NULL)
			return -ENOMEM;
		rte_memcpy(&filter->filter_info,
				 &filter_5tuple,
				 sizeof(struct ixgbe_5tuple_filter_info));
		filter->queue = ntuple_filter->queue;
		ret = ixgbe_add_5tuple_filter(dev, filter);
		if (ret < 0) {
			rte_free(filter);
			return ret;
		}
	} else
		ixgbe_remove_5tuple_filter(dev, filter);

	return 0;
}

/*
 * get a ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_5tuple_filter_info filter_5tuple;
	struct ixgbe_5tuple_filter *filter;
	int ret;

	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
		return -EINVAL;
	}

	memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
	if (ret < 0)
		return ret;

	filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
					    &filter_5tuple);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");
		return -ENOENT;
	}
	ntuple_filter->queue = filter->queue;
	return 0;
}

/*
 * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation to be taken.
 * @arg: a pointer to specific structure corresponding to the filter_op
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
			    filter_op);
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = ixgbe_add_del_ntuple_filter(dev,
			(struct rte_eth_ntuple_filter *)arg,
			TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = ixgbe_add_del_ntuple_filter(dev,
			(struct rte_eth_ntuple_filter *)arg,
			FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = ixgbe_get_ntuple_filter(dev,
			(struct rte_eth_ntuple_filter *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int
ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t etqf = 0;
	uint32_t etqs = 0;
	int ret;
	struct ixgbe_ethertype_filter ethertype_filter;

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
		return -EINVAL;

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
		filter->ether_type == ETHER_TYPE_IPv6) {
		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
			" ethertype filter.", filter->ether_type);
		return -EINVAL;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
		return -EINVAL;
	}
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		PMD_DRV_LOG(ERR, "drop option is unsupported.");
		return -EINVAL;
	}

	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
	if (ret >= 0 && add) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
			    filter->ether_type);
		return -EEXIST;
	}
	if (ret < 0 && !add) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
			    filter->ether_type);
		return -ENOENT;
	}

	if (add) {
		etqf = IXGBE_ETQF_FILTER_EN;
		etqf |= (uint32_t)filter->ether_type;
		etqs |= (uint32_t)((filter->queue <<
				    IXGBE_ETQS_RX_QUEUE_SHIFT) &
				    IXGBE_ETQS_RX_QUEUE);
		etqs |= IXGBE_ETQS_QUEUE_EN;

		ethertype_filter.ethertype = filter->ether_type;
		ethertype_filter.etqf = etqf;
		ethertype_filter.etqs = etqs;
		ethertype_filter.conf = FALSE;
		ret = ixgbe_ethertype_filter_insert(filter_info,
						    &ethertype_filter);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "ethertype filters are full.");
			return -ENOSPC;
		}
	} else {
		ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
		if (ret < 0)
			return -ENOSYS;
	}
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

static int
ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t etqf, etqs;
	int ret;

	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
			    filter->ether_type);
		return -ENOENT;
	}

	etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
	if (etqf & IXGBE_ETQF_FILTER_EN) {
		etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
		filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
		filter->flags = 0;
		filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
				IXGBE_ETQS_RX_QUEUE_SHIFT;
		return 0;
	}

	return -ENOENT;
}

/*
 * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation to be taken.
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
static int
ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
			      enum rte_filter_op filter_op,
			      void *arg)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
			    filter_op);
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = ixgbe_add_del_ethertype_filter(dev,
			(struct rte_eth_ethertype_filter *)arg,
			TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = ixgbe_add_del_ethertype_filter(dev,
			(struct rte_eth_ethertype_filter *)arg,
			FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = ixgbe_get_ethertype_filter(dev,
			(struct rte_eth_ethertype_filter *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int
ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = 0;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_SYN:
		ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &ixgbe_flow_ops;
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static u8 *
ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
			u8 **mc_addr_ptr, u32 *vmdq)
{
	u8 *mc_addr;

	*vmdq = 0;
	mc_addr = *mc_addr_ptr;
	*mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
	return mc_addr;
}

static int
ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct ixgbe_hw *hw;
	u8 *mc_addr_list;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	mc_addr_list = (u8 *)mc_addr_set;
	return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
					 ixgbe_dev_addr_list_itr, TRUE);
}
static uint64_t
ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t systime_cycles;

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
		systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
		systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
				* NSEC_PER_SEC;
		break;
	default:
		systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
		systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
				<< 32;
	}

	return systime_cycles;
}
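/*
 * Worked example of the two register layouts above: on X550 the pair
 * already holds seconds and nanoseconds, so SYSTIMH = 2, SYSTIML = 500
 * combine as 2 * NSEC_PER_SEC + 500 = 2000000500 ns. On older MACs the
 * same raw values form one free-running 64-bit cycle counter,
 * (2 << 32) | 500, which is converted to ns later by the timecounter
 * shift.
 */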
static uint64_t
ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t rx_tstamp_cycles;

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* RXSTMPL stores ns and RXSTMPH stores seconds. */
		rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
		rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
				* NSEC_PER_SEC;
		break;
	default:
		/* RXSTMPL stores ns and RXSTMPH stores seconds. */
		rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
		rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
				<< 32;
	}

	return rx_tstamp_cycles;
}

static uint64_t
ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t tx_tstamp_cycles;

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* TXSTMPL stores ns and TXSTMPH stores seconds. */
		tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
		tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
				* NSEC_PER_SEC;
		break;
	default:
		/* TXSTMPL stores ns and TXSTMPH stores seconds. */
		tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
		tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
				<< 32;
	}

	return tx_tstamp_cycles;
}
static void
ixgbe_start_timecounters(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_adapter *adapter =
			(struct ixgbe_adapter *)dev->data->dev_private;
	struct rte_eth_link link;
	uint32_t incval = 0;
	uint32_t shift = 0;

	/* Get current link speed. */
	ixgbe_dev_link_update(dev, 1);
	rte_eth_linkstatus_get(dev, &link);

	switch (link.link_speed) {
	case ETH_SPEED_NUM_100M:
		incval = IXGBE_INCVAL_100;
		shift = IXGBE_INCVAL_SHIFT_100;
		break;
	case ETH_SPEED_NUM_1G:
		incval = IXGBE_INCVAL_1GB;
		shift = IXGBE_INCVAL_SHIFT_1GB;
		break;
	case ETH_SPEED_NUM_10G:
	default:
		incval = IXGBE_INCVAL_10GB;
		shift = IXGBE_INCVAL_SHIFT_10GB;
		break;
	}

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Independent of link speed. */
		incval = 1;
		/* Cycles read will be interpreted as ns. */
		shift = 0;
		/* Fall-through */
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
		break;
	case ixgbe_mac_82599EB:
		incval >>= IXGBE_INCVAL_SHIFT_82599;
		shift -= IXGBE_INCVAL_SHIFT_82599;
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
				(1 << IXGBE_INCPER_SHIFT_82599) | incval);
		break;
	default:
		/* Not supported. */
		return;
	}

	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));

	adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
	adapter->systime_tc.cc_shift = shift;
	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
	adapter->rx_tstamp_tc.cc_shift = shift;
	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
	adapter->tx_tstamp_tc.cc_shift = shift;
	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
}
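/*
 * Worked example of the incval/shift pairing above: at 10G the SYSTIME
 * clock ticks at 156.25 MHz (6.4 ns period), and
 *   IXGBE_INCVAL_10GB / 2^IXGBE_INCVAL_SHIFT_10GB
 *     = 0x66666666 / 2^28 ~= 6.4
 * so the hardware adds roughly 6.4 ns in fixed point per tick, and the
 * timecounter recovers nanoseconds by shifting the cycle delta right
 * by cc_shift = 28.
 */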
static int
ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
	struct ixgbe_adapter *adapter =
			(struct ixgbe_adapter *)dev->data->dev_private;

	adapter->systime_tc.nsec += delta;
	adapter->rx_tstamp_tc.nsec += delta;
	adapter->tx_tstamp_tc.nsec += delta;

	return 0;
}

static int
ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
{
	uint64_t ns;
	struct ixgbe_adapter *adapter =
			(struct ixgbe_adapter *)dev->data->dev_private;

	ns = rte_timespec_to_ns(ts);
	/* Set the timecounters to a new value. */
	adapter->systime_tc.nsec = ns;
	adapter->rx_tstamp_tc.nsec = ns;
	adapter->tx_tstamp_tc.nsec = ns;

	return 0;
}

static int
ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
	uint64_t ns, systime_cycles;
	struct ixgbe_adapter *adapter =
			(struct ixgbe_adapter *)dev->data->dev_private;

	systime_cycles = ixgbe_read_systime_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
	*ts = rte_ns_to_timespec(ns);

	return 0;
}
static int
ixgbe_timesync_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;
	uint32_t tsauxc;

	/* Stop the timesync system time. */
	IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
	/* Reset the timesync system time value. */
	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);

	/* Enable system time for platforms where it isn't on by default. */
	tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
	tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
	IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);

	ixgbe_start_timecounters(dev);

	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
			(ETHER_TYPE_1588 |
			 IXGBE_ETQF_FILTER_EN |
			 IXGBE_ETQF_1588));

	/* Enable timestamping of received PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);

	/* Enable timestamping of transmitted PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);

	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

static int
ixgbe_timesync_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;

	/* Disable timestamping of transmitted PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);

	/* Disable timestamping of received PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);

	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);

	/* Stop incrementing the System Time registers. */
	IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);

	return 0;
}
static int
ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
				 struct timespec *timestamp,
				 uint32_t flags __rte_unused)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	uint32_t tsync_rxctl;
	uint64_t rx_tstamp_cycles;
	uint64_t ns;

	tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
		return -EINVAL;

	rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}

static int
ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
				 struct timespec *timestamp)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	uint32_t tsync_txctl;
	uint64_t tx_tstamp_cycles;
	uint64_t ns;

	tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
		return -EINVAL;

	tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}
static int
ixgbe_get_reg_length(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int count = 0;
	int g_ind = 0;
	const struct reg_info *reg_group;
	const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
				    ixgbe_regs_mac_82598EB : ixgbe_regs_others;

	while ((reg_group = reg_set[g_ind++]))
		count += ixgbe_regs_group_count(reg_group);

	return count;
}

static int
ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
{
	int count = 0;
	int g_ind = 0;
	const struct reg_info *reg_group;

	while ((reg_group = ixgbevf_regs[g_ind++]))
		count += ixgbe_regs_group_count(reg_group);

	return count;
}

static int
ixgbe_get_regs(struct rte_eth_dev *dev,
	       struct rte_dev_reg_info *regs)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *data = regs->data;
	int g_ind = 0;
	int count = 0;
	const struct reg_info *reg_group;
	const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
				    ixgbe_regs_mac_82598EB : ixgbe_regs_others;

	if (data == NULL) {
		regs->length = ixgbe_get_reg_length(dev);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;
		while ((reg_group = reg_set[g_ind++]))
			count += ixgbe_read_regs_group(dev, &data[count],
				reg_group);
		return 0;
	}

	return -ENOTSUP;
}
static int
ixgbevf_get_regs(struct rte_eth_dev *dev,
		 struct rte_dev_reg_info *regs)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *data = regs->data;
	int g_ind = 0;
	int count = 0;
	const struct reg_info *reg_group;

	if (data == NULL) {
		regs->length = ixgbevf_get_reg_length(dev);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;
		while ((reg_group = ixgbevf_regs[g_ind++]))
			count += ixgbe_read_regs_group(dev, &data[count],
				reg_group);
		return 0;
	}

	return -ENOTSUP;
}
static int
ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Return unit is byte count */
	return hw->eeprom.word_size * 2;
}

static int
ixgbe_get_eeprom(struct rte_eth_dev *dev,
		 struct rte_dev_eeprom_info *in_eeprom)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	uint16_t *data = in_eeprom->data;
	int first, length;

	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if ((first > hw->eeprom.word_size) ||
	    ((first + length) > hw->eeprom.word_size))
		return -EINVAL;

	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	return eeprom->ops.read_buffer(hw, first, length, data);
}
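/*
 * Worked example of the byte-to-word translation above: the EEPROM ops
 * are word (16-bit) based, so a request of offset = 6 and length = 8
 * bytes becomes first = 6 >> 1 = 3 and length = 8 >> 1 = 4, i.e.
 * read_buffer() fetches words 3..6.
 */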
static int
ixgbe_set_eeprom(struct rte_eth_dev *dev,
		 struct rte_dev_eeprom_info *in_eeprom)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	uint16_t *data = in_eeprom->data;
	int first, length;

	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if ((first > hw->eeprom.word_size) ||
	    ((first + length) > hw->eeprom.word_size))
		return -EINVAL;

	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	return eeprom->ops.write_buffer(hw, first, length, data);
}

static int
ixgbe_get_module_info(struct rte_eth_dev *dev,
		      struct rte_eth_dev_module_info *modinfo)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t status;
	uint8_t sff8472_rev, addr_mode;
	bool page_swap = false;

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_COMP,
					     &sff8472_rev);
	if (status != 0)
		return -EIO;

	/* addressing mode is not supported */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_SWAP,
					     &addr_mode);
	if (status != 0)
		return -EIO;

	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
		PMD_DRV_LOG(ERR,
			    "Address change required to access page 0xA2, "
			    "but not supported. Please report the module "
			    "type to the driver maintainers.");
		page_swap = true;
	}

	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
		/* We have a SFP, but it does not support SFF-8472 */
		modinfo->type = RTE_ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have a SFP which supports a revision of SFF-8472. */
		modinfo->type = RTE_ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}
static int
ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
			struct rte_dev_eeprom_info *info)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID;
	uint8_t databyte = 0xFF;
	uint8_t *data = info->data;
	uint32_t i = 0;

	if (info->length == 0)
		return -EINVAL;

	for (i = info->offset; i < info->offset + info->length; i++) {
		if (i < RTE_ETH_MODULE_SFF_8079_LEN)
			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);

		if (status != 0)
			return -EIO;

		data[i - info->offset] = databyte;
	}

	return 0;
}
uint16_t
ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
	switch (mac_type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		return ETH_RSS_RETA_SIZE_512;
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		return ETH_RSS_RETA_SIZE_64;
	default:
		return ETH_RSS_RETA_SIZE_128;
	}
}

uint32_t
ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
	switch (mac_type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		if (reta_idx < ETH_RSS_RETA_SIZE_128)
			return IXGBE_RETA(reta_idx >> 2);
		else
			return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		return IXGBE_VFRETA(reta_idx >> 2);
	default:
		return IXGBE_RETA(reta_idx >> 2);
	}
}
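/*
 * Worked example of the redirection table addressing above: each
 * 32-bit RETA/ERETA register packs four 8-bit entries. On an X550
 * (512-entry table), entry 200 lies past the first 128 entries, so it
 * lives in IXGBE_ERETA((200 - 128) >> 2) = IXGBE_ERETA(18), in byte
 * lane 200 & 0x3 = 0 of that register.
 */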
uint32_t
ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
	switch (mac_type) {
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		return IXGBE_VFMRQC;
	default:
		return IXGBE_MRQC;
	}
}

uint32_t
ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
	switch (mac_type) {
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		return IXGBE_VFRSSRK(i);
	default:
		return IXGBE_RSSRK(i);
	}
}

bool
ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
	switch (mac_type) {
	case ixgbe_mac_82599_vf:
	case ixgbe_mac_X540_vf:
		return 0;
	default:
		return 1;
	}
}
static int
ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
		       struct rte_eth_dcb_info *dcb_info)
{
	struct ixgbe_dcb_config *dcb_config =
			IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
	struct ixgbe_dcb_tc_config *tc;
	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
	uint8_t nb_tcs;
	uint8_t i, j;

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
	else
		dcb_info->nb_tcs = 1;

	tc_queue = &dcb_info->tc_queue;
	nb_tcs = dcb_info->nb_tcs;

	if (dcb_config->vt_mode) { /* vt is enabled */
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
			for (j = 0; j < nb_tcs; j++) {
				tc_queue->tc_rxq[0][j].base = j;
				tc_queue->tc_rxq[0][j].nb_queue = 1;
				tc_queue->tc_txq[0][j].base = j;
				tc_queue->tc_txq[0][j].nb_queue = 1;
			}
		} else {
			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
				for (j = 0; j < nb_tcs; j++) {
					tc_queue->tc_rxq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_rxq[i][j].nb_queue = 1;
					tc_queue->tc_txq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_txq[i][j].nb_queue = 1;
				}
			}
		}
	} else { /* vt is disabled */
		struct rte_eth_dcb_rx_conf *rx_conf =
				&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
		if (dcb_info->nb_tcs == ETH_4_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 64;
			dcb_info->tc_queue.tc_txq[0][2].base = 96;
			dcb_info->tc_queue.tc_txq[0][3].base = 112;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 32;
			dcb_info->tc_queue.tc_txq[0][2].base = 64;
			dcb_info->tc_queue.tc_txq[0][3].base = 80;
			dcb_info->tc_queue.tc_txq[0][4].base = 96;
			dcb_info->tc_queue.tc_txq[0][5].base = 104;
			dcb_info->tc_queue.tc_txq[0][6].base = 112;
			dcb_info->tc_queue.tc_txq[0][7].base = 120;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
		}
	}
	for (i = 0; i < dcb_info->nb_tcs; i++) {
		tc = &dcb_config->tc_config[i];
		dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
	}
	return 0;
}
/* Update e-tag ether type */
static int
ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
			    uint16_t ether_type)
{
	uint32_t etag_etype;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
	etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
	etag_etype |= ether_type;
	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

/* Config l2 tunnel ether type */
static int
ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);

	if (l2_tunnel == NULL)
		return -EINVAL;

	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
		ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* Enable e-tag tunnel */
static int
ixgbe_e_tag_enable(struct ixgbe_hw *hw)
{
	uint32_t etag_etype;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
	etag_etype |= IXGBE_ETAG_ETYPE_VALID;
	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
/* Enable l2 tunnel */
static int
ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
			   enum rte_eth_tunnel_type l2_tunnel_type)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);

	switch (l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		l2_tn_info->e_tag_en = TRUE;
		ret = ixgbe_e_tag_enable(hw);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* Disable e-tag tunnel */
static int
ixgbe_e_tag_disable(struct ixgbe_hw *hw)
{
	uint32_t etag_etype;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
	etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

/* Disable l2 tunnel */
static int
ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
			    enum rte_eth_tunnel_type l2_tunnel_type)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);

	switch (l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		l2_tn_info->e_tag_en = FALSE;
		ret = ixgbe_e_tag_disable(hw);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int
ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
		       struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t i, rar_entries;
	uint32_t rar_low, rar_high;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	rar_entries = ixgbe_get_num_rx_addrs(hw);

	for (i = 1; i < rar_entries; i++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
		rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
		if ((rar_high & IXGBE_RAH_AV) &&
		    (rar_high & IXGBE_RAH_ADTYPE) &&
		    ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
		     l2_tunnel->tunnel_id)) {
			IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);

			ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);

			return ret;
		}
	}

	return ret;
}

static int
ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
		       struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t i, rar_entries;
	uint32_t rar_low, rar_high;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	/* One entry for one tunnel. Try to remove potential existing entry. */
	ixgbe_e_tag_filter_del(dev, l2_tunnel);

	rar_entries = ixgbe_get_num_rx_addrs(hw);

	for (i = 1; i < rar_entries; i++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
		if (rar_high & IXGBE_RAH_AV) {
			continue;
		} else {
			ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
			rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
			rar_low = l2_tunnel->tunnel_id;

			IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
			IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);

			return ret;
		}
	}

	PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full."
		     " Please remove a rule before adding a new one.");
	return -EINVAL;
}
static inline struct ixgbe_l2_tn_filter *
ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
			  struct ixgbe_l2_tn_key *key)
{
	int ret;

	ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
	if (ret < 0)
		return NULL;

	return l2_tn_info->hash_map[ret];
}

static inline int
ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
			  struct ixgbe_l2_tn_filter *l2_tn_filter)
{
	int ret;

	ret = rte_hash_add_key(l2_tn_info->hash_handle,
			       &l2_tn_filter->key);

	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert L2 tunnel filter"
			    " to hash table %d!",
			    ret);
		return ret;
	}

	l2_tn_info->hash_map[ret] = l2_tn_filter;

	TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);

	return 0;
}

static inline int
ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
			  struct ixgbe_l2_tn_key *key)
{
	int ret;
	struct ixgbe_l2_tn_filter *l2_tn_filter;

	ret = rte_hash_del_key(l2_tn_info->hash_handle, key);

	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "No such L2 tunnel filter to delete %d!",
			    ret);
		return ret;
	}

	l2_tn_filter = l2_tn_info->hash_map[ret];
	l2_tn_info->hash_map[ret] = NULL;

	TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
	rte_free(l2_tn_filter);

	return 0;
}
/* Add l2 tunnel filter */
int
ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
			       struct rte_eth_l2_tunnel_conf *l2_tunnel,
			       bool restore)
{
	int ret;
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_l2_tn_key key;
	struct ixgbe_l2_tn_filter *node;

	if (!restore) {
		key.l2_tn_type = l2_tunnel->l2_tunnel_type;
		key.tn_id = l2_tunnel->tunnel_id;

		node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);

		if (node) {
			PMD_DRV_LOG(ERR,
				    "The L2 tunnel filter already exists!");
			return -EINVAL;
		}

		node = rte_zmalloc("ixgbe_l2_tn",
				   sizeof(struct ixgbe_l2_tn_filter),
				   0);
		if (!node)
			return -ENOMEM;

		rte_memcpy(&node->key,
				 &key,
				 sizeof(struct ixgbe_l2_tn_key));
		node->pool = l2_tunnel->pool;
		ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
		if (ret < 0) {
			rte_free(node);
			return ret;
		}
	}

	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	if ((!restore) && (ret < 0))
		(void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);

	return ret;
}

/* Delete l2 tunnel filter */
int
ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
			       struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	int ret;
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_l2_tn_key key;

	key.l2_tn_type = l2_tunnel->l2_tunnel_type;
	key.tn_id = l2_tunnel->tunnel_id;
	ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
	if (ret < 0)
		return ret;

	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
/**
 * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation to be taken.
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
static int
ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
				  enum rte_filter_op filter_op,
				  void *arg)
{
	int ret;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
			    filter_op);
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = ixgbe_dev_l2_tunnel_filter_add
			(dev,
			 (struct rte_eth_l2_tunnel_conf *)arg,
			 FALSE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = ixgbe_dev_l2_tunnel_filter_del
			(dev,
			 (struct rte_eth_l2_tunnel_conf *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int
ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
{
	int ret = 0;
	uint32_t ctrl;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
	if (en)
		ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);

	return ret;
}
/* Enable l2 tunnel forwarding */
static int
ixgbe_dev_l2_tunnel_forwarding_enable
	(struct rte_eth_dev *dev,
	 enum rte_eth_tunnel_type l2_tunnel_type)
{
	int ret = 0;
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);

	switch (l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		l2_tn_info->e_tag_fwd_en = TRUE;
		ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* Disable l2 tunnel forwarding */
static int
ixgbe_dev_l2_tunnel_forwarding_disable
	(struct rte_eth_dev *dev,
	 enum rte_eth_tunnel_type l2_tunnel_type)
{
	int ret = 0;
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);

	switch (l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		l2_tn_info->e_tag_fwd_en = FALSE;
		ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int
ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
			     struct rte_eth_l2_tunnel_conf *l2_tunnel,
			     bool en)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	int ret = 0;
	uint32_t vmtir, vmvir;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
		PMD_DRV_LOG(ERR,
			    "VF id %u should be less than %u",
			    l2_tunnel->vf_id,
			    pci_dev->max_vfs);
		return -EINVAL;
	}

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	if (en)
		vmtir = l2_tunnel->tunnel_id;
	else
		vmtir = 0;

	IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);

	vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
	vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
	if (en)
		vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);

	return ret;
}
/* Enable l2 tunnel tag insertion */
static int
ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
				     struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	int ret = 0;

	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* Disable l2 tunnel tag insertion */
static int
ixgbe_dev_l2_tunnel_insertion_disable
	(struct rte_eth_dev *dev,
	 struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	int ret = 0;

	switch (l2_tunnel->l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int
ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
			     bool en)
{
	int ret = 0;
	uint32_t qde;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	qde = IXGBE_READ_REG(hw, IXGBE_QDE);
	if (en)
		qde |= IXGBE_QDE_STRIP_TAG;
	else
		qde &= ~IXGBE_QDE_STRIP_TAG;
	qde &= ~IXGBE_QDE_READ;
	qde |= IXGBE_QDE_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);

	return ret;
}
/* Enable l2 tunnel tag stripping */
static int
ixgbe_dev_l2_tunnel_stripping_enable
	(struct rte_eth_dev *dev,
	 enum rte_eth_tunnel_type l2_tunnel_type)
{
	int ret = 0;

	switch (l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* Disable l2 tunnel tag stripping */
static int
ixgbe_dev_l2_tunnel_stripping_disable
	(struct rte_eth_dev *dev,
	 enum rte_eth_tunnel_type l2_tunnel_type)
{
	int ret = 0;

	switch (l2_tunnel_type) {
	case RTE_L2_TUNNEL_TYPE_E_TAG:
		ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* Enable/disable l2 tunnel offload functions */
static int
ixgbe_dev_l2_tunnel_offload_set
	(struct rte_eth_dev *dev,
	 struct rte_eth_l2_tunnel_conf *l2_tunnel,
	 uint32_t mask,
	 uint8_t en)
{
	int ret = 0;

	if (l2_tunnel == NULL)
		return -EINVAL;

	if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_enable(
				dev,
				l2_tunnel->l2_tunnel_type);
		else
			ret = ixgbe_dev_l2_tunnel_disable(
				dev,
				l2_tunnel->l2_tunnel_type);
	}

	if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_insertion_enable(
				dev,
				l2_tunnel);
		else
			ret = ixgbe_dev_l2_tunnel_insertion_disable(
				dev,
				l2_tunnel);
	}

	if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_stripping_enable(
				dev,
				l2_tunnel->l2_tunnel_type);
		else
			ret = ixgbe_dev_l2_tunnel_stripping_disable(
				dev,
				l2_tunnel->l2_tunnel_type);
	}

	if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_forwarding_enable(
				dev,
				l2_tunnel->l2_tunnel_type);
		else
			ret = ixgbe_dev_l2_tunnel_forwarding_disable(
				dev,
				l2_tunnel->l2_tunnel_type);
	}

	return ret;
}
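/*
 * Illustrative sketch (not part of the driver build): enabling E-tag
 * stripping and forwarding in one call through the public API. The mask is
 * a bitwise OR of the ETH_L2_TUNNEL_*_MASK flags handled above; port_id,
 * the function name and the guard macro are hypothetical.
 */
#ifdef IXGBE_EXAMPLE_SNIPPETS
static int
example_enable_e_tag_offloads(uint16_t port_id)
{
	struct rte_eth_l2_tunnel_conf conf = {
		.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
	};
	uint32_t mask = ETH_L2_TUNNEL_STRIPPING_MASK |
			ETH_L2_TUNNEL_FORWARDING_MASK;

	/* en = 1 requests enable for every offload selected by the mask. */
	return rte_eth_dev_l2_tunnel_offload_set(port_id, &conf, mask, 1);
}
#endif /* IXGBE_EXAMPLE_SNIPPETS */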
static int
ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
			uint16_t port)
{
	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
/* There's only one register for the VxLAN UDP port.
 * So we cannot add several ports; adding simply updates that register.
 */
static int
ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
		     uint16_t port)
{
	if (port == 0) {
		PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
		return -EINVAL;
	}

	return ixgbe_update_vxlan_port(hw, port);
}
/* We cannot really delete the VxLAN port: the register always holds some
 * value. So deleting resets it to the default value 0.
 */
static int
ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
		     uint16_t port)
{
	uint16_t cur_port;

	cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);

	if (cur_port != port) {
		PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
		return -EINVAL;
	}

	return ixgbe_update_vxlan_port(hw, 0);
}
/* Add UDP tunneling port */
static int
ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			      struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
		break;

	case RTE_TUNNEL_TYPE_GENEVE:
	case RTE_TUNNEL_TYPE_TEREDO:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
		ret = -EINVAL;
		break;

	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* Remove UDP tunneling port */
static int
ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			      struct rte_eth_udp_tunnel *udp_tunnel)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	if (udp_tunnel == NULL)
		return -EINVAL;

	switch (udp_tunnel->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
	case RTE_TUNNEL_TYPE_TEREDO:
		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
		ret = -EINVAL;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid tunnel type");
		ret = -EINVAL;
		break;
	}

	return ret;
}
static void
ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC);
}

static void
ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
}

static void
ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
}

static void
ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
}
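/*
 * Illustrative sketch (not part of the driver build): a VF application
 * toggles these xcast modes through the standard ethdev calls, which the PF
 * must permit over the mailbox. port_id, the function name and the guard
 * macro are hypothetical.
 */
#ifdef IXGBE_EXAMPLE_SNIPPETS
static void
example_vf_enable_promisc(uint16_t port_id)
{
	rte_eth_promiscuous_enable(port_id);	/* IXGBEVF_XCAST_MODE_PROMISC */
	rte_eth_allmulticast_enable(port_id);	/* IXGBEVF_XCAST_MODE_ALLMULTI */
}
#endif /* IXGBE_EXAMPLE_SNIPPETS */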
static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 in_msg = 0;

	/* peek the message first */
	in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);

	/* PF reset VF event */
	if (in_msg == IXGBE_PF_CONTROL_MSG) {
		/* dummy mbx read to ack pf */
		if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
			return;
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
					      NULL);
	}
}
static int
ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	ixgbevf_intr_disable(dev);

	/* read-on-clear nic registers here */
	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
	intr->flags = 0;

	/* only one misc vector supported - mailbox */
	eicr &= IXGBE_VTEICR_MASK;
	if (eicr == IXGBE_MISC_VEC_ID)
		intr->flags |= IXGBE_FLAG_MAILBOX;

	return 0;
}
static int
ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	if (intr->flags & IXGBE_FLAG_MAILBOX) {
		ixgbevf_mbx_process(dev);
		intr->flags &= ~IXGBE_FLAG_MAILBOX;
	}

	ixgbevf_intr_enable(dev);

	return 0;
}
static void
ixgbevf_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	ixgbevf_dev_interrupt_get_status(dev);
	ixgbevf_dev_interrupt_action(dev);
}
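/*
 * Illustrative sketch (not part of the driver build): the PF-reset mailbox
 * message handled above is surfaced to applications as
 * RTE_ETH_EVENT_INTR_RESET, so a VF user can react by registering an event
 * callback. The callback body, function names and the guard macro are
 * hypothetical.
 */
#ifdef IXGBE_EXAMPLE_SNIPPETS
static int
example_vf_reset_cb(uint16_t port_id, enum rte_eth_event_type event,
		    void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	if (event == RTE_ETH_EVENT_INTR_RESET)
		PMD_DRV_LOG(INFO, "PF reset on port %u; restart the port.",
			    port_id);
	return 0;
}

static void
example_vf_register_reset_cb(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
				      example_vf_reset_cb, NULL);
}
#endif /* IXGBE_EXAMPLE_SNIPPETS */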
/**
 * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
 * @hw: pointer to hardware structure
 *
 * Stops the transmit data path and waits for the HW to internally empty
 * the Tx security block.
 **/
int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
{
#define IXGBE_MAX_SECTX_POLL 40

	int i;
	int sectxreg;

	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
	for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
		sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
		if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
			break;
		/* Use interrupt-safe sleep just in case */
		usec_delay(1000);
	}

	/* For informational purposes only */
	if (i >= IXGBE_MAX_SECTX_POLL)
		PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
			    "path fully disabled. Continuing with init.");

	return IXGBE_SUCCESS;
}
/**
 * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
 * @hw: pointer to hardware structure
 *
 * Enables the transmit data path.
 **/
int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
{
	uint32_t sectxreg;

	sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}
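/*
 * Illustrative sketch (not part of the driver build): callers that
 * reconfigure the Tx security block (e.g. the MACsec/IPsec setup paths)
 * quiesce it with the disable helper, program the SECTX registers, then
 * re-enable the path. The function name and the guard macro are
 * hypothetical; the actual register programming is elided.
 */
#ifdef IXGBE_EXAMPLE_SNIPPETS
static int
example_reconfigure_sec_tx(struct ixgbe_hw *hw)
{
	int ret = ixgbe_disable_sec_tx_path_generic(hw);

	if (ret != IXGBE_SUCCESS)
		return ret;

	/* ... program the Tx security registers here ... */

	return ixgbe_enable_sec_tx_path_generic(hw);
}
#endif /* IXGBE_EXAMPLE_SNIPPETS */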
/* restore n-tuple filter */
static inline void
ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_5tuple_filter *node;

	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
		ixgbe_inject_5tuple_filter(dev, node);
	}
}
/* restore ethernet type filter */
static inline void
ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i;

	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
					filter_info->ethertype_filters[i].etqf);
			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
					filter_info->ethertype_filters[i].etqs);
			IXGBE_WRITE_FLUSH(hw);
		}
	}
}
/* restore SYN filter */
static inline void
ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t synqf;

	synqf = filter_info->syn_info;

	if (synqf & IXGBE_SYN_FILTER_ENABLE) {
		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
		IXGBE_WRITE_FLUSH(hw);
	}
}
/* restore L2 tunnel filter */
static inline void
ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_l2_tn_filter *node;
	struct rte_eth_l2_tunnel_conf l2_tn_conf;

	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
		l2_tn_conf.tunnel_id      = node->key.tn_id;
		l2_tn_conf.pool           = node->pool;
		(void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
	}
}
/* restore rss filter */
static inline void
ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->rss_info.conf.queue_num)
		ixgbe_config_rss_filter(dev,
					&filter_info->rss_info, TRUE);
}
static int
ixgbe_filter_restore(struct rte_eth_dev *dev)
{
	ixgbe_ntuple_filter_restore(dev);
	ixgbe_ethertype_filter_restore(dev);
	ixgbe_syn_filter_restore(dev);
	ixgbe_fdir_filter_restore(dev);
	ixgbe_l2_tn_filter_restore(dev);
	ixgbe_rss_filter_restore(dev);

	return 0;
}
static void
ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (l2_tn_info->e_tag_en)
		(void)ixgbe_e_tag_enable(hw);

	if (l2_tn_info->e_tag_fwd_en)
		(void)ixgbe_e_tag_forwarding_en_dis(dev, 1);

	(void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
}
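/*
 * Illustrative sketch (not part of the driver build): the restore helpers
 * above exist so that software-cached filter state survives a port restart.
 * A start path would replay the cached E-tag configuration and then every
 * stored filter, roughly as below; the function name and the guard macro
 * are hypothetical.
 */
#ifdef IXGBE_EXAMPLE_SNIPPETS
static void
example_replay_sw_state(struct rte_eth_dev *dev)
{
	ixgbe_l2_tunnel_conf(dev);	/* E-tag enable/forwarding/ethertype */
	ixgbe_filter_restore(dev);	/* n-tuple, ethertype, SYN, fdir, ... */
}
#endif /* IXGBE_EXAMPLE_SNIPPETS */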
/* remove all the n-tuple filters */
void
ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
		ixgbe_remove_5tuple_filter(dev, p_5tuple);
}
/* remove all the ether type filters */
void
ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i;

	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_mask & (1 << i) &&
		    !filter_info->ethertype_filters[i].conf) {
			(void)ixgbe_ethertype_filter_remove(filter_info,
							    (uint8_t)i);
			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
			IXGBE_WRITE_FLUSH(hw);
		}
	}
}
/* remove the SYN filter */
void
ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
		filter_info->syn_info = 0;

		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
		IXGBE_WRITE_FLUSH(hw);
	}
}
/* remove all the L2 tunnel filters */
int
ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
	struct ixgbe_l2_tn_filter *l2_tn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_conf;
	int ret = 0;

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
		l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
		l2_tn_conf.pool           = l2_tn_filter->pool;
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
		if (ret < 0)
			return ret;
	}

	return 0;
}
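/*
 * Illustrative sketch (not part of the driver build): a full filter flush,
 * as a flow-API flush callback would perform it, chains the clear helpers
 * above; only the L2 tunnel helper can fail, so its status is the one
 * propagated. The function name and the guard macro are hypothetical.
 */
#ifdef IXGBE_EXAMPLE_SNIPPETS
static int
example_flush_all_filters(struct rte_eth_dev *dev)
{
	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	return ixgbe_clear_all_l2_tn_filter(dev);
}
#endif /* IXGBE_EXAMPLE_SNIPPETS */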
RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
RTE_INIT(ixgbe_init_log)
{
	ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
	if (ixgbe_logtype_init >= 0)
		rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE);
	ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver");
	if (ixgbe_logtype_driver >= 0)
		rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE);
}