/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_malloc.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
/*
 * Default values for port configuration
 */
#define IGB_DEFAULT_RX_FREE_THRESH	32

#define IGB_DEFAULT_RX_PTHRESH	((hw->mac.type == e1000_i354) ? 12 : 8)
#define IGB_DEFAULT_RX_HTHRESH	8
#define IGB_DEFAULT_RX_WTHRESH	((hw->mac.type == e1000_82576) ? 1 : 4)

#define IGB_DEFAULT_TX_PTHRESH	((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_DEFAULT_TX_HTHRESH	1
#define IGB_DEFAULT_TX_WTHRESH	((hw->mac.type == e1000_82576) ? 1 : 16)

/* Bit shift and mask */
#define IGB_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IGB_4_BIT_MASK   RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define IGB_8_BIT_WIDTH  CHAR_BIT
#define IGB_8_BIT_MASK   UINT8_MAX
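
/* For example, with CHAR_BIT == 8, IGB_4_BIT_WIDTH is 4 and IGB_4_BIT_MASK
 * expands to the low-nibble mask 0x0F.
 */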
/* Additional timesync values. */
#define E1000_CYCLECOUNTER_MASK      0xffffffffffffffffULL
#define E1000_ETQF_FILTER_1588       3
#define IGB_82576_TSYNC_SHIFT        16
#define E1000_INCPERIOD_82576        (1 << E1000_TIMINCA_16NS_SHIFT)
#define E1000_INCVALUE_82576         (16 << IGB_82576_TSYNC_SHIFT)
#define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000

#define E1000_VTIVAR_MISC                0x01740
#define E1000_VTIVAR_MISC_MASK           0xFF
#define E1000_VTIVAR_VALID               0x80
#define E1000_VTIVAR_MISC_MAILBOX        0
#define E1000_VTIVAR_MISC_INTR_MASK      0x3

/* External VLAN Enable bit mask */
#define E1000_CTRL_EXT_EXT_VLAN      (1 << 26)

/* External VLAN Ether Type bit mask and shift */
#define E1000_VET_VET_EXT            0xFFFF0000
#define E1000_VET_VET_EXT_SHIFT      16

/* MSI-X other interrupt vector */
#define IGB_MSIX_OTHER_INTR_VEC      0
static int  eth_igb_configure(struct rte_eth_dev *dev);
static int  eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static int  eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
static int  eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
static void eth_igb_close(struct rte_eth_dev *dev);
static int  eth_igb_reset(struct rte_eth_dev *dev);
static int  eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
static int  eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
static int  eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
static int  eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
static int  eth_igb_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int eth_igb_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_igb_xstats_get(struct rte_eth_dev *dev,
			      struct rte_eth_xstat *xstats, unsigned n);
static int eth_igb_xstats_get_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		uint64_t *values, unsigned int n);
static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned int size);
static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
		unsigned int limit);
static int eth_igb_stats_reset(struct rte_eth_dev *dev);
static int eth_igb_xstats_reset(struct rte_eth_dev *dev);
static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
				  char *fw_version, size_t fw_size);
static int eth_igb_infos_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
static int eth_igbvf_infos_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);
static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
				  struct rte_eth_fc_conf *fc_conf);
static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
				  struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
				    struct rte_intr_handle *handle);
static void eth_igb_interrupt_handler(void *param);
static int  igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
static void igb_init_manageability(struct e1000_hw *hw);
static void igb_release_manageability(struct e1000_hw *hw);

static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
				 enum rte_vlan_type vlan_type,
				 uint16_t tpid);
static int eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int eth_igb_led_on(struct rte_eth_dev *dev);
static int eth_igb_led_off(struct rte_eth_dev *dev);

static void igb_intr_disable(struct rte_eth_dev *dev);
static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
static int eth_igb_rar_set(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mac_addr,
			   uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr);

static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
static int igbvf_dev_start(struct rte_eth_dev *dev);
static void igbvf_dev_stop(struct rte_eth_dev *dev);
static void igbvf_dev_close(struct rte_eth_dev *dev);
static int igbvf_promiscuous_enable(struct rte_eth_dev *dev);
static int igbvf_promiscuous_disable(struct rte_eth_dev *dev);
static int igbvf_allmulticast_enable(struct rte_eth_dev *dev);
static int igbvf_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igbvf_link_update(struct e1000_hw *hw);
static int eth_igbvf_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *xstats, unsigned n);
static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev,
				      struct rte_eth_xstat_name *xstats_names,
				      unsigned limit);
static int eth_igbvf_stats_reset(struct rte_eth_dev *dev);
static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr);
static int igbvf_get_reg_length(struct rte_eth_dev *dev);
static int igbvf_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);

static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
				   struct rte_eth_rss_reta_entry64 *reta_conf,
				   uint16_t reta_size);
static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
				  struct rte_eth_rss_reta_entry64 *reta_conf,
				  uint16_t reta_size);

static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter);
static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg);
static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter);
static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
			enum rte_filter_type filter_type,
			enum rte_filter_op filter_op,
			void *arg);
static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
static int eth_igb_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);
static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int eth_igb_get_module_info(struct rte_eth_dev *dev,
				   struct rte_eth_dev_module_info *modinfo);
static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
				     struct rte_dev_eeprom_info *info);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);
static int igb_timesync_enable(struct rte_eth_dev *dev);
static int igb_timesync_disable(struct rte_eth_dev *dev);
static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp,
					  uint32_t flags);
static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					  struct timespec *timestamp);
static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int igb_timesync_read_time(struct rte_eth_dev *dev,
				  struct timespec *timestamp);
static int igb_timesync_write_time(struct rte_eth_dev *dev,
				   const struct timespec *timestamp);
static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
					uint16_t queue_id);
static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
					 uint16_t queue_id);
static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
				       uint8_t queue, uint8_t msix_vector);
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
			       uint8_t index, uint8_t offset);
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
static void eth_igbvf_interrupt_handler(void *param);
static void igbvf_mbx_process(struct rte_eth_dev *dev);
static int igb_filter_restore(struct rte_eth_dev *dev);
/*
 * Define VF Stats MACRO for Non "cleared on read" register
 */
#define UPDATE_VF_STAT(reg, last, cur)            \
{                                                 \
	u32 latest = E1000_READ_REG(hw, reg);     \
	cur += (latest - last) & UINT_MAX;        \
	last = latest;                            \
}
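
/* Example: the VF counters are 32-bit and wrap around; with last == 0xFFFFFFF0
 * and latest == 0x10, (latest - last) & UINT_MAX yields 0x20, so the
 * accumulated value stays correct across the wrap.
 */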
#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGBVF_PMD_NAME "rte_igbvf_pmd"     /* PMD name */

static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_igb_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) },

	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
/*
 * The set of PCI devices this driver supports (for 82576 & I350 VF)
 */
static const struct rte_pci_id pci_id_igbvf_map[] = {
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) },
	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};
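
/* Descriptor ring limits reported to applications through the dev_infos_get
 * callbacks; the ethdev layer uses them to validate Rx/Tx queue setup.
 */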
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
	.nb_seg_max = IGB_TX_MAX_SEG,
	.nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG,
};
static const struct eth_dev_ops eth_igb_ops = {
	.dev_configure        = eth_igb_configure,
	.dev_start            = eth_igb_start,
	.dev_stop             = eth_igb_stop,
	.dev_set_link_up      = eth_igb_dev_set_link_up,
	.dev_set_link_down    = eth_igb_dev_set_link_down,
	.dev_close            = eth_igb_close,
	.dev_reset            = eth_igb_reset,
	.promiscuous_enable   = eth_igb_promiscuous_enable,
	.promiscuous_disable  = eth_igb_promiscuous_disable,
	.allmulticast_enable  = eth_igb_allmulticast_enable,
	.allmulticast_disable = eth_igb_allmulticast_disable,
	.link_update          = eth_igb_link_update,
	.stats_get            = eth_igb_stats_get,
	.xstats_get           = eth_igb_xstats_get,
	.xstats_get_by_id     = eth_igb_xstats_get_by_id,
	.xstats_get_names_by_id = eth_igb_xstats_get_names_by_id,
	.xstats_get_names     = eth_igb_xstats_get_names,
	.stats_reset          = eth_igb_stats_reset,
	.xstats_reset         = eth_igb_xstats_reset,
	.fw_version_get       = eth_igb_fw_version_get,
	.dev_infos_get        = eth_igb_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.mtu_set              = eth_igb_mtu_set,
	.vlan_filter_set      = eth_igb_vlan_filter_set,
	.vlan_tpid_set        = eth_igb_vlan_tpid_set,
	.vlan_offload_set     = eth_igb_vlan_offload_set,
	.rx_queue_setup       = eth_igb_rx_queue_setup,
	.rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
	.rx_queue_release     = eth_igb_rx_queue_release,
	.rx_queue_count       = eth_igb_rx_queue_count,
	.rx_descriptor_done   = eth_igb_rx_descriptor_done,
	.rx_descriptor_status = eth_igb_rx_descriptor_status,
	.tx_descriptor_status = eth_igb_tx_descriptor_status,
	.tx_queue_setup       = eth_igb_tx_queue_setup,
	.tx_queue_release     = eth_igb_tx_queue_release,
	.tx_done_cleanup      = eth_igb_tx_done_cleanup,
	.dev_led_on           = eth_igb_led_on,
	.dev_led_off          = eth_igb_led_off,
	.flow_ctrl_get        = eth_igb_flow_ctrl_get,
	.flow_ctrl_set        = eth_igb_flow_ctrl_set,
	.mac_addr_add         = eth_igb_rar_set,
	.mac_addr_remove      = eth_igb_rar_clear,
	.mac_addr_set         = eth_igb_default_mac_addr_set,
	.reta_update          = eth_igb_rss_reta_update,
	.reta_query           = eth_igb_rss_reta_query,
	.rss_hash_update      = eth_igb_rss_hash_update,
	.rss_hash_conf_get    = eth_igb_rss_hash_conf_get,
	.filter_ctrl          = eth_igb_filter_ctrl,
	.set_mc_addr_list     = eth_igb_set_mc_addr_list,
	.rxq_info_get         = igb_rxq_info_get,
	.txq_info_get         = igb_txq_info_get,
	.timesync_enable      = igb_timesync_enable,
	.timesync_disable     = igb_timesync_disable,
	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
	.get_reg              = eth_igb_get_regs,
	.get_eeprom_length    = eth_igb_get_eeprom_length,
	.get_eeprom           = eth_igb_get_eeprom,
	.set_eeprom           = eth_igb_set_eeprom,
	.get_module_info      = eth_igb_get_module_info,
	.get_module_eeprom    = eth_igb_get_module_eeprom,
	.timesync_adjust_time = igb_timesync_adjust_time,
	.timesync_read_time   = igb_timesync_read_time,
	.timesync_write_time  = igb_timesync_write_time,
};
/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops igbvf_eth_dev_ops = {
	.dev_configure        = igbvf_dev_configure,
	.dev_start            = igbvf_dev_start,
	.dev_stop             = igbvf_dev_stop,
	.dev_close            = igbvf_dev_close,
	.promiscuous_enable   = igbvf_promiscuous_enable,
	.promiscuous_disable  = igbvf_promiscuous_disable,
	.allmulticast_enable  = igbvf_allmulticast_enable,
	.allmulticast_disable = igbvf_allmulticast_disable,
	.link_update          = eth_igb_link_update,
	.stats_get            = eth_igbvf_stats_get,
	.xstats_get           = eth_igbvf_xstats_get,
	.xstats_get_names     = eth_igbvf_xstats_get_names,
	.stats_reset          = eth_igbvf_stats_reset,
	.xstats_reset         = eth_igbvf_stats_reset,
	.vlan_filter_set      = igbvf_vlan_filter_set,
	.dev_infos_get        = eth_igbvf_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.rx_queue_setup       = eth_igb_rx_queue_setup,
	.rx_queue_release     = eth_igb_rx_queue_release,
	.rx_descriptor_done   = eth_igb_rx_descriptor_done,
	.rx_descriptor_status = eth_igb_rx_descriptor_status,
	.tx_descriptor_status = eth_igb_tx_descriptor_status,
	.tx_queue_setup       = eth_igb_tx_queue_setup,
	.tx_queue_release     = eth_igb_tx_queue_release,
	.tx_done_cleanup      = eth_igb_tx_done_cleanup,
	.set_mc_addr_list     = eth_igb_set_mc_addr_list,
	.rxq_info_get         = igb_rxq_info_get,
	.txq_info_get         = igb_txq_info_get,
	.mac_addr_set         = igbvf_default_mac_addr_set,
	.get_reg              = igbvf_get_regs,
};
/* store statistics names and its offset in stats structure */
struct rte_igb_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)},
	{"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)},
	{"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)},
	{"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)},
	{"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
		ptc1023)},
	{"tx_size_1023_to_max_packets", offsetof(struct e1000_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)},
	{"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)},
	{"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)},
	{"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)},

	{"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)},
};

#define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \
		sizeof(rte_igb_stats_strings[0]))

static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)},
	{"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)},
	{"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)},
	{"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)},
	{"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)},
};

#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
		sizeof(rte_igbvf_stats_strings[0]))
static void
igb_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc != 0) {
		E1000_WRITE_REG(hw, E1000_EIMS, 1 << IGB_MSIX_OTHER_INTR_VEC);
	}
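
	/* The EIMS write above only unmasks the MSI-X "other cause" vector;
	 * the causes recorded in intr->mask are unmasked through the legacy
	 * IMS register below.
	 */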
	E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
	E1000_WRITE_FLUSH(hw);
}
static void
igb_intr_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc != 0) {
		E1000_WRITE_REG(hw, E1000_EIMC, 1 << IGB_MSIX_OTHER_INTR_VEC);
	}

	E1000_WRITE_REG(hw, E1000_IMC, ~0);
	E1000_WRITE_FLUSH(hw);
}
static inline void
igbvf_intr_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* only for mailbox */
	E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX);
	E1000_WRITE_FLUSH(hw);
}
/* only for mailbox now. If RX/TX needed, should extend this function. */
static void
igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector)
{
	uint32_t tmp = 0;

	/* mailbox */
	tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK);
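	/* The low bits of VTIVAR_MISC select the MSI-X vector; 0x80
	 * (E1000_VTIVAR_VALID) marks the entry valid. For the mailbox
	 * vector 0, the register value ends up as plain E1000_VTIVAR_VALID.
	 */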
	tmp |= E1000_VTIVAR_VALID;
	E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp);
}
static void
eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Configure VF other cause ivar */
	igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX);
}
static inline int32_t
igb_pf_reset_hw(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = e1000_reset_hw(hw);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	return status;
}
static void
igb_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;

	e1000_set_mac_type(hw);

	/* need to check if it is a vf device below */
}
static int
igb_reset_swfw_lock(struct e1000_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = e1000_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (e1000_get_hw_semaphore_generic(hw) < 0) {
		PMD_DRV_LOG(DEBUG, "SMBI lock released");
	}
	e1000_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage. If this is
		 * the case, it is due to an improper exit of the application.
		 * So force the release of the faulty lock.
		 */
		mask = E1000_SWFW_PHY0_SM << hw->bus.func;
		if (hw->bus.func > E1000_FUNC_1)
			mask <<= 2;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				    hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if lock can not be taken it is due to an improper lock
		 * of the semaphore.
		 */
		mask = E1000_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");
		}
		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return E1000_SUCCESS;
}
/* Remove all ntuple filters of the device */
static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_5tuple_filter *p_5tuple;
	struct e1000_2tuple_filter *p_2tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			p_5tuple, entries);
		rte_free(p_5tuple);
	}
	filter_info->fivetuple_mask = 0;
	while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) {
		TAILQ_REMOVE(&filter_info->twotuple_list,
			p_2tuple, entries);
		rte_free(p_2tuple);
	}
	filter_info->twotuple_mask = 0;

	return 0;
}

/* Remove all flex filters of the device */
static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_flex_filter *p_flex;

	while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
		TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
		rte_free(p_flex);
	}
	filter_info->flex_mask = 0;

	return 0;
}
static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);

	uint32_t ctrl_ext;

	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igb_identify_hardware(eth_dev, pci_dev);
	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	e1000_get_bus_info(hw);

	/* Reset any pending lock */
	if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igb_pf_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (e1000_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("e1000",
		RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
			     "store MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* Now initialize the hardware */
	if (igb_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}
	hw->mac.get_link_status = 1;
	adapter->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(hw) < 0) {
		PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
			     "SOL/IDER session");
	}

	/* initialize PF if max_vfs not zero */
	igb_pf_host_init(eth_dev);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
				   eth_igb_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igb_intr_enable(eth_dev);

	eth_igb_dev_set_link_down(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
	       sizeof(struct e1000_filter_info));

	TAILQ_INIT(&filter_info->flex_list);
	TAILQ_INIT(&filter_info->twotuple_list);
	TAILQ_INIT(&filter_info->fivetuple_list);

	TAILQ_INIT(&igb_filter_ntuple_list);
	TAILQ_INIT(&igb_filter_ethertype_list);
	TAILQ_INIT(&igb_filter_syn_list);
	TAILQ_INIT(&igb_filter_flex_list);
	TAILQ_INIT(&igb_filter_rss_list);
	TAILQ_INIT(&igb_flow_list);

	return 0;

err_late:
	igb_hw_control_release(hw);

	return error;
}
static int
eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_igb_close(eth_dev);

	return 0;
}
/*
 * Virtual Function device init
 */
static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int diag;
	struct rte_ether_addr *perm_addr =
		(struct rte_ether_addr *)hw->mac.perm_addr;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &igbvf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		if (eth_dev->data->scattered_rx)
			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
		return 0;
	}

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	adapter->stopped = 0;

	/* Initialize the shared code (base driver) */
	diag = e1000_setup_init_funcs(hw, TRUE);
	if (diag != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
			diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Disable the interrupts for VF */
	igbvf_intr_disable(hw);

	diag = hw->mac.ops.reset_hw(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("igbvf", RTE_ETHER_ADDR_LEN *
		hw->mac.rar_entry_count, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC "
			"addresses",
			RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		return -ENOMEM;
	}

	/* Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* Generate a random MAC address, if none was assigned by PF. */
	if (rte_is_zero_ether_addr(perm_addr)) {
		rte_eth_random_addr(perm_addr->addr_bytes);
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			     "%02x:%02x:%02x:%02x:%02x:%02x",
			     perm_addr->addr_bytes[0],
			     perm_addr->addr_bytes[1],
			     perm_addr->addr_bytes[2],
			     perm_addr->addr_bytes[3],
			     perm_addr->addr_bytes[4],
			     perm_addr->addr_bytes[5]);
	}

	diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
	if (diag) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return diag;
	}
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
		     "mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "igb_mac_82576_vf");

	intr_handle = &pci_dev->intr_handle;
	rte_intr_callback_register(intr_handle,
				   eth_igbvf_interrupt_handler, eth_dev);

	return 0;
}
static int
eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	igbvf_dev_close(eth_dev);

	return 0;
}
static int eth_igb_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct e1000_adapter), eth_igb_dev_init);
}

static int eth_igb_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igb_dev_uninit);
}

static struct rte_pci_driver rte_igb_pmd = {
	.id_table = pci_id_igb_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_igb_pci_probe,
	.remove = eth_igb_pci_remove,
};

static int eth_igbvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct e1000_adapter), eth_igbvf_dev_init);
}

static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igbvf_dev_uninit);
}

/*
 * virtual function driver struct
 */
static struct rte_pci_driver rte_igbvf_pmd = {
	.id_table = pci_id_igbvf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_igbvf_pci_probe,
	.remove = eth_igbvf_pci_remove,
};
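
/* Note that, unlike rte_igb_pmd above, the VF driver does not set
 * RTE_PCI_DRV_INTR_LSC, so link-status-change interrupts are only
 * offered by the PF driver.
 */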
static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* RCTL: enable VLAN filter since VMDq always uses VLAN filtering */
	uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
static int
igb_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
	    tx_mq_mode == ETH_MQ_TX_DCB ||
	    tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
		return -EINVAL;
	}
	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* Check multi-queue mode.
		 * To not break software, we accept ETH_MQ_RX_NONE as this
		 * might be used to turn off VLAN filter.
		 */
		if (rx_mq_mode == ETH_MQ_RX_NONE ||
		    rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
		} else {
			/* Only support one queue on VFs.
			 * RSS together with SRIOV is not supported.
			 */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					rx_mq_mode);
			return -EINVAL;
		}
		/* TX mode is not used here, so the mode might be ignored. */
		if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(WARNING, "SRIOV is active,"
					" TX mode %d is not supported. "
					" Driver will behave as %d mode.",
					tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
		}

		/* check valid queue number */
		if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" only support one queue on VFs.");
			return -EINVAL;
		}
	} else {
		/* To not break software that sets an invalid mode, only
		 * display a warning if the invalid mode is used.
		 */
		if (rx_mq_mode != ETH_MQ_RX_NONE &&
		    rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
		    rx_mq_mode != ETH_MQ_RX_RSS) {
			/* RSS together with VMDq not supported */
			PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				     rx_mq_mode);
			return -EINVAL;
		}

		if (tx_mq_mode != ETH_MQ_TX_NONE &&
		    tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
			PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
					" Due to txmode is meaningless in this"
					" driver, just ignore.",
					tx_mq_mode);
		}
	}
	return 0;
}
static int
eth_igb_configure(struct rte_eth_dev *dev)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* multiple queue mode checking */
	ret = igb_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	PMD_INIT_FUNC_TRACE();

	return 0;
}
static void
eth_igb_rxtx_control(struct rte_eth_dev *dev,
		     bool enable)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tctl, rctl;

	tctl = E1000_READ_REG(hw, E1000_TCTL);
	rctl = E1000_READ_REG(hw, E1000_RCTL);

	if (enable) {
		/* enable Tx/Rx */
		tctl |= E1000_TCTL_EN;
		rctl |= E1000_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~E1000_TCTL_EN;
		rctl &= ~E1000_RCTL_EN;
	}
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	E1000_WRITE_FLUSH(hw);
}
static int
eth_igb_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int ret, mask;
	uint32_t intr_vector = 0;
	uint32_t ctrl_ext;
	uint32_t *speeds;
	int num_speeds;
	bool autoneg;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go Up */
	eth_igb_dev_set_link_up(dev);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	if (hw->mac.type == e1000_82575) {
		uint32_t pba;

		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/* Put the address into the Receive Address Array */
	e1000_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igb_hardware_init(hw)) {
		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	E1000_WRITE_REG(hw, E1000_VET,
			RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);

	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	E1000_WRITE_FLUSH(hw);

	/* configure PF module if SRIOV enabled */
	igb_pf_host_configure(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure msix for rx interrupt */
	eth_igb_configure_msix_intr(dev);

	/* Configure for OS presence */
	igb_init_manageability(hw);

	eth_igb_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igb_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	e1000_clear_hw_cntrs_base_generic(hw);

	/*
	 * VLAN Offload Settings
	 */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
			ETH_VLAN_EXTEND_MASK;
	ret = eth_igb_vlan_offload_set(dev, mask);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set vlan offload");
		igb_dev_clear_queues(dev);
		return ret;
	}

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable VLAN filter since VMDq always uses VLAN filtering */
		igb_vmdq_vlan_hw_filter_enable(dev);
	}

	if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
		(hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
		(hw->mac.type == e1000_i211)) {
		/* Configure EITR with the maximum possible value (0xFFFF) */
		E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
	}

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
		hw->mac.autoneg = 1;
	} else {
		num_speeds = 0;
		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
		hw->phy.autoneg_advertised = 0;

		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
			goto error_invalid_config;
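
		/*
		 * Example: link_speeds = ETH_LINK_SPEED_FIXED |
		 * ETH_LINK_SPEED_100M advertises exactly one speed
		 * (num_speeds == 1, autoneg == false), which the code
		 * below turns into a forced 100M full-duplex setup.
		 */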
		/* Set/reset the mac.autoneg based on the link speed,
		 * fixed or not
		 */
		if (!autoneg) {
			hw->mac.autoneg = 0;
			hw->mac.forced_speed_duplex =
					hw->phy.autoneg_advertised;
		} else {
			hw->mac.autoneg = 1;
		}
	}

	e1000_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			eth_igb_lsc_interrupt_setup(dev, TRUE);
		else
			eth_igb_lsc_interrupt_setup(dev, FALSE);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     eth_igb_interrupt_handler,
					     (void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		eth_igb_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	igb_intr_enable(dev);

	/* restore all types filter */
	igb_filter_restore(dev);

	eth_igb_rxtx_control(dev, true);
	eth_igb_link_update(dev, 0);

	PMD_INIT_LOG(DEBUG, "<<");

	return 0;

error_invalid_config:
	PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		     dev->data->dev_conf.link_speeds, dev->data->port_id);
	igb_dev_clear_queues(dev);
	return -EINVAL;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC.
 *
 **********************************************************************/
static void
eth_igb_stop(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);

	if (adapter->stopped)
		return;

	eth_igb_rxtx_control(dev, false);

	igb_intr_disable(dev);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	/* Set bit for Go Link disconnect if PHY reset is not blocked */
	if (hw->mac.type >= e1000_82580 &&
	    (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg |= E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	/* Power down the phy. Needed to make the link go Down */
	eth_igb_dev_set_link_down(dev);

	igb_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   eth_igb_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	adapter->stopped = true;
}
static int
eth_igb_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_up_phy(hw);
	else
		e1000_power_up_fiber_serdes_link(hw);

	return 0;
}

static int
eth_igb_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->phy.media_type == e1000_media_type_copper)
		e1000_power_down_phy(hw);
	else
		e1000_shutdown_fiber_serdes_link(hw);

	return 0;
}
static void
eth_igb_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	eth_igb_stop(dev);

	e1000_phy_hw_reset(hw);
	igb_release_manageability(hw);
	igb_hw_control_release(hw);

	/* Clear bit for Go Link disconnect if PHY reset is not blocked */
	if (hw->mac.type >= e1000_82580 &&
	    (e1000_check_reset_block(hw) != E1000_BLK_PHY_RESET)) {
		uint32_t phpm_reg;

		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
	}

	igb_dev_free_queues(dev);

	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* Reset any pending lock */
	igb_reset_swfw_lock(hw);

	/* uninitialize PF if max_vfs not zero */
	igb_pf_host_uninit(dev);

	rte_intr_callback_unregister(intr_handle,
				     eth_igb_interrupt_handler, dev);

	/* clear the SYN filter info */
	filter_info->syn_info = 0;

	/* clear the ethertype filters info */
	filter_info->ethertype_mask = 0;
	memset(filter_info->ethertype_filters, 0,
		E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter));

	/* clear the rss filter info */
	memset(&filter_info->rss_info, 0,
		sizeof(struct igb_rte_flow_rss_conf));

	/* remove all ntuple filters of the device */
	igb_ntuple_filter_uninit(dev);

	/* remove all flex filters of the device */
	igb_flex_filter_uninit(dev);

	/* clear all the filters list */
	igb_filterlist_flush(dev);
}
static int
eth_igb_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begins to reset PF port, it should notify all
	 * its VFs to make them align with it. The detailed notification
	 * mechanism is PMD specific and is currently not implemented.
	 * To avoid unexpected behavior in VF, currently reset of PF with
	 * SR-IOV activation is not supported. It might be supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_igb_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_igb_dev_init(dev);

	return ret;
}
static int
igb_get_rx_buffer_size(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
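	/* RXPBS/PBA report the Rx packet-buffer size in KB units;
	 * "<< 10" converts that to bytes.
	 */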
	if (hw->mac.type == e1000_82576) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
	} else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
		/* PBS needs to be translated according to a lookup table */
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
		rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
		rx_buf_size = (rx_buf_size << 10);
	} else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
		rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
	} else {
		rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
	}

	return rx_buf_size;
}
/*********************************************************************
 *
 *  Initialize the hardware
 *
 **********************************************************************/
static int
igb_hardware_init(struct e1000_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igb_hw_control_acquire(hw);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buf_size = igb_get_rx_buffer_size(hw);

	hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGB_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;
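
	/* Example: with a 64 KB Rx buffer (rx_buf_size == 0x10000) the high
	 * water mark is 65536 - 2 * 1518 = 62500 bytes and the low water
	 * mark is 61000 bytes.
	 */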
	/* Set Flow control, use the tunable location if sane */
	if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
		hw->fc.requested_mode = igb_fc_setting;
	else
		hw->fc.requested_mode = e1000_fc_none;

	/* Issue a global reset */
	igb_pf_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	diag = e1000_init_hw(hw);
	if (diag < 0)
		return diag;

	E1000_WRITE_REG(hw, E1000_VET,
			RTE_ETHER_TYPE_VLAN << 16 | RTE_ETHER_TYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);

	return 0;
}
/* This function is based on igb_update_stats_counters() in igb/if_igb.c */
static void
igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
{
	int pause_frames;

	uint64_t old_gprc  = stats->gprc;
	uint64_t old_gptc  = stats->gptc;
	uint64_t old_tpr   = stats->tpr;
	uint64_t old_tpt   = stats->tpt;
	uint64_t old_rpthc = stats->rpthc;
	uint64_t old_hgptc = stats->hgptc;

	if (hw->phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs +=
		    E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	/* Workaround CRC bytes included in size, take away 4 bytes/packet */
1768 stats
->gorc
+= E1000_READ_REG(hw
, E1000_GORCL
);
1769 stats
->gorc
+= ((uint64_t)E1000_READ_REG(hw
, E1000_GORCH
) << 32);
1770 stats
->gorc
-= (stats
->gprc
- old_gprc
) * RTE_ETHER_CRC_LEN
;
1771 stats
->gotc
+= E1000_READ_REG(hw
, E1000_GOTCL
);
1772 stats
->gotc
+= ((uint64_t)E1000_READ_REG(hw
, E1000_GOTCH
) << 32);
1773 stats
->gotc
-= (stats
->gptc
- old_gptc
) * RTE_ETHER_CRC_LEN
;
1775 stats
->rnbc
+= E1000_READ_REG(hw
, E1000_RNBC
);
1776 stats
->ruc
+= E1000_READ_REG(hw
, E1000_RUC
);
1777 stats
->rfc
+= E1000_READ_REG(hw
, E1000_RFC
);
1778 stats
->roc
+= E1000_READ_REG(hw
, E1000_ROC
);
1779 stats
->rjc
+= E1000_READ_REG(hw
, E1000_RJC
);
1781 stats
->tpr
+= E1000_READ_REG(hw
, E1000_TPR
);
1782 stats
->tpt
+= E1000_READ_REG(hw
, E1000_TPT
);
1784 stats
->tor
+= E1000_READ_REG(hw
, E1000_TORL
);
1785 stats
->tor
+= ((uint64_t)E1000_READ_REG(hw
, E1000_TORH
) << 32);
1786 stats
->tor
-= (stats
->tpr
- old_tpr
) * RTE_ETHER_CRC_LEN
;
1787 stats
->tot
+= E1000_READ_REG(hw
, E1000_TOTL
);
1788 stats
->tot
+= ((uint64_t)E1000_READ_REG(hw
, E1000_TOTH
) << 32);
1789 stats
->tot
-= (stats
->tpt
- old_tpt
) * RTE_ETHER_CRC_LEN
;
1791 stats
->ptc64
+= E1000_READ_REG(hw
, E1000_PTC64
);
1792 stats
->ptc127
+= E1000_READ_REG(hw
, E1000_PTC127
);
1793 stats
->ptc255
+= E1000_READ_REG(hw
, E1000_PTC255
);
1794 stats
->ptc511
+= E1000_READ_REG(hw
, E1000_PTC511
);
1795 stats
->ptc1023
+= E1000_READ_REG(hw
, E1000_PTC1023
);
1796 stats
->ptc1522
+= E1000_READ_REG(hw
, E1000_PTC1522
);
1797 stats
->mptc
+= E1000_READ_REG(hw
, E1000_MPTC
);
1798 stats
->bptc
+= E1000_READ_REG(hw
, E1000_BPTC
);
1800 /* Interrupt Counts */
1802 stats
->iac
+= E1000_READ_REG(hw
, E1000_IAC
);
1803 stats
->icrxptc
+= E1000_READ_REG(hw
, E1000_ICRXPTC
);
1804 stats
->icrxatc
+= E1000_READ_REG(hw
, E1000_ICRXATC
);
1805 stats
->ictxptc
+= E1000_READ_REG(hw
, E1000_ICTXPTC
);
1806 stats
->ictxatc
+= E1000_READ_REG(hw
, E1000_ICTXATC
);
1807 stats
->ictxqec
+= E1000_READ_REG(hw
, E1000_ICTXQEC
);
1808 stats
->ictxqmtc
+= E1000_READ_REG(hw
, E1000_ICTXQMTC
);
1809 stats
->icrxdmtc
+= E1000_READ_REG(hw
, E1000_ICRXDMTC
);
1810 stats
->icrxoc
+= E1000_READ_REG(hw
, E1000_ICRXOC
);
1812 /* Host to Card Statistics */
1814 stats
->cbtmpc
+= E1000_READ_REG(hw
, E1000_CBTMPC
);
1815 stats
->htdpmc
+= E1000_READ_REG(hw
, E1000_HTDPMC
);
1816 stats
->cbrdpc
+= E1000_READ_REG(hw
, E1000_CBRDPC
);
1817 stats
->cbrmpc
+= E1000_READ_REG(hw
, E1000_CBRMPC
);
1818 stats
->rpthc
+= E1000_READ_REG(hw
, E1000_RPTHC
);
1819 stats
->hgptc
+= E1000_READ_REG(hw
, E1000_HGPTC
);
1820 stats
->htcbdpc
+= E1000_READ_REG(hw
, E1000_HTCBDPC
);
1821 stats
->hgorc
+= E1000_READ_REG(hw
, E1000_HGORCL
);
1822 stats
->hgorc
+= ((uint64_t)E1000_READ_REG(hw
, E1000_HGORCH
) << 32);
1823 stats
->hgorc
-= (stats
->rpthc
- old_rpthc
) * RTE_ETHER_CRC_LEN
;
1824 stats
->hgotc
+= E1000_READ_REG(hw
, E1000_HGOTCL
);
1825 stats
->hgotc
+= ((uint64_t)E1000_READ_REG(hw
, E1000_HGOTCH
) << 32);
1826 stats
->hgotc
-= (stats
->hgptc
- old_hgptc
) * RTE_ETHER_CRC_LEN
;
1827 stats
->lenerrs
+= E1000_READ_REG(hw
, E1000_LENERRS
);
1828 stats
->scvpc
+= E1000_READ_REG(hw
, E1000_SCVPC
);
1829 stats
->hrmpc
+= E1000_READ_REG(hw
, E1000_HRMPC
);
1831 stats
->algnerrc
+= E1000_READ_REG(hw
, E1000_ALGNERRC
);
1832 stats
->rxerrc
+= E1000_READ_REG(hw
, E1000_RXERRC
);
1833 stats
->tncrs
+= E1000_READ_REG(hw
, E1000_TNCRS
);
1834 stats
->cexterr
+= E1000_READ_REG(hw
, E1000_CEXTERR
);
1835 stats
->tsctc
+= E1000_READ_REG(hw
, E1000_TSCTC
);
1836 stats
->tsctfc
+= E1000_READ_REG(hw
, E1000_TSCTFC
);
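/*
 * Example of the CRC workaround above: if 1000 good packets were received
 * since the last read, (stats->gprc - old_gprc) == 1000, so 1000 *
 * RTE_ETHER_CRC_LEN == 4000 bytes are subtracted from the accumulated byte
 * counter. The hardware octet counters include the 4-byte CRC of every
 * frame, while the software byte totals should not.
 */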
static int
eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_hw_stats *stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	igb_read_stats_registers(hw, stats);

	if (rte_stats == NULL)
		return -EINVAL;

	/* Rx Errors */
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = stats->crcerrs +
			     stats->rlec + stats->ruc + stats->roc +
			     stats->rxerrc + stats->algnerrc + stats->cexterr;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes   = stats->gorc;
	rte_stats->obytes   = stats->gotc;
	return 0;
}
static int
eth_igb_stats_reset(struct rte_eth_dev *dev)
{
	struct e1000_hw_stats *hw_stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* HW registers are cleared on read */
	eth_igb_stats_get(dev, NULL);

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}
static int
eth_igb_xstats_reset(struct rte_eth_dev *dev)
{
	struct e1000_hw_stats *stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* HW registers are cleared on read */
	eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS);

	/* Reset software totals */
	memset(stats, 0, sizeof(*stats));

	return 0;
}
static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	__rte_unused unsigned int size)
{
	unsigned i;

	if (xstats_names == NULL)
		return IGB_NB_XSTATS;

	/* Note: limit checked in rte_eth_xstats_names() */

	for (i = 0; i < IGB_NB_XSTATS; i++) {
		strlcpy(xstats_names[i].name, rte_igb_stats_strings[i].name,
			sizeof(xstats_names[i].name));
	}

	return IGB_NB_XSTATS;
}
static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i;

	if (!ids) {
		if (xstats_names == NULL)
			return IGB_NB_XSTATS;

		for (i = 0; i < IGB_NB_XSTATS; i++)
			strlcpy(xstats_names[i].name,
				rte_igb_stats_strings[i].name,
				sizeof(xstats_names[i].name));

		return IGB_NB_XSTATS;

	} else {
		struct rte_eth_xstat_name xstats_names_copy[IGB_NB_XSTATS];

		eth_igb_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
				IGB_NB_XSTATS);

		for (i = 0; i < limit; i++) {
			if (ids[i] >= IGB_NB_XSTATS) {
				PMD_INIT_LOG(ERR, "id value isn't valid");
				return -1;
			}
			strcpy(xstats_names[i].name,
					xstats_names_copy[ids[i]].name);
		}
		return limit;
	}
}
static int
eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		   unsigned n)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_hw_stats *hw_stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	unsigned i;

	if (n < IGB_NB_XSTATS)
		return IGB_NB_XSTATS;

	igb_read_stats_registers(hw, hw_stats);

	/* If this is a reset xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	if (!xstats)
		return 0;

	/* Extended stats */
	for (i = 0; i < IGB_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
			rte_igb_stats_strings[i].offset);
	}

	return IGB_NB_XSTATS;
}
static int
eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int n)
{
	unsigned int i;

	if (!ids) {
		struct e1000_hw *hw =
			E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
		struct e1000_hw_stats *hw_stats =
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

		if (n < IGB_NB_XSTATS)
			return IGB_NB_XSTATS;

		igb_read_stats_registers(hw, hw_stats);

		/* If this is a reset xstats is NULL, and we have cleared the
		 * registers by reading them.
		 */
		if (!values)
			return 0;

		/* Extended stats */
		for (i = 0; i < IGB_NB_XSTATS; i++)
			values[i] = *(uint64_t *)(((char *)hw_stats) +
					rte_igb_stats_strings[i].offset);

		return IGB_NB_XSTATS;

	} else {
		uint64_t values_copy[IGB_NB_XSTATS];

		eth_igb_xstats_get_by_id(dev, NULL, values_copy,
				IGB_NB_XSTATS);

		for (i = 0; i < n; i++) {
			if (ids[i] >= IGB_NB_XSTATS) {
				PMD_INIT_LOG(ERR, "id value isn't valid");
				return -1;
			}
			values[i] = values_copy[ids[i]];
		}
		return n;
	}
}
static void
igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
{
	/* Good Rx packets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGPRC,
	    hw_stats->last_gprc, hw_stats->gprc);

	/* Good Rx octets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGORC,
	    hw_stats->last_gorc, hw_stats->gorc);

	/* Good Tx packets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGPTC,
	    hw_stats->last_gptc, hw_stats->gptc);

	/* Good Tx octets, include VF loopback */
	UPDATE_VF_STAT(E1000_VFGOTC,
	    hw_stats->last_gotc, hw_stats->gotc);

	/* Rx Multicast packets */
	UPDATE_VF_STAT(E1000_VFMPRC,
	    hw_stats->last_mprc, hw_stats->mprc);

	/* Good Rx loopback packets */
	UPDATE_VF_STAT(E1000_VFGPRLBC,
	    hw_stats->last_gprlbc, hw_stats->gprlbc);

	/* Good Rx loopback octets */
	UPDATE_VF_STAT(E1000_VFGORLBC,
	    hw_stats->last_gorlbc, hw_stats->gorlbc);

	/* Good Tx loopback packets */
	UPDATE_VF_STAT(E1000_VFGPTLBC,
	    hw_stats->last_gptlbc, hw_stats->gptlbc);

	/* Good Tx loopback octets */
	UPDATE_VF_STAT(E1000_VFGOTLBC,
	    hw_stats->last_gotlbc, hw_stats->gotlbc);
}
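/*
 * Note on UPDATE_VF_STAT (defined earlier in this file): the VF statistics
 * registers are 32-bit free-running counters that are not clear-on-read.
 * The assumed semantics are that the macro reads the current register value,
 * accumulates the unsigned 32-bit delta against the saved "last" snapshot
 * into the 64-bit software counter, and then updates the snapshot; the
 * unsigned subtraction keeps the delta correct across a single wrap-around.
 */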
static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
				     struct rte_eth_xstat_name *xstats_names,
				     __rte_unused unsigned limit)
{
	unsigned i;

	if (xstats_names != NULL)
		for (i = 0; i < IGBVF_NB_XSTATS; i++) {
			strlcpy(xstats_names[i].name,
				rte_igbvf_stats_strings[i].name,
				sizeof(xstats_names[i].name));
		}
	return IGBVF_NB_XSTATS;
}
static int
eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned n)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	unsigned i;

	if (n < IGBVF_NB_XSTATS)
		return IGBVF_NB_XSTATS;

	igbvf_read_stats_registers(hw, hw_stats);

	if (!xstats)
		return 0;

	for (i = 0; i < IGBVF_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
			rte_igbvf_stats_strings[i].offset);
	}

	return IGBVF_NB_XSTATS;
}
static int
eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	igbvf_read_stats_registers(hw, hw_stats);

	if (rte_stats == NULL)
		return -EINVAL;

	rte_stats->ipackets = hw_stats->gprc;
	rte_stats->ibytes = hw_stats->gorc;
	rte_stats->opackets = hw_stats->gptc;
	rte_stats->obytes = hw_stats->gotc;
	return 0;
}
static int
eth_igbvf_stats_reset(struct rte_eth_dev *dev)
{
	struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Sync HW register to the last stats */
	eth_igbvf_stats_get(dev, NULL);

	/* reset HW current stats */
	memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
	       offsetof(struct e1000_vf_stats, gprc));

	return 0;
}
static int
eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
		       size_t fw_size)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_fw_version fw;
	int ret;

	e1000_get_fw_version(hw, &fw);

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		if (!(e1000_get_flash_presence_i210(hw))) {
			ret = snprintf(fw_version, fw_size,
				 "%2d.%2d-%d",
				 fw.invm_major, fw.invm_minor,
				 fw.invm_img_type);
			break;
		}
		/* fall through */
	default:
		/* if option rom is valid, display its version too */
		if (fw.or_valid) {
			ret = snprintf(fw_version, fw_size,
				 "%d.%d, 0x%08x, %d.%d.%d",
				 fw.eep_major, fw.eep_minor, fw.etrack_id,
				 fw.or_major, fw.or_build, fw.or_patch);
		/* no option rom */
		} else {
			if (fw.etrack_id != 0X0000) {
				ret = snprintf(fw_version, fw_size,
					 "%d.%d, 0x%08x",
					 fw.eep_major, fw.eep_minor,
					 fw.etrack_id);
			} else {
				ret = snprintf(fw_version, fw_size,
					 "%d.%d.%d",
					 fw.eep_major, fw.eep_minor,
					 fw.eep_build);
			}
		}
		break;
	}

	ret += 1; /* add the size of '\0' */
	if (fw_size < (u32)ret)
		return ret;
	else
		return 0;
}
static int
eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
				    dev_info->rx_queue_offload_capa;
	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
				    dev_info->tx_queue_offload_capa;

	switch (hw->mac.type) {
	case e1000_82575:
		dev_info->max_rx_queues = 4;
		dev_info->max_tx_queues = 4;
		dev_info->max_vmdq_pools = 0;
		break;

	case e1000_82576:
		dev_info->max_rx_queues = 16;
		dev_info->max_tx_queues = 16;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		dev_info->vmdq_queue_num = 16;
		break;

	case e1000_82580:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		dev_info->vmdq_queue_num = 8;
		break;

	case e1000_i350:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		dev_info->max_vmdq_pools = ETH_8_POOLS;
		dev_info->vmdq_queue_num = 8;
		break;

	case e1000_i354:
		dev_info->max_rx_queues = 8;
		dev_info->max_tx_queues = 8;
		break;

	case e1000_i210:
		dev_info->max_rx_queues = 4;
		dev_info->max_tx_queues = 4;
		dev_info->max_vmdq_pools = 0;
		break;

	case e1000_i211:
		dev_info->max_rx_queues = 2;
		dev_info->max_tx_queues = 2;
		dev_info->max_vmdq_pools = 0;
		break;

	default:
		/* Should not happen */
		return -EINVAL;
	}
	dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGB_DEFAULT_RX_PTHRESH,
			.hthresh = IGB_DEFAULT_RX_HTHRESH,
			.wthresh = IGB_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGB_DEFAULT_TX_PTHRESH,
			.hthresh = IGB_DEFAULT_TX_HTHRESH,
			.wthresh = IGB_DEFAULT_TX_WTHRESH,
		},
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
			ETH_LINK_SPEED_1G;

	dev_info->max_mtu = dev_info->max_rx_pktlen - E1000_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;

	return 0;
}
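/*
 * Sketch of the MTU bound above: max_rx_pktlen is capped at 0x3FFF (16383)
 * by the RLPML register, and E1000_ETH_OVERHEAD (defined in e1000_ethdev.h,
 * here assumed to cover the Ethernet header, CRC and VLAN tag space)
 * accounts for the L2 framing bytes that do not count toward the MTU, so
 * max_mtu = 16383 - E1000_ETH_OVERHEAD.
 */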
static const uint32_t *
eth_igb_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to igb_rxd_pkt_info_to_pkt_type() */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == eth_igb_recv_pkts ||
	    dev->rx_pkt_burst == eth_igb_recv_scattered_pkts)
		return ptypes;
	return NULL;
}
static int
eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
				DEV_TX_OFFLOAD_IPV4_CKSUM  |
				DEV_TX_OFFLOAD_UDP_CKSUM   |
				DEV_TX_OFFLOAD_TCP_CKSUM   |
				DEV_TX_OFFLOAD_SCTP_CKSUM  |
				DEV_TX_OFFLOAD_TCP_TSO;
	switch (hw->mac.type) {
	case e1000_vfadapt:
		dev_info->max_rx_queues = 2;
		dev_info->max_tx_queues = 2;
		break;
	case e1000_vfadapt_i350:
		dev_info->max_rx_queues = 1;
		dev_info->max_tx_queues = 1;
		break;
	default:
		/* Should not happen */
		return -EINVAL;
	}

	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
				    dev_info->rx_queue_offload_capa;
	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
				    dev_info->tx_queue_offload_capa;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGB_DEFAULT_RX_PTHRESH,
			.hthresh = IGB_DEFAULT_RX_HTHRESH,
			.wthresh = IGB_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGB_DEFAULT_TX_PTHRESH,
			.hthresh = IGB_DEFAULT_TX_HTHRESH,
			.wthresh = IGB_DEFAULT_TX_WTHRESH,
		},
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	return 0;
}
/* return 0 means link status changed, -1 means not changed */
static int
eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link;
	int link_check, count;

	link_check = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case e1000_media_type_copper:
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;

		case e1000_media_type_fiber:
			e1000_check_for_link(hw);
			link_check = (E1000_READ_REG(hw, E1000_STATUS) &
				      E1000_STATUS_LU);
			break;

		case e1000_media_type_internal_serdes:
			e1000_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		/* VF device is type_unknown */
		case e1000_media_type_unknown:
			eth_igbvf_link_update(hw);
			link_check = !hw->mac.get_link_status;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));

	/* Now we check if a transition has happened */
	if (link_check) {
		uint16_t duplex, speed;
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		link.link_duplex = (duplex == FULL_DUPLEX) ?
				ETH_LINK_FULL_DUPLEX :
				ETH_LINK_HALF_DUPLEX;
		link.link_speed = speed;
		link.link_status = ETH_LINK_UP;
		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);
	} else if (!link_check) {
		link.link_speed = 0;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		link.link_status = ETH_LINK_DOWN;
		link.link_autoneg = ETH_LINK_FIXED;
	}

	return rte_eth_linkstatus_set(dev, &link);
}
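/*
 * The "up to 9 seconds" bound above follows from the loop constants:
 * assuming the header defines IGB_LINK_UPDATE_CHECK_TIMEOUT as 90 checks
 * and IGB_LINK_UPDATE_CHECK_INTERVAL as 100 ms (consistent with the
 * comment), a wait_to_complete caller blocks for at most 90 * 100 ms = 9 s
 * before giving up on the link coming up; a non-waiting caller samples the
 * link exactly once.
 */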
/*
 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igb_hw_control_acquire(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/*
 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igb_hw_control_release(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(hw, E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}
/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system... aka
 * to disable special hardware management features.
 */
static void
igb_init_manageability(struct e1000_hw *hw)
{
	if (e1000_enable_mng_pass_thru(hw)) {
		uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5;  /* Mng Port 623 */
		manc2h |= 1 << 6;  /* Mng Port 664 */
		E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}
static void
igb_release_manageability(struct e1000_hw *hw)
{
	if (e1000_enable_mng_pass_thru(hw)) {
		uint32_t manc = E1000_READ_REG(hw, E1000_MANC);

		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(hw, E1000_MANC, manc);
	}
}
static int
eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	return 0;
}
static int
eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_UPE);
	if (dev->data->all_multicast == 1)
		rctl |= E1000_RCTL_MPE;
	else
		rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	return 0;
}
static int
eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	return 0;
}
static int
eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rctl;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	return 0;
}
static int
eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
			      E1000_VFTA_ENTRY_MASK);
	vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
	vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
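/*
 * Worked example of the VFTA indexing above: the VLAN filter table is an
 * array of 128 32-bit registers covering the 4096 possible VLAN IDs. For
 * vlan_id = 1234, vid_idx = (1234 >> 5) & 0x7F = 38 selects the register
 * and vid_bit = 1 << (1234 & 0x1F) = 1 << 18 selects the bit within it,
 * since 1234 = 38 * 32 + 18.
 */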
static int
eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
		      enum rte_vlan_type vlan_type,
		      uint16_t tpid)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = -EIO;
	uint32_t reg, qinq;

	qinq = E1000_READ_REG(hw, E1000_CTRL_EXT);
	qinq &= E1000_CTRL_EXT_EXT_VLAN;

	/* only outer TPID of double VLAN can be configured */
	if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
		reg = E1000_READ_REG(hw, E1000_VET);
		reg = (reg & (~E1000_VET_VET_EXT)) |
			((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
		E1000_WRITE_REG(hw, E1000_VET, reg);

		return 0;
	}

	/* all other TPID values are read-only */
	PMD_DRV_LOG(ERR, "Not supported");

	return ret;
}
static void
igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* Filter Table Disable */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg &= ~E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}
static void
igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t reg;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);

	/* restore VFTA table */
	for (i = 0; i < IGB_VFTA_SIZE; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
}
static void
igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Disable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg &= ~E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}
static void
igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* VLAN Mode Enable */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);
}
static void
igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* CTRL_EXT: Extended VLAN */
	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
						VLAN_TAG_SIZE);
}
static void
igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reg;

	/* CTRL_EXT: Extended VLAN */
	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	reg |= E1000_CTRL_EXT_EXTEND_VLAN;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
						2 * VLAN_TAG_SIZE);
}
static int
eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			igb_vlan_hw_strip_enable(dev);
		else
			igb_vlan_hw_strip_disable(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			igb_vlan_hw_filter_enable(dev);
		else
			igb_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			igb_vlan_hw_extend_enable(dev);
		else
			igb_vlan_hw_extend_disable(dev);
	}

	return 0;
}
/*
 * It enables the interrupt mask and then enables the interrupt.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 * @param on
 *  Enable or Disable
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	if (on)
		intr->mask |= E1000_ICR_LSC;
	else
		intr->mask &= ~E1000_ICR_LSC;

	return 0;
}
/* It clears the interrupt causes and enables the interrupt.
 * It will be called once only during NIC initialization.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	uint32_t mask, regval;
	int ret;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	ret = eth_igb_infos_get(dev, &dev_info);
	if (ret != 0)
		return ret;

	mask = (0xFFFFFFFF >> (32 - dev_info.max_rx_queues)) << misc_shift;
	regval = E1000_READ_REG(hw, E1000_EIMS);
	E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);

	return 0;
}
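/*
 * Example of the EIMS mask computation above: with max_rx_queues = 8 and
 * misc_shift = 1 (vector 0 reserved for the "other" interrupt when
 * rte_intr_allow_others() is true), mask = (0xFFFFFFFF >> 24) << 1 = 0x1FE,
 * i.e. one enable bit per Rx queue vector starting at bit 1.
 */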
/*
 * It reads ICR to get the interrupt causes, checks them and sets a bit flag
 * to update link status.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	igb_intr_disable(dev);

	/* read-on-clear nic registers here */
	icr = E1000_READ_REG(hw, E1000_ICR);

	intr->flags = 0;
	if (icr & E1000_ICR_LSC) {
		/* set flag for async link update */
		intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
	}

	if (icr & E1000_ICR_VMMB)
		intr->flags |= E1000_FLAG_MAILBOX;

	return 0;
}
/*
 * It executes link_update after knowing an interrupt is present.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
eth_igb_interrupt_action(struct rte_eth_dev *dev,
			 struct rte_intr_handle *intr_handle)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	int ret;

	if (intr->flags & E1000_FLAG_MAILBOX) {
		igb_pf_mbx_process(dev);
		intr->flags &= ~E1000_FLAG_MAILBOX;
	}

	igb_intr_enable(dev);
	rte_intr_ack(intr_handle);

	if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;

		/* set get_link_status to check register later */
		hw->mac.get_link_status = 1;
		ret = eth_igb_link_update(dev, 0);

		/* check if link has changed */
		if (ret < 0)
			return 0;

		rte_eth_linkstatus_get(dev, &link);
		if (link.link_status) {
			PMD_INIT_LOG(INFO,
				     " Port %d: Link Up - speed %u Mbps - %s",
				     dev->data->port_id,
				     (unsigned)link.link_speed,
				     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
				     "full-duplex" : "half-duplex");
		} else {
			PMD_INIT_LOG(INFO, " Port %d: Link Down",
				     dev->data->port_id);
		}

		PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
			     pci_dev->addr.domain,
			     pci_dev->addr.bus,
			     pci_dev->addr.devid,
			     pci_dev->addr.function);
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}

	return 0;
}
/*
 * Interrupt handler which shall be registered at first.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
eth_igb_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_igb_interrupt_get_status(dev);
	eth_igb_interrupt_action(dev, dev->intr_handle);
}
static int
eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	igbvf_intr_disable(hw);

	/* read-on-clear nic registers here */
	eicr = E1000_READ_REG(hw, E1000_EICR);
	intr->flags = 0;

	if (eicr == E1000_VTIVAR_MISC_MAILBOX)
		intr->flags |= E1000_FLAG_MAILBOX;

	return 0;
}
void igbvf_mbx_process(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_mbx_info *mbx = &hw->mbx;
	u32 in_msg = 0;

	/* peek the message first */
	in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0));

	/* PF reset VF event */
	if (in_msg == E1000_PF_CONTROL_MSG) {
		/* dummy mbx read to ack pf */
		if (mbx->ops.read(hw, &in_msg, 1, 0))
			return;
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
					      NULL);
	}
}
static int
eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle)
{
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	if (intr->flags & E1000_FLAG_MAILBOX) {
		igbvf_mbx_process(dev);
		intr->flags &= ~E1000_FLAG_MAILBOX;
	}

	igbvf_intr_enable(dev);
	rte_intr_ack(intr_handle);

	return 0;
}
static void
eth_igbvf_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_igbvf_interrupt_get_status(dev);
	eth_igbvf_interrupt_action(dev, dev->intr_handle);
}
static int
eth_igb_led_on(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
}
static int
eth_igb_led_off(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
}
static int
eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	uint32_t ctrl;
	int tx_pause;
	int rx_pause;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = hw->mac.autoneg;

	/*
	 * Return rx_pause and tx_pause status according to actual setting of
	 * the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	if (ctrl & E1000_CTRL_TFCE)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (ctrl & E1000_CTRL_RFCE)
		rx_pause = 1;
	else
		rx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}
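/*
 * The RFCE/TFCE to rte_eth_fc_conf mode mapping above, in table form:
 *
 *   RFCE  TFCE  reported mode
 *    1     1    RTE_FC_FULL
 *    1     0    RTE_FC_RX_PAUSE
 *    0     1    RTE_FC_TX_PAUSE
 *    0     0    RTE_FC_NONE
 */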
static int
eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct e1000_hw *hw;
	int err;
	enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
		e1000_fc_none,
		e1000_fc_rx_pause,
		e1000_fc_tx_pause,
		e1000_fc_full
	};
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;
	rx_buf_size = igb_get_rx_buffer_size(hw);
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/* At least reserve one Ethernet frame for watermark */
	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
	if ((fc_conf->high_water > max_high_water) ||
	    (fc_conf->high_water < fc_conf->low_water)) {
		PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
		PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
		return -EINVAL;
	}

	hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
	hw->fc.pause_time     = fc_conf->pause_time;
	hw->fc.high_water     = fc_conf->high_water;
	hw->fc.low_water      = fc_conf->low_water;
	hw->fc.send_xon	      = fc_conf->send_xon;

	err = e1000_setup_link_generic(hw);
	if (err == E1000_SUCCESS) {

		/* check if we want to forward MAC frames - driver doesn't have native
		 * capability to do that, so we'll write the registers ourselves */

		rctl = E1000_READ_REG(hw, E1000_RCTL);

		/* set or clear MFLCN.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= E1000_RCTL_PMCF;
		else
			rctl &= ~E1000_RCTL_PMCF;

		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		E1000_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
	return -EIO;
}
#define E1000_RAH_POOLSEL_SHIFT      (18)
static int
eth_igb_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rah;

	e1000_rar_set(hw, mac_addr->addr_bytes, index);
	rah = E1000_READ_REG(hw, E1000_RAH(index));
	rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
	E1000_WRITE_REG(hw, E1000_RAH(index), rah);
	return 0;
}
static void
eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
	uint8_t addr[RTE_ETHER_ADDR_LEN];
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(addr, 0, sizeof(addr));

	e1000_rar_set(hw, addr, index);
}
static int
eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
			     struct rte_ether_addr *addr)
{
	eth_igb_rar_clear(dev, 0);
	eth_igb_rar_set(dev, (void *)addr, 0, 0);

	return 0;
}
/*
 * Virtual Function operations
 */
static void
igbvf_intr_disable(struct e1000_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	/* Clear interrupt mask to stop from interrupts being generated */
	E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);

	E1000_WRITE_FLUSH(hw);
}
static void
igbvf_stop_adapter(struct rte_eth_dev *dev)
{
	u32 reg_val;
	u16 i;
	struct rte_eth_dev_info dev_info;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	memset(&dev_info, 0, sizeof(dev_info));
	ret = eth_igbvf_infos_get(dev, &dev_info);
	if (ret != 0)
		return;

	/* Clear interrupt mask to stop from interrupts being generated */
	igbvf_intr_disable(hw);

	/* Clear any pending interrupts, flush previous writes */
	E1000_READ_REG(hw, E1000_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < dev_info.max_tx_queues; i++)
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < dev_info.max_rx_queues; i++) {
		reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
		reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
		while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
			;
	}

	/* flush all queues disables */
	E1000_WRITE_FLUSH(hw);
	msec_delay(2);
}
static int eth_igbvf_link_update(struct e1000_hw *hw)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	struct e1000_mac_info *mac = &hw->mac;
	int ret_val = E1000_SUCCESS;

	PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");

	/*
	 * We only want to run this if there has been a rst asserted.
	 * in this case that could mean a link change, device reset,
	 * or a virtual function reset
	 */

	/* If we were hit with a reset or timeout drop the link */
	if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
		mac->get_link_status = TRUE;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
		goto out;

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link */
	mac->get_link_status = FALSE;

out:
	return ret_val;
}
static int
igbvf_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *conf = &dev->data->dev_conf;

	PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
		     dev->data->port_id);

	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/*
	 * VF has no ability to enable/disable HW CRC
	 * Keep the persistent behavior the same as Host PF
	 */
#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
	if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
	}
#else
	if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
		conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
	}
#endif

	return 0;
}
static int
igbvf_dev_start(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int ret;
	uint32_t intr_vector = 0;

	PMD_INIT_FUNC_TRACE();

	hw->mac.ops.reset_hw(hw);
	adapter->stopped = 0;

	/* Set all vfta */
	igbvf_set_vfta_all(dev, 1);

	eth_igbvf_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = eth_igbvf_rx_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		igb_dev_clear_queues(dev);
		return ret;
	}

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		intr_vector = dev->data->nb_rx_queues;
		ret = rte_intr_efd_enable(intr_handle, intr_vector);
		if (ret)
			return ret;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	eth_igbvf_configure_msix_intr(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	igbvf_intr_enable(dev);

	return 0;
}
static void
igbvf_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);

	if (adapter->stopped)
		return;

	PMD_INIT_FUNC_TRACE();

	igbvf_stop_adapter(dev);

	/*
	 * Clear what we set, but we still keep shadow_vfta to
	 * restore after device starts
	 */
	igbvf_set_vfta_all(dev, 0);

	igb_dev_clear_queues(dev);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	adapter->stopped = true;
}
static void
igbvf_dev_close(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr addr;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_INIT_FUNC_TRACE();

	e1000_reset_hw(hw);

	igbvf_dev_stop(dev);

	igb_dev_free_queues(dev);

	/**
	 * reprogram the RAR with a zero mac address,
	 * to ensure that the VF traffic goes to the PF
	 * after stop, close and detach of the VF.
	 **/

	memset(&addr, 0, sizeof(addr));
	igbvf_default_mac_addr_set(dev, &addr);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     eth_igbvf_interrupt_handler,
				     (void *)dev);
}
static int
igbvf_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Set both unicast and multicast promisc */
	e1000_promisc_set_vf(hw, e1000_promisc_enabled);

	return 0;
}
static int
igbvf_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* If in allmulticast mode leave multicast promisc */
	if (dev->data->all_multicast == 1)
		e1000_promisc_set_vf(hw, e1000_promisc_multicast);
	else
		e1000_promisc_set_vf(hw, e1000_promisc_disabled);

	return 0;
}
static int
igbvf_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* In promiscuous mode multicast promisc already set */
	if (dev->data->promiscuous == 0)
		e1000_promisc_set_vf(hw, e1000_promisc_multicast);

	return 0;
}
static int
igbvf_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* In promiscuous mode leave multicast promisc enabled */
	if (dev->data->promiscuous == 0)
		e1000_promisc_set_vf(hw, e1000_promisc_disabled);

	return 0;
}
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	uint32_t msgbuf[2];
	s32 err;

	/* After set vlan, vlan strip will also be enabled in igb driver */
	msgbuf[0] = E1000_VF_SET_VLAN;
	msgbuf[1] = vid;
	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
	if (on)
		msgbuf[0] |= E1000_VF_SET_VLAN_ADD;

	err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
	if (err)
		goto mbx_err;

	err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
	if (err)
		goto mbx_err;

	msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))
		err = -EINVAL;

mbx_err:
	return err;
}
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	int i = 0, j = 0, vfta = 0, mask = 1;

	for (i = 0; i < IGB_VFTA_SIZE; i++) {
		vfta = shadow_vfta->vfta[i];
		if (vfta) {
			mask = 1;
			for (j = 0; j < 32; j++) {
				if (vfta & mask)
					igbvf_set_vfta(hw,
						(uint16_t)((i << 5) + j), on);
				mask <<= 1;
			}
		}
	}
}
static int
igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vid_idx = 0;
	uint32_t vid_bit = 0;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	/* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
	ret = igbvf_set_vfta(hw, vlan_id, !!on);
	if (ret) {
		PMD_INIT_LOG(ERR, "Unable to set VF vlan");
		return ret;
	}
	vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
	vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));

	/* Save what we set and restore it after device reset */
	if (on)
		shadow_vfta->vfta[vid_idx] |= vid_bit;
	else
		shadow_vfta->vfta[vid_idx] &= ~vid_bit;

	return 0;
}
static int
igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* index is not used by rar_set() */
	hw->mac.ops.rar_set(hw, (void *)addr, 0);
	return 0;
}
static int
eth_igb_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint8_t i, j, mask;
	uint32_t reta, r;
	uint16_t idx, shift;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number the hardware can "
			"support (%d)", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
						IGB_4_BIT_MASK);
		if (!mask)
			continue;
		if (mask == IGB_4_BIT_MASK)
			r = 0;
		else
			r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
		for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
			if (mask & (0x1 << j))
				reta |= reta_conf[idx].reta[shift + j] <<
							(CHAR_BIT * j);
			else
				reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
		}
		E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
	}

	return 0;
}
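/*
 * Example of the packing above: each 32-bit RETA register holds four
 * one-byte redirection entries, so entries i..i+3 live in E1000_RETA(i >> 2).
 * With mask = 0x5 only entries i and i+2 are replaced; bytes 1 and 3 are
 * kept from the previous register value r, which is why a partial mask
 * forces a read-modify-write while a full 0xF mask skips the read.
 */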
static int
eth_igb_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	uint8_t i, j, mask;
	uint32_t reta;
	uint16_t idx, shift;
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number the hardware can "
			"support (%d)", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
						IGB_4_BIT_MASK);
		if (!mask)
			continue;
		reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
		for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
			if (mask & (0x1 << j))
				reta_conf[idx].reta[shift + j] =
					((reta >> (CHAR_BIT * j)) &
						IGB_8_BIT_MASK);
		}
	}

	return 0;
}
static int
eth_igb_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t synqf, rfctl;

	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
		return -EINVAL;

	synqf = E1000_READ_REG(hw, E1000_SYNQF(0));

	if (add) {
		if (synqf & E1000_SYN_FILTER_ENABLE)
			return -EINVAL;

		synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
			E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);

		rfctl = E1000_READ_REG(hw, E1000_RFCTL);
		if (filter->hig_pri)
			rfctl |= E1000_RFCTL_SYNQFP;
		else
			rfctl &= ~E1000_RFCTL_SYNQFP;

		E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
	} else {
		if (!(synqf & E1000_SYN_FILTER_ENABLE))
			return -ENOENT;
		synqf = 0;
	}

	filter_info->syn_info = synqf;
	E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
	E1000_WRITE_FLUSH(hw);
	return 0;
}
static int
eth_igb_syn_filter_get(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t synqf, rfctl;

	synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
	if (synqf & E1000_SYN_FILTER_ENABLE) {
		rfctl = E1000_READ_REG(hw, E1000_RFCTL);
		filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
		filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
				E1000_SYN_FILTER_QUEUE_SHIFT);
		return 0;
	}

	return -ENOENT;
}
static int
eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
			    filter_op);
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = eth_igb_syn_filter_set(dev,
				(struct rte_eth_syn_filter *)arg,
				TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = eth_igb_syn_filter_set(dev,
				(struct rte_eth_syn_filter *)arg,
				FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = eth_igb_syn_filter_get(dev,
				(struct rte_eth_syn_filter *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info */
static inline int
ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
			struct e1000_2tuple_filter_info *filter_info)
{
	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
		return -EINVAL;
	if (filter->priority > E1000_2TUPLE_MAX_PRI)
		return -EINVAL;  /* filter index is out of range. */
	if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK)
		return -EINVAL;  /* flags is invalid. */

	switch (filter->dst_port_mask) {
	case UINT16_MAX:
		filter_info->dst_port_mask = 0;
		filter_info->dst_port = filter->dst_port;
		break;
	case 0:
		filter_info->dst_port_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
		return -EINVAL;
	}

	switch (filter->proto_mask) {
	case UINT8_MAX:
		filter_info->proto_mask = 0;
		filter_info->proto = filter->proto;
		break;
	case 0:
		filter_info->proto_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid protocol mask.");
		return -EINVAL;
	}

	filter_info->priority = (uint8_t)filter->priority;
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
		filter_info->tcp_flags = filter->tcp_flags;
	else
		filter_info->tcp_flags = 0;

	return 0;
}
static inline struct e1000_2tuple_filter *
igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
			struct e1000_2tuple_filter_info *key)
{
	struct e1000_2tuple_filter *it;

	TAILQ_FOREACH(it, filter_list, entries) {
		if (memcmp(key, &it->filter_info,
			sizeof(struct e1000_2tuple_filter_info)) == 0) {
			return it;
		}
	}
	return NULL;
}
/* inject an igb 2tuple filter to HW */
static inline void
igb_inject_2uple_filter(struct rte_eth_dev *dev,
			   struct e1000_2tuple_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
	uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
	int i = filter->index;

	imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
	if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
		imir |= E1000_IMIR_PORT_BP;
	else
		imir &= ~E1000_IMIR_PORT_BP;

	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;

	ttqf |= E1000_TTQF_QUEUE_ENABLE;
	ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
	ttqf |= (uint32_t)(filter->filter_info.proto &
						E1000_TTQF_PROTOCOL_MASK);
	if (filter->filter_info.proto_mask == 0)
		ttqf &= ~E1000_TTQF_MASK_ENABLE;

	/* tcp flags bits setting. */
	if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) {
		if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_URG;
		if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_ACK;
		if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_PSH;
		if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_RST;
		if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_SYN;
		if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_FIN;
	} else {
		imir_ext |= E1000_IMIREXT_CTRL_BP;
	}
	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
	E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
}
/*
 * igb_add_2tuple_filter - add a 2tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be added.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_add_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_2tuple_filter *filter;
	int i, ret;

	filter = rte_zmalloc("e1000_2tuple_filter",
			sizeof(struct e1000_2tuple_filter), 0);
	if (filter == NULL)
		return -ENOMEM;

	ret = ntuple_filter_to_2tuple(ntuple_filter,
				      &filter->filter_info);
	if (ret < 0) {
		rte_free(filter);
		return ret;
	}
	if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
					 &filter->filter_info) != NULL) {
		PMD_DRV_LOG(ERR, "filter exists.");
		rte_free(filter);
		return -EEXIST;
	}
	filter->queue = ntuple_filter->queue;

	/*
	 * look for an unused 2tuple filter index,
	 * and insert the filter to list.
	 */
	for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
		if (!(filter_info->twotuple_mask & (1 << i))) {
			filter_info->twotuple_mask |= 1 << i;
			filter->index = i;
			TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
					  filter,
					  entries);
			break;
		}
	}
	if (i >= E1000_MAX_TTQF_FILTERS) {
		PMD_DRV_LOG(ERR, "2tuple filters are full.");
		rte_free(filter);
		return -ENOSYS;
	}

	igb_inject_2uple_filter(dev, filter);
	return 0;
}
static void
igb_delete_2tuple_filter(struct rte_eth_dev *dev,
			struct e1000_2tuple_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	filter_info->twotuple_mask &= ~(1 << filter->index);
	TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);

	E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
	E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
	rte_free(filter);
}
/*
 * igb_remove_2tuple_filter - remove a 2tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be removed.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_remove_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_2tuple_filter_info filter_2tuple;
	struct e1000_2tuple_filter *filter;
	int ret;

	memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
	ret = ntuple_filter_to_2tuple(ntuple_filter,
				      &filter_2tuple);
	if (ret < 0)
		return ret;

	filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
					 &filter_2tuple);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");
		return -ENOENT;
	}

	igb_delete_2tuple_filter(dev, filter);

	return 0;
}
/* inject an igb flex filter to HW */
static inline void
igb_inject_flex_filter(struct rte_eth_dev *dev,
			struct e1000_flex_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t wufc, queueing;
	uint32_t reg_off;
	uint8_t i, j = 0;

	wufc = E1000_READ_REG(hw, E1000_WUFC);
	if (filter->index < E1000_MAX_FHFT)
		reg_off = E1000_FHFT(filter->index);
	else
		reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);

	E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
			(E1000_WUFC_FLX0 << filter->index));
	queueing = filter->filter_info.len |
		(filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
		(filter->filter_info.priority <<
			E1000_FHFT_QUEUEING_PRIO_SHIFT);
	E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
			queueing);

	for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
		E1000_WRITE_REG(hw, reg_off,
				filter->filter_info.dwords[j]);
		reg_off += sizeof(uint32_t);
		E1000_WRITE_REG(hw, reg_off,
				filter->filter_info.dwords[++j]);
		reg_off += sizeof(uint32_t);
		E1000_WRITE_REG(hw, reg_off,
				(uint32_t)filter->filter_info.mask[i]);
		reg_off += sizeof(uint32_t) * 2;
		++j;
	}
}
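/*
 * Layout note: judging by the write pattern above, each FHFT row appears
 * to hold two filter data dwords followed by one mask dword, with a
 * fourth reserved dword skipped (reg_off advances by two dwords after
 * the mask write). Inferred from the code, not from the datasheet.
 */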
static inline struct e1000_flex_filter *
eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
			struct e1000_flex_filter_info *key)
{
	struct e1000_flex_filter *it;

	TAILQ_FOREACH(it, filter_list, entries) {
		if (memcmp(key, &it->filter_info,
			sizeof(struct e1000_flex_filter_info)) == 0)
			return it;
	}

	return NULL;
}
/* remove a flex byte filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * filter: pointer to the filter that will be removed.
 */
static void
igb_remove_flex_filter(struct rte_eth_dev *dev,
			struct e1000_flex_filter *filter)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t wufc, i;
	uint32_t reg_off;

	wufc = E1000_READ_REG(hw, E1000_WUFC);
	if (filter->index < E1000_MAX_FHFT)
		reg_off = E1000_FHFT(filter->index);
	else
		reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);

	for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
		E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);

	E1000_WRITE_REG(hw, E1000_WUFC, wufc &
		(~(E1000_WUFC_FLX0 << filter->index)));

	filter_info->flex_mask &= ~(1 << filter->index);
	TAILQ_REMOVE(&filter_info->flex_list, filter, entries);
	rte_free(filter);
}
int
eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter,
			bool add)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_flex_filter *flex_filter, *it;
	uint32_t mask;
	uint8_t shift, i;

	flex_filter = rte_zmalloc("e1000_flex_filter",
			sizeof(struct e1000_flex_filter), 0);
	if (flex_filter == NULL)
		return -ENOMEM;

	flex_filter->filter_info.len = filter->len;
	flex_filter->filter_info.priority = filter->priority;
	memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
	for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
		mask = 0;
		/* reverse bits in flex filter's mask */
		for (shift = 0; shift < CHAR_BIT; shift++) {
			if (filter->mask[i] & (0x01 << shift))
				mask |= (0x80 >> shift);
		}
		flex_filter->filter_info.mask[i] = mask;
	}

	it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
				&flex_filter->filter_info);
	if (it == NULL && !add) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");
		rte_free(flex_filter);
		return -ENOENT;
	}
	if (it != NULL && add) {
		PMD_DRV_LOG(ERR, "filter exists.");
		rte_free(flex_filter);
		return -EEXIST;
	}

	if (add) {
		flex_filter->queue = filter->queue;
		/*
		 * look for an unused flex filter index
		 * and insert the filter into the list.
		 */
		for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
			if (!(filter_info->flex_mask & (1 << i))) {
				filter_info->flex_mask |= 1 << i;
				flex_filter->index = i;
				TAILQ_INSERT_TAIL(&filter_info->flex_list,
					flex_filter,
					entries);
				break;
			}
		}
		if (i >= E1000_MAX_FLEX_FILTERS) {
			PMD_DRV_LOG(ERR, "flex filters are full.");
			rte_free(flex_filter);
			return -ENOSYS;
		}

		igb_inject_flex_filter(dev, flex_filter);
	} else {
		igb_remove_flex_filter(dev, it);
		rte_free(flex_filter);
	}

	return 0;
}
static int
eth_igb_get_flex_filter(struct rte_eth_dev *dev,
			struct rte_eth_flex_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_flex_filter flex_filter, *it;
	uint32_t wufc, queueing, wufc_en = 0;

	memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
	flex_filter.filter_info.len = filter->len;
	flex_filter.filter_info.priority = filter->priority;
	memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
	memcpy(flex_filter.filter_info.mask, filter->mask,
			RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT);

	it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
				&flex_filter.filter_info);
	if (it == NULL) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");
		return -ENOENT;
	}

	wufc = E1000_READ_REG(hw, E1000_WUFC);
	wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);

	if ((wufc & wufc_en) == wufc_en) {
		uint32_t reg_off = 0;

		if (it->index < E1000_MAX_FHFT)
			reg_off = E1000_FHFT(it->index);
		else
			reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);

		queueing = E1000_READ_REG(hw,
				reg_off + E1000_FHFT_QUEUEING_OFFSET);
		filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
		filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
			E1000_FHFT_QUEUEING_PRIO_SHIFT;
		filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
			E1000_FHFT_QUEUEING_QUEUE_SHIFT;
	}

	return 0;
}
static int
eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_flex_filter *filter;
	int ret = 0;

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return ret;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
			    filter_op);
		return -EINVAL;
	}

	filter = (struct rte_eth_flex_filter *)arg;
	if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
	    || filter->len % sizeof(uint64_t) != 0) {
		PMD_DRV_LOG(ERR, "filter's length is out of range");
		return -EINVAL;
	}
	if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
		PMD_DRV_LOG(ERR, "filter's priority is out of range");
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = eth_igb_get_flex_filter(dev, filter);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
		ret = -EINVAL;
		break;
	}

	return ret;
}
/* translate elements in struct rte_eth_ntuple_filter
 * to struct e1000_5tuple_filter_info
 */
static inline int
ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
			struct e1000_5tuple_filter_info *filter_info)
{
	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
		return -EINVAL;
	if (filter->priority > E1000_2TUPLE_MAX_PRI)
		return -EINVAL;  /* priority is out of range. */
	if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK)
		return -EINVAL;  /* flags are invalid. */

	switch (filter->dst_ip_mask) {
	case UINT32_MAX:
		filter_info->dst_ip_mask = 0;
		filter_info->dst_ip = filter->dst_ip;
		break;
	case 0:
		filter_info->dst_ip_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
		return -EINVAL;
	}

	switch (filter->src_ip_mask) {
	case UINT32_MAX:
		filter_info->src_ip_mask = 0;
		filter_info->src_ip = filter->src_ip;
		break;
	case 0:
		filter_info->src_ip_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
		return -EINVAL;
	}

	switch (filter->dst_port_mask) {
	case UINT16_MAX:
		filter_info->dst_port_mask = 0;
		filter_info->dst_port = filter->dst_port;
		break;
	case 0:
		filter_info->dst_port_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
		return -EINVAL;
	}

	switch (filter->src_port_mask) {
	case UINT16_MAX:
		filter_info->src_port_mask = 0;
		filter_info->src_port = filter->src_port;
		break;
	case 0:
		filter_info->src_port_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid src_port mask.");
		return -EINVAL;
	}

	switch (filter->proto_mask) {
	case UINT8_MAX:
		filter_info->proto_mask = 0;
		filter_info->proto = filter->proto;
		break;
	case 0:
		filter_info->proto_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid protocol mask.");
		return -EINVAL;
	}

	filter_info->priority = (uint8_t)filter->priority;
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
		filter_info->tcp_flags = filter->tcp_flags;
	else
		filter_info->tcp_flags = 0;

	return 0;
}
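/*
 * In e1000_5tuple_filter_info a mask field of 0 means "compare this
 * field" and 1 means "don't care"; a fully specified user mask
 * (UINT32_MAX/UINT16_MAX/UINT8_MAX) therefore maps to 0 here, matching
 * the FTQF/IMIR mask bits cleared in igb_inject_5tuple_filter_82576().
 */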
static inline struct e1000_5tuple_filter *
igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
			struct e1000_5tuple_filter_info *key)
{
	struct e1000_5tuple_filter *it;

	TAILQ_FOREACH(it, filter_list, entries) {
		if (memcmp(key, &it->filter_info,
			sizeof(struct e1000_5tuple_filter_info)) == 0) {
			return it;
		}
	}

	return NULL;
}
/* inject an igb 5-tuple filter to HW */
static inline void
igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
			   struct e1000_5tuple_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
	uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
	uint8_t i = filter->index;

	ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
	if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
		ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
	if (filter->filter_info.dst_ip_mask == 0)
		ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
	if (filter->filter_info.src_port_mask == 0)
		ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
	if (filter->filter_info.proto_mask == 0)
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
	ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
		E1000_FTQF_QUEUE_MASK;
	ftqf |= E1000_FTQF_QUEUE_ENABLE;
	E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
	E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
	E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);

	spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
	E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);

	imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
	if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
		imir |= E1000_IMIR_PORT_BP;
	else
		imir &= ~E1000_IMIR_PORT_BP;
	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;

	/* tcp flags bits setting. */
	if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) {
		if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_URG;
		if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_ACK;
		if (filter->filter_info.tcp_flags & RTE_TCP_PSH_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_PSH;
		if (filter->filter_info.tcp_flags & RTE_TCP_RST_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_RST;
		if (filter->filter_info.tcp_flags & RTE_TCP_SYN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_SYN;
		if (filter->filter_info.tcp_flags & RTE_TCP_FIN_FLAG)
			imir_ext |= E1000_IMIREXT_CTRL_FIN;
	} else {
		imir_ext |= E1000_IMIREXT_CTRL_BP;
	}

	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
}
/*
 * igb_add_5tuple_filter_82576 - add a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be added.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter *filter;
	uint8_t i;
	int ret;

	filter = rte_zmalloc("e1000_5tuple_filter",
			sizeof(struct e1000_5tuple_filter), 0);
	if (filter == NULL)
		return -ENOMEM;

	ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
					    &filter->filter_info);
	if (ret < 0) {
		rte_free(filter);
		return ret;
	}
	if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
					 &filter->filter_info) != NULL) {
		PMD_DRV_LOG(ERR, "filter exists.");
		rte_free(filter);
		return -EEXIST;
	}
	filter->queue = ntuple_filter->queue;

	/*
	 * look for an unused 5tuple filter index,
	 * and insert the filter to list.
	 */
	for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
		if (!(filter_info->fivetuple_mask & (1 << i))) {
			filter_info->fivetuple_mask |= 1 << i;
			filter->index = i;
			TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
					  filter,
					  entries);
			break;
		}
	}
	if (i >= E1000_MAX_FTQF_FILTERS) {
		PMD_DRV_LOG(ERR, "5tuple filters are full.");
		rte_free(filter);
		return -ENOSYS;
	}

	igb_inject_5tuple_filter_82576(dev, filter);
	return 0;
}
static void
igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
				struct e1000_5tuple_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	filter_info->fivetuple_mask &= ~(1 << filter->index);
	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);

	E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
			E1000_FTQF_VF_BP | E1000_FTQF_MASK);
	E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
	E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
	rte_free(filter);
}
/*
 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: pointer to the filter that will be removed.
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
				struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter_info filter_5tuple;
	struct e1000_5tuple_filter *filter;
	int ret;

	memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
	ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
					    &filter_5tuple);
	if (ret < 0)
		return ret;

	filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
					 &filter_5tuple);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");
		return -ENOENT;
	}

	igb_delete_5tuple_filter_82576(dev, filter);

	return 0;
}
static int
eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	uint32_t rctl;
	struct e1000_hw *hw;
	struct rte_eth_dev_info dev_info;
	uint32_t frame_size = mtu + E1000_ETH_OVERHEAD;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

#ifdef RTE_LIBRTE_82571_SUPPORT
	/* XXX: not bigger than max_rx_pktlen */
	if (hw->mac.type == e1000_82571)
		return -ENOTSUP;
#endif
	ret = eth_igb_infos_get(dev, &dev_info);
	if (ret != 0)
		return ret;

	/* check that mtu is within the allowed range */
	if (mtu < RTE_ETHER_MIN_MTU ||
			frame_size > dev_info.max_rx_pktlen)
		return -EINVAL;

	/* refuse mtu that requires the support of scattered packets when this
	 * feature has not been enabled before.
	 */
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
		return -EINVAL;

	rctl = E1000_READ_REG(hw, E1000_RCTL);

	/* switch to jumbo mode if needed */
	if (frame_size > RTE_ETHER_MAX_LEN) {
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		rctl |= E1000_RCTL_LPE;
	} else {
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		rctl &= ~E1000_RCTL_LPE;
	}
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	E1000_WRITE_REG(hw, E1000_RLPML,
		dev->data->dev_conf.rxmode.max_rx_pkt_len);

	return 0;
}
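/*
 * frame_size above is mtu + E1000_ETH_OVERHEAD, i.e. the MTU plus L2
 * framing bytes (Ethernet header and CRC, plus VLAN tag space if the
 * macro includes it). Whether a given MTU lands in jumbo mode therefore
 * depends on that overhead relative to the 1518-byte RTE_ETHER_MAX_LEN
 * threshold checked above.
 */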
/*
 * igb_add_del_ntuple_filter - add or delete a ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 * add: if true, add filter, if false, remove filter
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
int
igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter,
			bool add)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	switch (ntuple_filter->flags) {
	case RTE_5TUPLE_FLAGS:
	case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
		if (hw->mac.type != e1000_82576)
			return -ENOTSUP;
		if (add)
			ret = igb_add_5tuple_filter_82576(dev,
							  ntuple_filter);
		else
			ret = igb_remove_5tuple_filter_82576(dev,
							     ntuple_filter);
		break;
	case RTE_2TUPLE_FLAGS:
	case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
		if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 &&
			hw->mac.type != e1000_i210 &&
			hw->mac.type != e1000_i211)
			return -ENOTSUP;
		if (add)
			ret = igb_add_2tuple_filter(dev, ntuple_filter);
		else
			ret = igb_remove_2tuple_filter(dev, ntuple_filter);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
/*
 * igb_get_ntuple_filter - get a ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value.
 */
static int
igb_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter_info filter_5tuple;
	struct e1000_2tuple_filter_info filter_2tuple;
	struct e1000_5tuple_filter *p_5tuple_filter;
	struct e1000_2tuple_filter *p_2tuple_filter;
	int ret;

	switch (ntuple_filter->flags) {
	case RTE_5TUPLE_FLAGS:
	case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
		if (hw->mac.type != e1000_82576)
			return -ENOTSUP;
		memset(&filter_5tuple,
			0,
			sizeof(struct e1000_5tuple_filter_info));
		ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
						    &filter_5tuple);
		if (ret < 0)
			return ret;
		p_5tuple_filter = igb_5tuple_filter_lookup_82576(
					&filter_info->fivetuple_list,
					&filter_5tuple);
		if (p_5tuple_filter == NULL) {
			PMD_DRV_LOG(ERR, "filter doesn't exist.");
			return -ENOENT;
		}
		ntuple_filter->queue = p_5tuple_filter->queue;
		break;
	case RTE_2TUPLE_FLAGS:
	case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
		if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
			return -ENOTSUP;
		memset(&filter_2tuple,
			0,
			sizeof(struct e1000_2tuple_filter_info));
		ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
		if (ret < 0)
			return ret;
		p_2tuple_filter = igb_2tuple_filter_lookup(
					&filter_info->twotuple_list,
					&filter_2tuple);
		if (p_2tuple_filter == NULL) {
			PMD_DRV_LOG(ERR, "filter doesn't exist.");
			return -ENOENT;
		}
		ntuple_filter->queue = p_2tuple_filter->queue;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
/*
 * igb_ntuple_filter_handle - Handle operations for ntuple filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation that will be taken.
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
static int
igb_ntuple_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
			    filter_op);
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = igb_add_del_ntuple_filter(dev,
			(struct rte_eth_ntuple_filter *)arg,
			TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = igb_add_del_ntuple_filter(dev,
			(struct rte_eth_ntuple_filter *)arg,
			FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = igb_get_ntuple_filter(dev,
			(struct rte_eth_ntuple_filter *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int
igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
			uint16_t ethertype)
{
	int i;

	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_filters[i].ethertype == ethertype &&
		    (filter_info->ethertype_mask & (1 << i)))
			return i;
	}
	return -1;
}

static int
igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
			uint16_t ethertype, uint32_t etqf)
{
	int i;

	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
		if (!(filter_info->ethertype_mask & (1 << i))) {
			filter_info->ethertype_mask |= 1 << i;
			filter_info->ethertype_filters[i].ethertype = ethertype;
			filter_info->ethertype_filters[i].etqf = etqf;
			return i;
		}
	}
	return -1;
}

static int
igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
			uint8_t idx)
{
	if (idx >= E1000_MAX_ETQF_FILTERS)
		return -1;
	filter_info->ethertype_mask &= ~(1 << idx);
	filter_info->ethertype_filters[idx].ethertype = 0;
	filter_info->ethertype_filters[idx].etqf = 0;
	return idx;
}
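/*
 * Ethertype filters are tracked purely in software here: ethertype_mask
 * is a bitmap of occupied ETQF slots and the cached etqf word lets
 * igb_ethertype_filter_restore() reprogram the hardware after a reset
 * without recomputing anything.
 */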
static int
igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t etqf = 0;
	int ret;

	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
			" ethertype filter.", filter->ether_type);
		return -EINVAL;
	}
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
		return -EINVAL;
	}
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		PMD_DRV_LOG(ERR, "drop option is unsupported.");
		return -EINVAL;
	}

	ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
	if (ret >= 0 && add) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
			    filter->ether_type);
		return -EEXIST;
	}
	if (ret < 0 && !add) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
			    filter->ether_type);
		return -ENOENT;
	}

	if (add) {
		etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
		etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
		etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
		ret = igb_ethertype_filter_insert(filter_info,
				filter->ether_type, etqf);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "ethertype filters are full.");
			return -ENOSYS;
		}
	} else {
		ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
		if (ret < 0)
			return -ENOSYS;
	}
	E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
	E1000_WRITE_FLUSH(hw);

	return 0;
}
static int
igb_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t etqf;
	int ret;

	ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
			    filter->ether_type);
		return -ENOENT;
	}

	etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
	if (etqf & E1000_ETQF_FILTER_ENABLE) {
		filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
		filter->flags = 0;
		filter->queue = (etqf & E1000_ETQF_QUEUE) >>
				E1000_ETQF_QUEUE_SHIFT;
		return 0;
	}

	return -ENOENT;
}
/*
 * igb_ethertype_filter_handle - Handle operations for ethertype filter.
 * @dev: pointer to rte_eth_dev structure
 * @filter_op: operation that will be taken.
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
static int
igb_ethertype_filter_handle(struct rte_eth_dev *dev,
			enum rte_filter_op filter_op,
			void *arg)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL) {
		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
			    filter_op);
		return -EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = igb_add_del_ethertype_filter(dev,
			(struct rte_eth_ethertype_filter *)arg,
			TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = igb_add_del_ethertype_filter(dev,
			(struct rte_eth_ethertype_filter *)arg,
			FALSE);
		break;
	case RTE_ETH_FILTER_GET:
		ret = igb_get_ethertype_filter(dev,
			(struct rte_eth_ethertype_filter *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int
eth_igb_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = 0;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ret = igb_ntuple_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = igb_ethertype_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_SYN:
		ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_FLEXIBLE:
		ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &igb_flow_ops;
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
							filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int
eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
			 struct rte_ether_addr *mc_addr_set,
			 uint32_t nb_mc_addr)
{
	struct e1000_hw *hw;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
	return 0;
}
static uint64_t
igb_read_systime_cyclecounter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t systime_cycles;

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		/*
		 * Need to read System Time Residue Register to be able
		 * to read the other two registers.
		 */
		E1000_READ_REG(hw, E1000_SYSTIMR);
		/* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
		systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
		systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
				* NSEC_PER_SEC;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
		/*
		 * Need to read System Time Residue Register to be able
		 * to read the other two registers.
		 */
		E1000_READ_REG(hw, E1000_SYSTIMR);
		systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
		/* Only the 8 LSB are valid. */
		systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH)
				& 0xff) << 32;
		break;
	default:
		systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
		systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
				<< 32;
		break;
	}

	return systime_cycles;
}
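/*
 * The SYSTIM layout differs per MAC: on i210/i211 the pair is split as
 * seconds (SYSTIMH) and nanoseconds (SYSTIML), so the 64-bit value is
 * rebuilt as SYSTIMH * NSEC_PER_SEC + SYSTIML, while on 82580/i350/i354
 * only the low 8 bits of SYSTIMH are significant, giving the 40-bit
 * counter that igb_start_timecounters() masks with (1ULL << 40) - 1.
 */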
static uint64_t
igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t rx_tstamp_cycles;

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		/* RXSTMPL stores ns and RXSTMPH stores seconds. */
		rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
		rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
				* NSEC_PER_SEC;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
		rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
		/* Only the 8 LSB are valid. */
		rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH)
				& 0xff) << 32;
		break;
	default:
		rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
		rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
				<< 32;
		break;
	}

	return rx_tstamp_cycles;
}
static uint64_t
igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t tx_tstamp_cycles;

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		/* TXSTMPL stores ns and TXSTMPH stores seconds. */
		tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
		tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
				* NSEC_PER_SEC;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
		tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
		/* Only the 8 LSB are valid. */
		tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH)
				& 0xff) << 32;
		break;
	default:
		tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
		tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
				<< 32;
		break;
	}

	return tx_tstamp_cycles;
}
static void
igb_start_timecounters(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter = dev->data->dev_private;
	uint32_t incval = 1;
	uint32_t shift = 0;
	uint64_t mask = E1000_CYCLECOUNTER_MASK;

	switch (hw->mac.type) {
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
		/* 32 LSB bits + 8 MSB bits = 40 bits */
		mask = (1ULL << 40) - 1;
		/* fall-through */
	case e1000_i210:
	case e1000_i211:
		/*
		 * Start incrementing the register
		 * used to timestamp PTP packets.
		 */
		E1000_WRITE_REG(hw, E1000_TIMINCA, incval);
		break;
	case e1000_82576:
		incval = E1000_INCVALUE_82576;
		shift = IGB_82576_TSYNC_SHIFT;
		E1000_WRITE_REG(hw, E1000_TIMINCA,
				E1000_INCPERIOD_82576 | incval);
		break;
	default:
		/* Not supported. */
		return;
	}

	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));

	adapter->systime_tc.cc_mask = mask;
	adapter->systime_tc.cc_shift = shift;
	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->rx_tstamp_tc.cc_mask = mask;
	adapter->rx_tstamp_tc.cc_shift = shift;
	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->tx_tstamp_tc.cc_mask = mask;
	adapter->tx_tstamp_tc.cc_shift = shift;
	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
}
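/*
 * On 82576 the counter does not tick in plain nanoseconds: TIMINCA is
 * programmed to add INCVALUE (16 << 16) every 16 ns, so the counter
 * advances by 2^16 per nanosecond and rte_timecounter_update() recovers
 * ns via the cc_shift of 16. For the other supported MACs the shift
 * stays 0 and the counter is read directly in nanoseconds.
 */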
static int
igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
{
	struct e1000_adapter *adapter = dev->data->dev_private;

	adapter->systime_tc.nsec += delta;
	adapter->rx_tstamp_tc.nsec += delta;
	adapter->tx_tstamp_tc.nsec += delta;

	return 0;
}
static int
igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
{
	uint64_t ns;
	struct e1000_adapter *adapter = dev->data->dev_private;

	ns = rte_timespec_to_ns(ts);

	/* Set the timecounters to a new value. */
	adapter->systime_tc.nsec = ns;
	adapter->rx_tstamp_tc.nsec = ns;
	adapter->tx_tstamp_tc.nsec = ns;

	return 0;
}
static int
igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
{
	uint64_t ns, systime_cycles;
	struct e1000_adapter *adapter = dev->data->dev_private;

	systime_cycles = igb_read_systime_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
	*ts = rte_ns_to_timespec(ns);

	return 0;
}
static int
igb_timesync_enable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;
	uint32_t tsauxc;

	/* Stop the timesync system time. */
	E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0);
	/* Reset the timesync system time value. */
	switch (hw->mac.type) {
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0);
		/* fall-through */
	case e1000_82576:
		E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0);
		E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0);
		break;
	default:
		/* Not supported. */
		return -ENOTSUP;
	}

	/* Enable system time; it isn't on by default. */
	tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
	tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
	E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);

	igb_start_timecounters(dev);

	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
			(RTE_ETHER_TYPE_1588 |
			 E1000_ETQF_FILTER_ENABLE |
			 E1000_ETQF_1588));

	/* Enable timestamping of received PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
	tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);

	/* Enable timestamping of transmitted PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
	tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);

	return 0;
}
static int
igb_timesync_disable(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;

	/* Disable timestamping of transmitted PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
	tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);

	/* Disable timestamping of received PTP packets. */
	tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
	tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
	E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);

	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);

	/* Stop incrementing the System Time registers. */
	E1000_WRITE_REG(hw, E1000_TIMINCA, 0);

	return 0;
}
static int
igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
			       struct timespec *timestamp,
			       uint32_t flags __rte_unused)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter = dev->data->dev_private;
	uint32_t tsync_rxctl;
	uint64_t rx_tstamp_cycles;
	uint64_t ns;

	tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
	if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
		return -EINVAL;

	rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}
static int
igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
			       struct timespec *timestamp)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter = dev->data->dev_private;
	uint32_t tsync_txctl;
	uint64_t tx_tstamp_cycles;
	uint64_t ns;

	tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
	if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
		return -EINVAL;

	tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}
static int
eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
{
	int count = 0;
	int g_ind = 0;
	const struct reg_info *reg_group;

	while ((reg_group = igb_regs[g_ind++]))
		count += igb_reg_group_count(reg_group);

	return count;
}

static int
igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
{
	int count = 0;
	int g_ind = 0;
	const struct reg_info *reg_group;

	while ((reg_group = igbvf_regs[g_ind++]))
		count += igb_reg_group_count(reg_group);

	return count;
}
static int
eth_igb_get_regs(struct rte_eth_dev *dev,
	struct rte_dev_reg_info *regs)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *data = regs->data;
	int g_ind = 0;
	int count = 0;
	const struct reg_info *reg_group;

	if (data == NULL) {
		regs->length = eth_igb_get_reg_length(dev);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;
		while ((reg_group = igb_regs[g_ind++]))
			count += igb_read_regs_group(dev, &data[count],
							reg_group);
		return 0;
	}

	return -ENOTSUP;
}
static int
igbvf_get_regs(struct rte_eth_dev *dev,
	struct rte_dev_reg_info *regs)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *data = regs->data;
	int g_ind = 0;
	int count = 0;
	const struct reg_info *reg_group;

	if (data == NULL) {
		regs->length = igbvf_get_reg_length(dev);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;
		while ((reg_group = igbvf_regs[g_ind++]))
			count += igb_read_regs_group(dev, &data[count],
							reg_group);
		return 0;
	}

	return -ENOTSUP;
}
static int
eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Return unit is byte count */
	return hw->nvm.word_size * 2;
}
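/*
 * The NVM is word (16-bit) addressed, hence the byte count of
 * word_size * 2. The get/set helpers below likewise convert byte
 * offsets and lengths to words by shifting right once.
 */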
static int
eth_igb_get_eeprom(struct rte_eth_dev *dev,
	struct rte_dev_eeprom_info *in_eeprom)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_nvm_info *nvm = &hw->nvm;
	uint16_t *data = in_eeprom->data;
	int first, length;

	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if ((first >= hw->nvm.word_size) ||
	    ((first + length) >= hw->nvm.word_size))
		return -EINVAL;

	in_eeprom->magic = hw->vendor_id |
		((uint32_t)hw->device_id << 16);

	if ((nvm->ops.read) == NULL)
		return -ENOTSUP;

	return nvm->ops.read(hw, first, length, data);
}
static int
eth_igb_set_eeprom(struct rte_eth_dev *dev,
	struct rte_dev_eeprom_info *in_eeprom)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_nvm_info *nvm = &hw->nvm;
	uint16_t *data = in_eeprom->data;
	int first, length;

	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if ((first >= hw->nvm.word_size) ||
	    ((first + length) >= hw->nvm.word_size))
		return -EINVAL;

	in_eeprom->magic = (uint32_t)hw->vendor_id |
		((uint32_t)hw->device_id << 16);

	if ((nvm->ops.write) == NULL)
		return -ENOTSUP;

	return nvm->ops.write(hw, first, length, data);
}
static int
eth_igb_get_module_info(struct rte_eth_dev *dev,
			struct rte_eth_dev_module_info *modinfo)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	uint32_t status = 0;
	uint16_t sff8472_rev, addr_mode;
	bool page_swap = false;

	if (hw->phy.media_type == e1000_media_type_copper ||
	    hw->phy.media_type == e1000_media_type_unknown)
		return -EOPNOTSUPP;

	/* Check whether we support SFF-8472 or not */
	status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
	if (status)
		return -EIO;

	/* addressing mode is not supported */
	status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
	if (status)
		return -EIO;

	/* addressing mode is not supported */
	if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
		PMD_DRV_LOG(ERR,
			    "Address change required to access page 0xA2, "
			    "but not supported. Please report the module "
			    "type to the driver maintainers.\n");
		page_swap = true;
	}

	if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
		/* We have an SFP, but it does not support SFF-8472 */
		modinfo->type = RTE_ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have an SFP which supports a revision of SFF-8472 */
		modinfo->type = RTE_ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}
static int
eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
			  struct rte_dev_eeprom_info *info)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	uint32_t status = 0;
	uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1];
	u16 first_word, last_word;
	int i = 0;

	if (info->length == 0)
		return -EINVAL;

	first_word = info->offset >> 1;
	last_word = (info->offset + info->length - 1) >> 1;

	/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
	for (i = 0; i < last_word - first_word + 1; i++) {
		status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2,
						&dataword[i]);
		if (status) {
			/* Error occurred while reading module */
			return -EIO;
		}

		dataword[i] = rte_be_to_cpu_16(dataword[i]);
	}

	memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length);

	return 0;
}
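/*
 * Module contents arrive over I2C one big-endian 16-bit word at a time;
 * rte_be_to_cpu_16() fixes the byte order and the final memcpy skips one
 * byte when the caller's offset is odd.
 */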
static int
eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vec = E1000_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = E1000_RX_VEC_START;

	uint32_t mask = 1 << (queue_id + vec);

	E1000_WRITE_REG(hw, E1000_EIMC, mask);
	E1000_WRITE_FLUSH(hw);

	return 0;
}
static int
eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vec = E1000_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = E1000_RX_VEC_START;

	uint32_t mask = 1 << (queue_id + vec);
	uint32_t regval;

	regval = E1000_READ_REG(hw, E1000_EIMS);
	E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
	E1000_WRITE_FLUSH(hw);

	rte_intr_ack(intr_handle);

	return 0;
}
static void
eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
		   uint8_t index, uint8_t offset)
{
	uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

	/* clear bits */
	val &= ~((uint32_t)0xFF << offset);

	/* write vector and valid bit */
	val |= (msix_vector | E1000_IVAR_VALID) << offset;

	E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
}
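/*
 * Each 32-bit IVAR register packs four 8-bit entries, one per byte lane;
 * 'offset' selects the lane (0, 8, 16 or 24) and each entry carries the
 * MSI-X vector number plus E1000_IVAR_VALID.
 */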
static void
eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
			uint8_t queue, uint8_t msix_vector)
{
	uint32_t tmp = 0;

	if (hw->mac.type == e1000_82575) {
		if (direction == 0)
			tmp = E1000_EICR_RX_QUEUE0 << queue;
		else if (direction == 1)
			tmp = E1000_EICR_TX_QUEUE0 << queue;
		E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
	} else if (hw->mac.type == e1000_82576) {
		if ((direction == 0) || (direction == 1))
			eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
					((queue & 0x8) << 1) +
					8 * direction);
	} else if ((hw->mac.type == e1000_82580) ||
			(hw->mac.type == e1000_i350) ||
			(hw->mac.type == e1000_i354) ||
			(hw->mac.type == e1000_i210) ||
			(hw->mac.type == e1000_i211)) {
		if ((direction == 0) || (direction == 1))
			eth_igb_write_ivar(hw, msix_vector,
					queue >> 1,
					((queue & 0x1) << 4) +
					8 * direction);
	}
}
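/*
 * direction 0 maps an Rx queue, 1 a Tx queue. 82575 uses the per-vector
 * MSIXBM bitmap directly; 82576 and later fold the queue number into an
 * IVAR index/byte-lane pair, assuming the lane layout described at
 * eth_igb_write_ivar() above.
 */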
/* Sets up the hardware to generate MSI-X interrupts properly
 * @hw
 *  board private structure
 */
static void
eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
{
	int queue_id;
	uint32_t tmpval, regval, intr_mask;
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t vec = E1000_MISC_VEC_ID;
	uint32_t base = E1000_MISC_VEC_ID;
	uint32_t misc_shift = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		vec = base = E1000_RX_VEC_START;
		misc_shift = 1;
	}

	/* set interrupt vector for other causes */
	if (hw->mac.type == e1000_82575) {
		tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmpval |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read */
		tmpval |= E1000_CTRL_EXT_EIAME;
		tmpval |= E1000_CTRL_EXT_IRCA;

		E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);

		/* enable msix_other interrupt */
		E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
		regval = E1000_READ_REG(hw, E1000_EIAC);
		E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
		regval = E1000_READ_REG(hw, E1000_EIAM);
		E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
	} else if ((hw->mac.type == e1000_82576) ||
			(hw->mac.type == e1000_82580) ||
			(hw->mac.type == e1000_i350) ||
			(hw->mac.type == e1000_i354) ||
			(hw->mac.type == e1000_i210) ||
			(hw->mac.type == e1000_i211)) {
		/* turn on MSI-X capability first */
		E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
					E1000_GPIE_PBA | E1000_GPIE_EIAME |
					E1000_GPIE_NSICR);
		intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
			misc_shift;

		if (dev->data->dev_conf.intr_conf.lsc != 0)
			intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);

		regval = E1000_READ_REG(hw, E1000_EIAC);
		E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);

		/* enable msix_other interrupt */
		regval = E1000_READ_REG(hw, E1000_EIMS);
		E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
		tmpval = (IGB_MSIX_OTHER_INTR_VEC | E1000_IVAR_VALID) << 8;
		E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
	}

	/* use EIAM to auto-mask when MSI-X interrupt
	 * is asserted, this saves a register write for every interrupt
	 */
	intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
		misc_shift;

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		intr_mask |= (1 << IGB_MSIX_OTHER_INTR_VEC);

	regval = E1000_READ_REG(hw, E1000_EIAM);
	E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);

	for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
		eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
		intr_handle->intr_vec[queue_id] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	E1000_WRITE_FLUSH(hw);
}
/* restore n-tuple filter */
static inline void
igb_ntuple_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter *p_5tuple;
	struct e1000_2tuple_filter *p_2tuple;

	TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, entries) {
		igb_inject_5tuple_filter_82576(dev, p_5tuple);
	}

	TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) {
		igb_inject_2uple_filter(dev, p_2tuple);
	}
}
/* restore SYN filter */
static inline void
igb_syn_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t synqf;

	synqf = filter_info->syn_info;

	if (synqf & E1000_SYN_FILTER_ENABLE) {
		E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
		E1000_WRITE_FLUSH(hw);
	}
}
/* restore ethernet type filter */
static inline void
igb_ethertype_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i;

	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			E1000_WRITE_REG(hw, E1000_ETQF(i),
				filter_info->ethertype_filters[i].etqf);
			E1000_WRITE_FLUSH(hw);
		}
	}
}
/* restore flex byte filter */
static inline void
igb_flex_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_flex_filter *flex_filter;

	TAILQ_FOREACH(flex_filter, &filter_info->flex_list, entries) {
		igb_inject_flex_filter(dev, flex_filter);
	}
}
/* restore rss filter */
static inline void
igb_rss_filter_restore(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->rss_info.conf.queue_num)
		igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
}
/* restore all types filter */
static int
igb_filter_restore(struct rte_eth_dev *dev)
{
	igb_ntuple_filter_restore(dev);
	igb_ethertype_filter_restore(dev);
	igb_syn_filter_restore(dev);
	igb_flex_filter_restore(dev);
	igb_rss_filter_restore(dev);

	return 0;
}
RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");

/* see e1000_logs.c */
RTE_INIT(e1000_init_log)
{
	e1000_igb_init_log();
}