1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
3 */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <stdarg.h>
10
11 #include <rte_common.h>
12 #include <rte_interrupts.h>
13 #include <rte_byteorder.h>
14 #include <rte_log.h>
15 #include <rte_debug.h>
16 #include <rte_pci.h>
17 #include <rte_bus_pci.h>
18 #include <rte_ether.h>
19 #include <rte_ethdev_driver.h>
20 #include <rte_ethdev_pci.h>
21 #include <rte_memory.h>
22 #include <rte_eal.h>
23 #include <rte_malloc.h>
24 #include <rte_dev.h>
25
26 #include "e1000_logs.h"
27 #include "base/e1000_api.h"
28 #include "e1000_ethdev.h"
29 #include "igb_regs.h"
30
31 /*
32 * Default values for port configuration
33 */
34 #define IGB_DEFAULT_RX_FREE_THRESH 32
35
36 #define IGB_DEFAULT_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8)
37 #define IGB_DEFAULT_RX_HTHRESH 8
38 #define IGB_DEFAULT_RX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 4)
39
40 #define IGB_DEFAULT_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
41 #define IGB_DEFAULT_TX_HTHRESH 1
42 #define IGB_DEFAULT_TX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 16)
43
44 /* Bit shift and mask */
45 #define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
46 #define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
47 #define IGB_8_BIT_WIDTH CHAR_BIT
48 #define IGB_8_BIT_MASK UINT8_MAX
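/*
 * Worked example (illustrative): RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
 * expands to 0x0F, i.e. the low four bits set, while the 8-bit mask is
 * simply UINT8_MAX (0xFF).
 */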
49
50 /* Additional timesync values. */
51 #define E1000_CYCLECOUNTER_MASK 0xffffffffffffffffULL
52 #define E1000_ETQF_FILTER_1588 3
53 #define IGB_82576_TSYNC_SHIFT 16
54 #define E1000_INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
55 #define E1000_INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
56 #define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000
57
58 #define E1000_VTIVAR_MISC 0x01740
59 #define E1000_VTIVAR_MISC_MASK 0xFF
60 #define E1000_VTIVAR_VALID 0x80
61 #define E1000_VTIVAR_MISC_MAILBOX 0
62 #define E1000_VTIVAR_MISC_INTR_MASK 0x3
63
64 /* External VLAN Enable bit mask */
65 #define E1000_CTRL_EXT_EXT_VLAN (1 << 26)
66
67 /* External VLAN Ether Type bit mask and shift */
68 #define E1000_VET_VET_EXT 0xFFFF0000
69 #define E1000_VET_VET_EXT_SHIFT 16
70
71 static int eth_igb_configure(struct rte_eth_dev *dev);
72 static int eth_igb_start(struct rte_eth_dev *dev);
73 static void eth_igb_stop(struct rte_eth_dev *dev);
74 static int eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
75 static int eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
76 static void eth_igb_close(struct rte_eth_dev *dev);
77 static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
78 static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
79 static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
80 static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
81 static int eth_igb_link_update(struct rte_eth_dev *dev,
82 int wait_to_complete);
83 static int eth_igb_stats_get(struct rte_eth_dev *dev,
84 struct rte_eth_stats *rte_stats);
85 static int eth_igb_xstats_get(struct rte_eth_dev *dev,
86 struct rte_eth_xstat *xstats, unsigned n);
87 static int eth_igb_xstats_get_by_id(struct rte_eth_dev *dev,
88 const uint64_t *ids,
89 uint64_t *values, unsigned int n);
90 static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
91 struct rte_eth_xstat_name *xstats_names,
92 unsigned int size);
93 static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
94 struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
95 unsigned int limit);
96 static void eth_igb_stats_reset(struct rte_eth_dev *dev);
97 static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
98 static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
99 char *fw_version, size_t fw_size);
100 static void eth_igb_infos_get(struct rte_eth_dev *dev,
101 struct rte_eth_dev_info *dev_info);
102 static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
103 static void eth_igbvf_infos_get(struct rte_eth_dev *dev,
104 struct rte_eth_dev_info *dev_info);
105 static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
106 struct rte_eth_fc_conf *fc_conf);
107 static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
108 struct rte_eth_fc_conf *fc_conf);
109 static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
110 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
111 static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
112 static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
113 struct rte_intr_handle *handle);
114 static void eth_igb_interrupt_handler(void *param);
115 static int igb_hardware_init(struct e1000_hw *hw);
116 static void igb_hw_control_acquire(struct e1000_hw *hw);
117 static void igb_hw_control_release(struct e1000_hw *hw);
118 static void igb_init_manageability(struct e1000_hw *hw);
119 static void igb_release_manageability(struct e1000_hw *hw);
120
121 static int eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
122
123 static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
124 uint16_t vlan_id, int on);
125 static int eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
126 enum rte_vlan_type vlan_type,
127 uint16_t tpid_id);
128 static int eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
129
130 static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
131 static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
132 static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
133 static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
134 static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
135 static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);
136
137 static int eth_igb_led_on(struct rte_eth_dev *dev);
138 static int eth_igb_led_off(struct rte_eth_dev *dev);
139
140 static void igb_intr_disable(struct e1000_hw *hw);
141 static int igb_get_rx_buffer_size(struct e1000_hw *hw);
142 static int eth_igb_rar_set(struct rte_eth_dev *dev,
143 struct ether_addr *mac_addr,
144 uint32_t index, uint32_t pool);
145 static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
146 static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
147 struct ether_addr *addr);
148
149 static void igbvf_intr_disable(struct e1000_hw *hw);
150 static int igbvf_dev_configure(struct rte_eth_dev *dev);
151 static int igbvf_dev_start(struct rte_eth_dev *dev);
152 static void igbvf_dev_stop(struct rte_eth_dev *dev);
153 static void igbvf_dev_close(struct rte_eth_dev *dev);
154 static void igbvf_promiscuous_enable(struct rte_eth_dev *dev);
155 static void igbvf_promiscuous_disable(struct rte_eth_dev *dev);
156 static void igbvf_allmulticast_enable(struct rte_eth_dev *dev);
157 static void igbvf_allmulticast_disable(struct rte_eth_dev *dev);
158 static int eth_igbvf_link_update(struct e1000_hw *hw);
159 static int eth_igbvf_stats_get(struct rte_eth_dev *dev,
160 struct rte_eth_stats *rte_stats);
161 static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
162 struct rte_eth_xstat *xstats, unsigned n);
163 static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev,
164 struct rte_eth_xstat_name *xstats_names,
165 unsigned limit);
166 static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
167 static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
168 uint16_t vlan_id, int on);
169 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
170 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
171 static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
172 struct ether_addr *addr);
173 static int igbvf_get_reg_length(struct rte_eth_dev *dev);
174 static int igbvf_get_regs(struct rte_eth_dev *dev,
175 struct rte_dev_reg_info *regs);
176
177 static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
178 struct rte_eth_rss_reta_entry64 *reta_conf,
179 uint16_t reta_size);
180 static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
181 struct rte_eth_rss_reta_entry64 *reta_conf,
182 uint16_t reta_size);
183
184 static int eth_igb_syn_filter_get(struct rte_eth_dev *dev,
185 struct rte_eth_syn_filter *filter);
186 static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
187 enum rte_filter_op filter_op,
188 void *arg);
189 static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
190 struct rte_eth_ntuple_filter *ntuple_filter);
191 static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
192 struct rte_eth_ntuple_filter *ntuple_filter);
193 static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
194 struct rte_eth_flex_filter *filter);
195 static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
196 enum rte_filter_op filter_op,
197 void *arg);
198 static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
199 struct rte_eth_ntuple_filter *ntuple_filter);
200 static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
201 struct rte_eth_ntuple_filter *ntuple_filter);
202 static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
203 struct rte_eth_ntuple_filter *filter);
204 static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
205 enum rte_filter_op filter_op,
206 void *arg);
207 static int igb_ethertype_filter_handle(struct rte_eth_dev *dev,
208 enum rte_filter_op filter_op,
209 void *arg);
210 static int igb_get_ethertype_filter(struct rte_eth_dev *dev,
211 struct rte_eth_ethertype_filter *filter);
212 static int eth_igb_filter_ctrl(struct rte_eth_dev *dev,
213 enum rte_filter_type filter_type,
214 enum rte_filter_op filter_op,
215 void *arg);
216 static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
217 static int eth_igb_get_regs(struct rte_eth_dev *dev,
218 struct rte_dev_reg_info *regs);
219 static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
220 static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
221 struct rte_dev_eeprom_info *eeprom);
222 static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
223 struct rte_dev_eeprom_info *eeprom);
224 static int eth_igb_get_module_info(struct rte_eth_dev *dev,
225 struct rte_eth_dev_module_info *modinfo);
226 static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
227 struct rte_dev_eeprom_info *info);
228 static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
229 struct ether_addr *mc_addr_set,
230 uint32_t nb_mc_addr);
231 static int igb_timesync_enable(struct rte_eth_dev *dev);
232 static int igb_timesync_disable(struct rte_eth_dev *dev);
233 static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
234 struct timespec *timestamp,
235 uint32_t flags);
236 static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
237 struct timespec *timestamp);
238 static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
239 static int igb_timesync_read_time(struct rte_eth_dev *dev,
240 struct timespec *timestamp);
241 static int igb_timesync_write_time(struct rte_eth_dev *dev,
242 const struct timespec *timestamp);
243 static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
244 uint16_t queue_id);
245 static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
246 uint16_t queue_id);
247 static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
248 uint8_t queue, uint8_t msix_vector);
249 static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
250 uint8_t index, uint8_t offset);
251 static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
252 static void eth_igbvf_interrupt_handler(void *param);
253 static void igbvf_mbx_process(struct rte_eth_dev *dev);
254 static int igb_filter_restore(struct rte_eth_dev *dev);
255
256 /*
257  * Define VF stats macro for registers that are not cleared on read
258 */
259 #define UPDATE_VF_STAT(reg, last, cur) \
260 { \
261 u32 latest = E1000_READ_REG(hw, reg); \
262 cur += (latest - last) & UINT_MAX; \
263 last = latest; \
264 }
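/*
 * Worked example (illustrative): the unsigned subtraction above keeps the
 * accumulation correct across 32-bit counter wraparound. If last = 0xFFFFFFF0
 * and the register now reads latest = 0x00000010, then
 * (latest - last) & UINT_MAX == 0x20, so cur still advances by 32.
 */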
265
266 #define IGB_FC_PAUSE_TIME 0x0680
267 #define IGB_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
268 #define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
269
270 #define IGBVF_PMD_NAME "rte_igbvf_pmd" /* PMD name */
271
272 static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
273
274 /*
275 * The set of PCI devices this driver supports
276 */
277 static const struct rte_pci_id pci_id_igb_map[] = {
278 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) },
279 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) },
280 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) },
281 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) },
282 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
283 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) },
284 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) },
285 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) },
286
287 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) },
288 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) },
289 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) },
290
291 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) },
292 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) },
293 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) },
294 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) },
295 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) },
296 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) },
297
298 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) },
299 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) },
300 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) },
301 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) },
302 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) },
303 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) },
304 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) },
305 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) },
306 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
307 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
308 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
309 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
310 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
311 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
312 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
313 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
314 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
315 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) },
316 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) },
317 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
318 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) },
319 { .vendor_id = 0, /* sentinel */ },
320 };
321
322 /*
323 * The set of PCI devices this driver supports (for 82576&I350 VF)
324 */
325 static const struct rte_pci_id pci_id_igbvf_map[] = {
326 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) },
327 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) },
328 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) },
329 { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) },
330 { .vendor_id = 0, /* sentinel */ },
331 };
332
333 static const struct rte_eth_desc_lim rx_desc_lim = {
334 .nb_max = E1000_MAX_RING_DESC,
335 .nb_min = E1000_MIN_RING_DESC,
336 .nb_align = IGB_RXD_ALIGN,
337 };
338
339 static const struct rte_eth_desc_lim tx_desc_lim = {
340 .nb_max = E1000_MAX_RING_DESC,
341 .nb_min = E1000_MIN_RING_DESC,
342 .nb_align = IGB_RXD_ALIGN,
343 .nb_seg_max = IGB_TX_MAX_SEG,
344 .nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG,
345 };
346
347 static const struct eth_dev_ops eth_igb_ops = {
348 .dev_configure = eth_igb_configure,
349 .dev_start = eth_igb_start,
350 .dev_stop = eth_igb_stop,
351 .dev_set_link_up = eth_igb_dev_set_link_up,
352 .dev_set_link_down = eth_igb_dev_set_link_down,
353 .dev_close = eth_igb_close,
354 .promiscuous_enable = eth_igb_promiscuous_enable,
355 .promiscuous_disable = eth_igb_promiscuous_disable,
356 .allmulticast_enable = eth_igb_allmulticast_enable,
357 .allmulticast_disable = eth_igb_allmulticast_disable,
358 .link_update = eth_igb_link_update,
359 .stats_get = eth_igb_stats_get,
360 .xstats_get = eth_igb_xstats_get,
361 .xstats_get_by_id = eth_igb_xstats_get_by_id,
362 .xstats_get_names_by_id = eth_igb_xstats_get_names_by_id,
363 .xstats_get_names = eth_igb_xstats_get_names,
364 .stats_reset = eth_igb_stats_reset,
365 .xstats_reset = eth_igb_xstats_reset,
366 .fw_version_get = eth_igb_fw_version_get,
367 .dev_infos_get = eth_igb_infos_get,
368 .dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
369 .mtu_set = eth_igb_mtu_set,
370 .vlan_filter_set = eth_igb_vlan_filter_set,
371 .vlan_tpid_set = eth_igb_vlan_tpid_set,
372 .vlan_offload_set = eth_igb_vlan_offload_set,
373 .rx_queue_setup = eth_igb_rx_queue_setup,
374 .rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
375 .rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
376 .rx_queue_release = eth_igb_rx_queue_release,
377 .rx_queue_count = eth_igb_rx_queue_count,
378 .rx_descriptor_done = eth_igb_rx_descriptor_done,
379 .rx_descriptor_status = eth_igb_rx_descriptor_status,
380 .tx_descriptor_status = eth_igb_tx_descriptor_status,
381 .tx_queue_setup = eth_igb_tx_queue_setup,
382 .tx_queue_release = eth_igb_tx_queue_release,
383 .tx_done_cleanup = eth_igb_tx_done_cleanup,
384 .dev_led_on = eth_igb_led_on,
385 .dev_led_off = eth_igb_led_off,
386 .flow_ctrl_get = eth_igb_flow_ctrl_get,
387 .flow_ctrl_set = eth_igb_flow_ctrl_set,
388 .mac_addr_add = eth_igb_rar_set,
389 .mac_addr_remove = eth_igb_rar_clear,
390 .mac_addr_set = eth_igb_default_mac_addr_set,
391 .reta_update = eth_igb_rss_reta_update,
392 .reta_query = eth_igb_rss_reta_query,
393 .rss_hash_update = eth_igb_rss_hash_update,
394 .rss_hash_conf_get = eth_igb_rss_hash_conf_get,
395 .filter_ctrl = eth_igb_filter_ctrl,
396 .set_mc_addr_list = eth_igb_set_mc_addr_list,
397 .rxq_info_get = igb_rxq_info_get,
398 .txq_info_get = igb_txq_info_get,
399 .timesync_enable = igb_timesync_enable,
400 .timesync_disable = igb_timesync_disable,
401 .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
402 .timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
403 .get_reg = eth_igb_get_regs,
404 .get_eeprom_length = eth_igb_get_eeprom_length,
405 .get_eeprom = eth_igb_get_eeprom,
406 .set_eeprom = eth_igb_set_eeprom,
407 .get_module_info = eth_igb_get_module_info,
408 .get_module_eeprom = eth_igb_get_module_eeprom,
409 .timesync_adjust_time = igb_timesync_adjust_time,
410 .timesync_read_time = igb_timesync_read_time,
411 .timesync_write_time = igb_timesync_write_time,
412 };
413
414 /*
415  * dev_ops for the virtual function; only the bare necessities for basic
416  * VF operation have been implemented
417 */
418 static const struct eth_dev_ops igbvf_eth_dev_ops = {
419 .dev_configure = igbvf_dev_configure,
420 .dev_start = igbvf_dev_start,
421 .dev_stop = igbvf_dev_stop,
422 .dev_close = igbvf_dev_close,
423 .promiscuous_enable = igbvf_promiscuous_enable,
424 .promiscuous_disable = igbvf_promiscuous_disable,
425 .allmulticast_enable = igbvf_allmulticast_enable,
426 .allmulticast_disable = igbvf_allmulticast_disable,
427 .link_update = eth_igb_link_update,
428 .stats_get = eth_igbvf_stats_get,
429 .xstats_get = eth_igbvf_xstats_get,
430 .xstats_get_names = eth_igbvf_xstats_get_names,
431 .stats_reset = eth_igbvf_stats_reset,
432 .xstats_reset = eth_igbvf_stats_reset,
433 .vlan_filter_set = igbvf_vlan_filter_set,
434 .dev_infos_get = eth_igbvf_infos_get,
435 .dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
436 .rx_queue_setup = eth_igb_rx_queue_setup,
437 .rx_queue_release = eth_igb_rx_queue_release,
438 .rx_descriptor_done = eth_igb_rx_descriptor_done,
439 .rx_descriptor_status = eth_igb_rx_descriptor_status,
440 .tx_descriptor_status = eth_igb_tx_descriptor_status,
441 .tx_queue_setup = eth_igb_tx_queue_setup,
442 .tx_queue_release = eth_igb_tx_queue_release,
443 .set_mc_addr_list = eth_igb_set_mc_addr_list,
444 .rxq_info_get = igb_rxq_info_get,
445 .txq_info_get = igb_txq_info_get,
446 .mac_addr_set = igbvf_default_mac_addr_set,
447 .get_reg = igbvf_get_regs,
448 };
449
450 /* store statistics names and their offsets in the stats structure */
451 struct rte_igb_xstats_name_off {
452 char name[RTE_ETH_XSTATS_NAME_SIZE];
453 unsigned offset;
454 };
455
456 static const struct rte_igb_xstats_name_off rte_igb_stats_strings[] = {
457 {"rx_crc_errors", offsetof(struct e1000_hw_stats, crcerrs)},
458 {"rx_align_errors", offsetof(struct e1000_hw_stats, algnerrc)},
459 {"rx_symbol_errors", offsetof(struct e1000_hw_stats, symerrs)},
460 {"rx_missed_packets", offsetof(struct e1000_hw_stats, mpc)},
461 {"tx_single_collision_packets", offsetof(struct e1000_hw_stats, scc)},
462 {"tx_multiple_collision_packets", offsetof(struct e1000_hw_stats, mcc)},
463 {"tx_excessive_collision_packets", offsetof(struct e1000_hw_stats,
464 ecol)},
465 {"tx_late_collisions", offsetof(struct e1000_hw_stats, latecol)},
466 {"tx_total_collisions", offsetof(struct e1000_hw_stats, colc)},
467 {"tx_deferred_packets", offsetof(struct e1000_hw_stats, dc)},
468 {"tx_no_carrier_sense_packets", offsetof(struct e1000_hw_stats, tncrs)},
469 {"rx_carrier_ext_errors", offsetof(struct e1000_hw_stats, cexterr)},
470 {"rx_length_errors", offsetof(struct e1000_hw_stats, rlec)},
471 {"rx_xon_packets", offsetof(struct e1000_hw_stats, xonrxc)},
472 {"tx_xon_packets", offsetof(struct e1000_hw_stats, xontxc)},
473 {"rx_xoff_packets", offsetof(struct e1000_hw_stats, xoffrxc)},
474 {"tx_xoff_packets", offsetof(struct e1000_hw_stats, xofftxc)},
475 {"rx_flow_control_unsupported_packets", offsetof(struct e1000_hw_stats,
476 fcruc)},
477 {"rx_size_64_packets", offsetof(struct e1000_hw_stats, prc64)},
478 {"rx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, prc127)},
479 {"rx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, prc255)},
480 {"rx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, prc511)},
481 {"rx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
482 prc1023)},
483 {"rx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
484 prc1522)},
485 {"rx_broadcast_packets", offsetof(struct e1000_hw_stats, bprc)},
486 {"rx_multicast_packets", offsetof(struct e1000_hw_stats, mprc)},
487 {"rx_undersize_errors", offsetof(struct e1000_hw_stats, ruc)},
488 {"rx_fragment_errors", offsetof(struct e1000_hw_stats, rfc)},
489 {"rx_oversize_errors", offsetof(struct e1000_hw_stats, roc)},
490 {"rx_jabber_errors", offsetof(struct e1000_hw_stats, rjc)},
491 {"rx_management_packets", offsetof(struct e1000_hw_stats, mgprc)},
492 {"rx_management_dropped", offsetof(struct e1000_hw_stats, mgpdc)},
493 {"tx_management_packets", offsetof(struct e1000_hw_stats, mgptc)},
494 {"rx_total_packets", offsetof(struct e1000_hw_stats, tpr)},
495 {"tx_total_packets", offsetof(struct e1000_hw_stats, tpt)},
496 {"rx_total_bytes", offsetof(struct e1000_hw_stats, tor)},
497 {"tx_total_bytes", offsetof(struct e1000_hw_stats, tot)},
498 {"tx_size_64_packets", offsetof(struct e1000_hw_stats, ptc64)},
499 {"tx_size_65_to_127_packets", offsetof(struct e1000_hw_stats, ptc127)},
500 {"tx_size_128_to_255_packets", offsetof(struct e1000_hw_stats, ptc255)},
501 {"tx_size_256_to_511_packets", offsetof(struct e1000_hw_stats, ptc511)},
502 {"tx_size_512_to_1023_packets", offsetof(struct e1000_hw_stats,
503 ptc1023)},
504 	{"tx_size_1024_to_max_packets", offsetof(struct e1000_hw_stats,
505 ptc1522)},
506 {"tx_multicast_packets", offsetof(struct e1000_hw_stats, mptc)},
507 {"tx_broadcast_packets", offsetof(struct e1000_hw_stats, bptc)},
508 {"tx_tso_packets", offsetof(struct e1000_hw_stats, tsctc)},
509 {"tx_tso_errors", offsetof(struct e1000_hw_stats, tsctfc)},
510 {"rx_sent_to_host_packets", offsetof(struct e1000_hw_stats, rpthc)},
511 {"tx_sent_by_host_packets", offsetof(struct e1000_hw_stats, hgptc)},
512 {"rx_code_violation_packets", offsetof(struct e1000_hw_stats, scvpc)},
513
514 {"interrupt_assert_count", offsetof(struct e1000_hw_stats, iac)},
515 };
516
517 #define IGB_NB_XSTATS (sizeof(rte_igb_stats_strings) / \
518 sizeof(rte_igb_stats_strings[0]))
519
520 static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
521 {"rx_multicast_packets", offsetof(struct e1000_vf_stats, mprc)},
522 {"rx_good_loopback_packets", offsetof(struct e1000_vf_stats, gprlbc)},
523 {"tx_good_loopback_packets", offsetof(struct e1000_vf_stats, gptlbc)},
524 {"rx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gorlbc)},
525 {"tx_good_loopback_bytes", offsetof(struct e1000_vf_stats, gotlbc)},
526 };
527
528 #define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
529 sizeof(rte_igbvf_stats_strings[0]))
530
531
532 static inline void
533 igb_intr_enable(struct rte_eth_dev *dev)
534 {
535 struct e1000_interrupt *intr =
536 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
537 struct e1000_hw *hw =
538 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
539
540 E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
541 E1000_WRITE_FLUSH(hw);
542 }
543
544 static void
545 igb_intr_disable(struct e1000_hw *hw)
546 {
547 E1000_WRITE_REG(hw, E1000_IMC, ~0);
548 E1000_WRITE_FLUSH(hw);
549 }
550
551 static inline void
552 igbvf_intr_enable(struct rte_eth_dev *dev)
553 {
554 struct e1000_hw *hw =
555 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
556
557 /* only for mailbox */
558 E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX);
559 E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX);
560 E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX);
561 E1000_WRITE_FLUSH(hw);
562 }
563
564 /* Mailbox only for now. Extend this function if RX/TX interrupts are needed. */
565 static void
566 igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector)
567 {
568 uint32_t tmp = 0;
569
570 /* mailbox */
571 tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK);
572 tmp |= E1000_VTIVAR_VALID;
573 E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp);
574 }
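/*
 * Worked example (derived from the code above): for the mailbox cause,
 * msix_vector is E1000_VTIVAR_MISC_MAILBOX (0), so the value written to
 * E1000_VTIVAR_MISC is 0x80 -- vector 0 with E1000_VTIVAR_VALID set.
 */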
575
576 static void
577 eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev)
578 {
579 struct e1000_hw *hw =
580 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
581
582 /* Configure VF other cause ivar */
583 igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX);
584 }
585
586 static inline int32_t
587 igb_pf_reset_hw(struct e1000_hw *hw)
588 {
589 uint32_t ctrl_ext;
590 int32_t status;
591
592 status = e1000_reset_hw(hw);
593
594 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
595 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
596 ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
597 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
598 E1000_WRITE_FLUSH(hw);
599
600 return status;
601 }
602
603 static void
604 igb_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
605 {
606 struct e1000_hw *hw =
607 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
608
609
610 hw->vendor_id = pci_dev->id.vendor_id;
611 hw->device_id = pci_dev->id.device_id;
612 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
613 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
614
615 e1000_set_mac_type(hw);
616
617 /* need to check if it is a vf device below */
618 }
619
620 static int
621 igb_reset_swfw_lock(struct e1000_hw *hw)
622 {
623 int ret_val;
624
625 /*
626 * Do mac ops initialization manually here, since we will need
627 * some function pointers set by this call.
628 */
629 ret_val = e1000_init_mac_params(hw);
630 if (ret_val)
631 return ret_val;
632
633 /*
634  * Acquiring the SMBI lock should not fail at this early stage. If it
635  * does, it is because the application exited improperly, so force the
636  * release of the faulty lock.
637 */
638 if (e1000_get_hw_semaphore_generic(hw) < 0) {
639 PMD_DRV_LOG(DEBUG, "SMBI lock released");
640 }
641 e1000_put_hw_semaphore_generic(hw);
642
643 if (hw->mac.ops.acquire_swfw_sync != NULL) {
644 uint16_t mask;
645
646 /*
647  * Acquiring the PHY lock should not fail at this early stage. If it
648  * does, it is because the application exited improperly, so force the
649  * release of the faulty lock.
650 */
651 mask = E1000_SWFW_PHY0_SM << hw->bus.func;
652 if (hw->bus.func > E1000_FUNC_1)
653 mask <<= 2;
654 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
655 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
656 hw->bus.func);
657 }
658 hw->mac.ops.release_swfw_sync(hw, mask);
659
660 /*
661  * This one is trickier since it is common to all ports; but the
662  * swfw_sync retries last long enough (1s) to be almost certain that if
663  * the lock cannot be taken, it is because the semaphore was left
664  * improperly locked.
665 */
666 mask = E1000_SWFW_EEP_SM;
667 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
668 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
669 }
670 hw->mac.ops.release_swfw_sync(hw, mask);
671 }
672
673 return E1000_SUCCESS;
674 }
675
676 /* Remove all ntuple filters of the device */
677 static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
678 {
679 struct e1000_filter_info *filter_info =
680 E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
681 struct e1000_5tuple_filter *p_5tuple;
682 struct e1000_2tuple_filter *p_2tuple;
683
684 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
685 TAILQ_REMOVE(&filter_info->fivetuple_list,
686 p_5tuple, entries);
687 rte_free(p_5tuple);
688 }
689 filter_info->fivetuple_mask = 0;
690 while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) {
691 TAILQ_REMOVE(&filter_info->twotuple_list,
692 p_2tuple, entries);
693 rte_free(p_2tuple);
694 }
695 filter_info->twotuple_mask = 0;
696
697 return 0;
698 }
699
700 /* Remove all flex filters of the device */
701 static int igb_flex_filter_uninit(struct rte_eth_dev *eth_dev)
702 {
703 struct e1000_filter_info *filter_info =
704 E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
705 struct e1000_flex_filter *p_flex;
706
707 while ((p_flex = TAILQ_FIRST(&filter_info->flex_list))) {
708 TAILQ_REMOVE(&filter_info->flex_list, p_flex, entries);
709 rte_free(p_flex);
710 }
711 filter_info->flex_mask = 0;
712
713 return 0;
714 }
715
716 static int
717 eth_igb_dev_init(struct rte_eth_dev *eth_dev)
718 {
719 int error = 0;
720 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
721 struct e1000_hw *hw =
722 E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
723 struct e1000_vfta * shadow_vfta =
724 E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
725 struct e1000_filter_info *filter_info =
726 E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
727 struct e1000_adapter *adapter =
728 E1000_DEV_PRIVATE(eth_dev->data->dev_private);
729
730 uint32_t ctrl_ext;
731
732 eth_dev->dev_ops = &eth_igb_ops;
733 eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
734 eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
735 eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
736
737 /* for secondary processes, we don't initialise any further as primary
738 * has already done this work. Only check we don't need a different
739 * RX function */
740 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
741 if (eth_dev->data->scattered_rx)
742 eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
743 return 0;
744 }
745
746 rte_eth_copy_pci_info(eth_dev, pci_dev);
747
748 hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;
749
750 igb_identify_hardware(eth_dev, pci_dev);
751 if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
752 error = -EIO;
753 goto err_late;
754 }
755
756 e1000_get_bus_info(hw);
757
758 /* Reset any pending lock */
759 if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
760 error = -EIO;
761 goto err_late;
762 }
763
764 /* Finish initialization */
765 if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
766 error = -EIO;
767 goto err_late;
768 }
769
770 hw->mac.autoneg = 1;
771 hw->phy.autoneg_wait_to_complete = 0;
772 hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
773
774 /* Copper options */
775 if (hw->phy.media_type == e1000_media_type_copper) {
776 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
777 hw->phy.disable_polarity_correction = 0;
778 hw->phy.ms_type = e1000_ms_hw_default;
779 }
780
781 /*
782  * Start from a known state; this is important for reading the NVM
783  * and MAC address from it.
784 */
785 igb_pf_reset_hw(hw);
786
787 /* Make sure we have a good EEPROM before we read from it */
788 if (e1000_validate_nvm_checksum(hw) < 0) {
789 /*
790  * Some PCI-E parts fail the first check due to
791  * the link being in a sleep state. Call it again;
792  * if it fails a second time, it's a real issue.
793 */
794 if (e1000_validate_nvm_checksum(hw) < 0) {
795 PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
796 error = -EIO;
797 goto err_late;
798 }
799 }
800
801 /* Read the permanent MAC address out of the EEPROM */
802 if (e1000_read_mac_addr(hw) != 0) {
803 PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
804 error = -EIO;
805 goto err_late;
806 }
807
808 /* Allocate memory for storing MAC addresses */
809 eth_dev->data->mac_addrs = rte_zmalloc("e1000",
810 ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
811 if (eth_dev->data->mac_addrs == NULL) {
812 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
813 "store MAC addresses",
814 ETHER_ADDR_LEN * hw->mac.rar_entry_count);
815 error = -ENOMEM;
816 goto err_late;
817 }
818
819 /* Copy the permanent MAC address */
820 ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);
821
822 /* initialize the vfta */
823 memset(shadow_vfta, 0, sizeof(*shadow_vfta));
824
825 /* Now initialize the hardware */
826 if (igb_hardware_init(hw) != 0) {
827 PMD_INIT_LOG(ERR, "Hardware initialization failed");
828 rte_free(eth_dev->data->mac_addrs);
829 eth_dev->data->mac_addrs = NULL;
830 error = -ENODEV;
831 goto err_late;
832 }
833 hw->mac.get_link_status = 1;
834 adapter->stopped = 0;
835
836 /* Indicate SOL/IDER usage */
837 if (e1000_check_reset_block(hw) < 0) {
838 PMD_INIT_LOG(ERR, "PHY reset is blocked due to"
839 " SOL/IDER session");
840 }
841
842 /* initialize PF if max_vfs not zero */
843 igb_pf_host_init(eth_dev);
844
845 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
846 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
847 ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
848 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
849 E1000_WRITE_FLUSH(hw);
850
851 PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
852 eth_dev->data->port_id, pci_dev->id.vendor_id,
853 pci_dev->id.device_id);
854
855 rte_intr_callback_register(&pci_dev->intr_handle,
856 eth_igb_interrupt_handler,
857 (void *)eth_dev);
858
859 /* enable uio/vfio intr/eventfd mapping */
860 rte_intr_enable(&pci_dev->intr_handle);
861
862 /* enable support intr */
863 igb_intr_enable(eth_dev);
864
865 /* initialize filter info */
866 memset(filter_info, 0,
867 sizeof(struct e1000_filter_info));
868
869 TAILQ_INIT(&filter_info->flex_list);
870 TAILQ_INIT(&filter_info->twotuple_list);
871 TAILQ_INIT(&filter_info->fivetuple_list);
872
873 TAILQ_INIT(&igb_filter_ntuple_list);
874 TAILQ_INIT(&igb_filter_ethertype_list);
875 TAILQ_INIT(&igb_filter_syn_list);
876 TAILQ_INIT(&igb_filter_flex_list);
877 TAILQ_INIT(&igb_filter_rss_list);
878 TAILQ_INIT(&igb_flow_list);
879
880 return 0;
881
882 err_late:
883 igb_hw_control_release(hw);
884
885 return error;
886 }
887
888 static int
889 eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
890 {
891 struct rte_pci_device *pci_dev;
892 struct rte_intr_handle *intr_handle;
893 struct e1000_hw *hw;
894 struct e1000_adapter *adapter =
895 E1000_DEV_PRIVATE(eth_dev->data->dev_private);
896 struct e1000_filter_info *filter_info =
897 E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
898
899 PMD_INIT_FUNC_TRACE();
900
901 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
902 return -EPERM;
903
904 hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
905 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
906 intr_handle = &pci_dev->intr_handle;
907
908 if (adapter->stopped == 0)
909 eth_igb_close(eth_dev);
910
911 eth_dev->dev_ops = NULL;
912 eth_dev->rx_pkt_burst = NULL;
913 eth_dev->tx_pkt_burst = NULL;
914
915 /* Reset any pending lock */
916 igb_reset_swfw_lock(hw);
917
918 rte_free(eth_dev->data->mac_addrs);
919 eth_dev->data->mac_addrs = NULL;
920
921 /* uninitialize PF if max_vfs not zero */
922 igb_pf_host_uninit(eth_dev);
923
924 /* disable uio intr before callback unregister */
925 rte_intr_disable(intr_handle);
926 rte_intr_callback_unregister(intr_handle,
927 eth_igb_interrupt_handler, eth_dev);
928
929 /* clear the SYN filter info */
930 filter_info->syn_info = 0;
931
932 /* clear the ethertype filters info */
933 filter_info->ethertype_mask = 0;
934 memset(filter_info->ethertype_filters, 0,
935 E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter));
936
937 /* clear the rss filter info */
938 memset(&filter_info->rss_info, 0,
939 sizeof(struct igb_rte_flow_rss_conf));
940
941 /* remove all ntuple filters of the device */
942 igb_ntuple_filter_uninit(eth_dev);
943
944 /* remove all flex filters of the device */
945 igb_flex_filter_uninit(eth_dev);
946
947 /* clear all the filters list */
948 igb_filterlist_flush(eth_dev);
949
950 return 0;
951 }
952
953 /*
954 * Virtual Function device init
955 */
956 static int
957 eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
958 {
959 struct rte_pci_device *pci_dev;
960 struct rte_intr_handle *intr_handle;
961 struct e1000_adapter *adapter =
962 E1000_DEV_PRIVATE(eth_dev->data->dev_private);
963 struct e1000_hw *hw =
964 E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
965 int diag;
966 struct ether_addr *perm_addr = (struct ether_addr *)hw->mac.perm_addr;
967
968 PMD_INIT_FUNC_TRACE();
969
970 eth_dev->dev_ops = &igbvf_eth_dev_ops;
971 eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
972 eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
973 eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
974
975 /* for secondary processes, we don't initialise any further as primary
976 * has already done this work. Only check we don't need a different
977 * RX function */
978 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
979 if (eth_dev->data->scattered_rx)
980 eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
981 return 0;
982 }
983
984 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
985 rte_eth_copy_pci_info(eth_dev, pci_dev);
986
987 hw->device_id = pci_dev->id.device_id;
988 hw->vendor_id = pci_dev->id.vendor_id;
989 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
990 adapter->stopped = 0;
991
992 /* Initialize the shared code (base driver) */
993 diag = e1000_setup_init_funcs(hw, TRUE);
994 if (diag != 0) {
995 PMD_INIT_LOG(ERR, "Shared code init failed for igbvf: %d",
996 diag);
997 return -EIO;
998 }
999
1000 /* init_mailbox_params */
1001 hw->mbx.ops.init_params(hw);
1002
1003 /* Disable the interrupts for VF */
1004 igbvf_intr_disable(hw);
1005
1006 diag = hw->mac.ops.reset_hw(hw);
1007
1008 /* Allocate memory for storing MAC addresses */
1009 eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
1010 hw->mac.rar_entry_count, 0);
1011 if (eth_dev->data->mac_addrs == NULL) {
1012 PMD_INIT_LOG(ERR,
1013 "Failed to allocate %d bytes needed to store MAC "
1014 "addresses",
1015 ETHER_ADDR_LEN * hw->mac.rar_entry_count);
1016 return -ENOMEM;
1017 }
1018
1019 /* Generate a random MAC address, if none was assigned by PF. */
1020 if (is_zero_ether_addr(perm_addr)) {
1021 eth_random_addr(perm_addr->addr_bytes);
1022 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
1023 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
1024 "%02x:%02x:%02x:%02x:%02x:%02x",
1025 perm_addr->addr_bytes[0],
1026 perm_addr->addr_bytes[1],
1027 perm_addr->addr_bytes[2],
1028 perm_addr->addr_bytes[3],
1029 perm_addr->addr_bytes[4],
1030 perm_addr->addr_bytes[5]);
1031 }
1032
1033 diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
1034 if (diag) {
1035 rte_free(eth_dev->data->mac_addrs);
1036 eth_dev->data->mac_addrs = NULL;
1037 return diag;
1038 }
1039 /* Copy the permanent MAC address */
1040 ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
1041 &eth_dev->data->mac_addrs[0]);
1042
1043 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
1044 "mac.type=%s",
1045 eth_dev->data->port_id, pci_dev->id.vendor_id,
1046 pci_dev->id.device_id, "igb_mac_82576_vf");
1047
1048 intr_handle = &pci_dev->intr_handle;
1049 rte_intr_callback_register(intr_handle,
1050 eth_igbvf_interrupt_handler, eth_dev);
1051
1052 return 0;
1053 }
1054
1055 static int
1056 eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
1057 {
1058 struct e1000_adapter *adapter =
1059 E1000_DEV_PRIVATE(eth_dev->data->dev_private);
1060 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1061
1062 PMD_INIT_FUNC_TRACE();
1063
1064 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1065 return -EPERM;
1066
1067 if (adapter->stopped == 0)
1068 igbvf_dev_close(eth_dev);
1069
1070 eth_dev->dev_ops = NULL;
1071 eth_dev->rx_pkt_burst = NULL;
1072 eth_dev->tx_pkt_burst = NULL;
1073
1074 rte_free(eth_dev->data->mac_addrs);
1075 eth_dev->data->mac_addrs = NULL;
1076
1077 /* disable uio intr before callback unregister */
1078 rte_intr_disable(&pci_dev->intr_handle);
1079 rte_intr_callback_unregister(&pci_dev->intr_handle,
1080 eth_igbvf_interrupt_handler,
1081 (void *)eth_dev);
1082
1083 return 0;
1084 }
1085
1086 static int eth_igb_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1087 struct rte_pci_device *pci_dev)
1088 {
1089 return rte_eth_dev_pci_generic_probe(pci_dev,
1090 sizeof(struct e1000_adapter), eth_igb_dev_init);
1091 }
1092
1093 static int eth_igb_pci_remove(struct rte_pci_device *pci_dev)
1094 {
1095 return rte_eth_dev_pci_generic_remove(pci_dev, eth_igb_dev_uninit);
1096 }
1097
1098 static struct rte_pci_driver rte_igb_pmd = {
1099 .id_table = pci_id_igb_map,
1100 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
1101 RTE_PCI_DRV_IOVA_AS_VA,
1102 .probe = eth_igb_pci_probe,
1103 .remove = eth_igb_pci_remove,
1104 };
1105
1106
1107 static int eth_igbvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1108 struct rte_pci_device *pci_dev)
1109 {
1110 return rte_eth_dev_pci_generic_probe(pci_dev,
1111 sizeof(struct e1000_adapter), eth_igbvf_dev_init);
1112 }
1113
1114 static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev)
1115 {
1116 return rte_eth_dev_pci_generic_remove(pci_dev, eth_igbvf_dev_uninit);
1117 }
1118
1119 /*
1120 * virtual function driver struct
1121 */
1122 static struct rte_pci_driver rte_igbvf_pmd = {
1123 .id_table = pci_id_igbvf_map,
1124 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
1125 .probe = eth_igbvf_pci_probe,
1126 .remove = eth_igbvf_pci_remove,
1127 };
1128
1129 static void
1130 igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
1131 {
1132 struct e1000_hw *hw =
1133 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1134 	/* RCTL: enable VLAN filter since VMDq always uses VLAN filter */
1135 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
1136 rctl |= E1000_RCTL_VFE;
1137 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1138 }
1139
1140 static int
1141 igb_check_mq_mode(struct rte_eth_dev *dev)
1142 {
1143 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1144 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
1145 uint16_t nb_rx_q = dev->data->nb_rx_queues;
1146 uint16_t nb_tx_q = dev->data->nb_tx_queues;
1147
1148 if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
1149 tx_mq_mode == ETH_MQ_TX_DCB ||
1150 tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
1151 PMD_INIT_LOG(ERR, "DCB mode is not supported.");
1152 return -EINVAL;
1153 }
1154 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
1155 /* Check multi-queue mode.
1156  * To not break software, we accept ETH_MQ_RX_NONE as this might
1157  * be used to turn off VLAN filtering.
1158 */
1159
1160 if (rx_mq_mode == ETH_MQ_RX_NONE ||
1161 rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
1162 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
1163 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
1164 } else {
1165 /* Only support one queue on VFs.
1166 * RSS together with SRIOV is not supported.
1167 */
1168 PMD_INIT_LOG(ERR, "SRIOV is active,"
1169 " wrong mq_mode rx %d.",
1170 rx_mq_mode);
1171 return -EINVAL;
1172 }
1173 /* TX mode is not used here, so the mode might be ignored. */
1174 if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
1175 /* SRIOV only works in VMDq enable mode */
1176 PMD_INIT_LOG(WARNING, "SRIOV is active,"
1177 " TX mode %d is not supported. "
1178 " Driver will behave as %d mode.",
1179 tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
1180 }
1181
1182 /* check valid queue number */
1183 if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
1184 PMD_INIT_LOG(ERR, "SRIOV is active,"
1185 " only support one queue on VFs.");
1186 return -EINVAL;
1187 }
1188 } else {
1189 /* To not break software that sets an invalid mode, only display a
1190  * warning if an invalid mode is used.
1191 */
1192 if (rx_mq_mode != ETH_MQ_RX_NONE &&
1193 rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
1194 rx_mq_mode != ETH_MQ_RX_RSS) {
1195 /* RSS together with VMDq is not supported */
1196 PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
1197 rx_mq_mode);
1198 return -EINVAL;
1199 }
1200
1201 if (tx_mq_mode != ETH_MQ_TX_NONE &&
1202 tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
1203 PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
1204 " Because txmode is meaningless in this"
1205 " driver, it is ignored.",
1206 tx_mq_mode);
1207 }
1208 }
1209 return 0;
1210 }
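/*
 * Summary of igb_check_mq_mode() above (illustrative): with SR-IOV active,
 * only ETH_MQ_RX_NONE or ETH_MQ_RX_VMDQ_ONLY with a single Rx/Tx queue
 * passes (mq_mode is then forced to VMDQ_ONLY); any DCB request, or RSS
 * combined with SR-IOV, is rejected with -EINVAL.
 */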
1211
1212 static int
1213 eth_igb_configure(struct rte_eth_dev *dev)
1214 {
1215 struct e1000_interrupt *intr =
1216 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1217 int ret;
1218
1219 PMD_INIT_FUNC_TRACE();
1220
1221 /* multiple queue mode checking */
1222 ret = igb_check_mq_mode(dev);
1223 if (ret != 0) {
1224 PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
1225 ret);
1226 return ret;
1227 }
1228
1229 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
1230 PMD_INIT_FUNC_TRACE();
1231
1232 return 0;
1233 }
1234
1235 static void
1236 eth_igb_rxtx_control(struct rte_eth_dev *dev,
1237 bool enable)
1238 {
1239 struct e1000_hw *hw =
1240 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1241 uint32_t tctl, rctl;
1242
1243 tctl = E1000_READ_REG(hw, E1000_TCTL);
1244 rctl = E1000_READ_REG(hw, E1000_RCTL);
1245
1246 if (enable) {
1247 /* enable Tx/Rx */
1248 tctl |= E1000_TCTL_EN;
1249 rctl |= E1000_RCTL_EN;
1250 } else {
1251 /* disable Tx/Rx */
1252 tctl &= ~E1000_TCTL_EN;
1253 rctl &= ~E1000_RCTL_EN;
1254 }
1255 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
1256 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1257 E1000_WRITE_FLUSH(hw);
1258 }
1259
1260 static int
1261 eth_igb_start(struct rte_eth_dev *dev)
1262 {
1263 struct e1000_hw *hw =
1264 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1265 struct e1000_adapter *adapter =
1266 E1000_DEV_PRIVATE(dev->data->dev_private);
1267 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1268 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1269 int ret, mask;
1270 uint32_t intr_vector = 0;
1271 uint32_t ctrl_ext;
1272 uint32_t *speeds;
1273 int num_speeds;
1274 bool autoneg;
1275
1276 PMD_INIT_FUNC_TRACE();
1277
1278 /* disable uio/vfio intr/eventfd mapping */
1279 rte_intr_disable(intr_handle);
1280
1281 /* Power up the phy. Needed to make the link go Up */
1282 eth_igb_dev_set_link_up(dev);
1283
1284 /*
1285 * Packet Buffer Allocation (PBA)
1286  * Writing PBA sets the receive portion of the buffer;
1287  * the remainder is used for the transmit buffer.
1288 */
1289 if (hw->mac.type == e1000_82575) {
1290 uint32_t pba;
1291
1292 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1293 E1000_WRITE_REG(hw, E1000_PBA, pba);
1294 }
1295
1296 /* Put the address into the Receive Address Array */
1297 e1000_rar_set(hw, hw->mac.addr, 0);
1298
1299 /* Initialize the hardware */
1300 if (igb_hardware_init(hw)) {
1301 PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
1302 return -EIO;
1303 }
1304 adapter->stopped = 0;
1305
1306 E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
1307
1308 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1309 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
1310 ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
1311 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1312 E1000_WRITE_FLUSH(hw);
1313
1314 /* configure PF module if SRIOV enabled */
1315 igb_pf_host_configure(dev);
1316
1317 /* check and configure queue intr-vector mapping */
1318 if ((rte_intr_cap_multiple(intr_handle) ||
1319 !RTE_ETH_DEV_SRIOV(dev).active) &&
1320 dev->data->dev_conf.intr_conf.rxq != 0) {
1321 intr_vector = dev->data->nb_rx_queues;
1322 if (rte_intr_efd_enable(intr_handle, intr_vector))
1323 return -1;
1324 }
1325
1326 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1327 intr_handle->intr_vec =
1328 rte_zmalloc("intr_vec",
1329 dev->data->nb_rx_queues * sizeof(int), 0);
1330 if (intr_handle->intr_vec == NULL) {
1331 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
1332 " intr_vec", dev->data->nb_rx_queues);
1333 return -ENOMEM;
1334 }
1335 }
1336
1337 /* configure msix for rx interrupt */
1338 eth_igb_configure_msix_intr(dev);
1339
1340 /* Configure for OS presence */
1341 igb_init_manageability(hw);
1342
1343 eth_igb_tx_init(dev);
1344
1345 /* This can fail when allocating mbufs for descriptor rings */
1346 ret = eth_igb_rx_init(dev);
1347 if (ret) {
1348 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
1349 igb_dev_clear_queues(dev);
1350 return ret;
1351 }
1352
1353 e1000_clear_hw_cntrs_base_generic(hw);
1354
1355 /*
1356 * VLAN Offload Settings
1357 */
1358 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
1359 ETH_VLAN_EXTEND_MASK;
1360 ret = eth_igb_vlan_offload_set(dev, mask);
1361 if (ret) {
1362 PMD_INIT_LOG(ERR, "Unable to set vlan offload");
1363 igb_dev_clear_queues(dev);
1364 return ret;
1365 }
1366
1367 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
1368 /* Enable VLAN filter since VMDq always uses VLAN filter */
1369 igb_vmdq_vlan_hw_filter_enable(dev);
1370 }
1371
1372 if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
1373 (hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
1374 (hw->mac.type == e1000_i211)) {
1375 /* Configure EITR with the maximum possible value (0xFFFF) */
1376 E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
1377 }
1378
1379 /* Setup link speed and duplex */
1380 speeds = &dev->data->dev_conf.link_speeds;
1381 if (*speeds == ETH_LINK_SPEED_AUTONEG) {
1382 hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
1383 hw->mac.autoneg = 1;
1384 } else {
1385 num_speeds = 0;
1386 autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
1387
1388 /* Reset */
1389 hw->phy.autoneg_advertised = 0;
1390
1391 if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
1392 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
1393 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
1394 num_speeds = -1;
1395 goto error_invalid_config;
1396 }
1397 if (*speeds & ETH_LINK_SPEED_10M_HD) {
1398 hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
1399 num_speeds++;
1400 }
1401 if (*speeds & ETH_LINK_SPEED_10M) {
1402 hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
1403 num_speeds++;
1404 }
1405 if (*speeds & ETH_LINK_SPEED_100M_HD) {
1406 hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
1407 num_speeds++;
1408 }
1409 if (*speeds & ETH_LINK_SPEED_100M) {
1410 hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
1411 num_speeds++;
1412 }
1413 if (*speeds & ETH_LINK_SPEED_1G) {
1414 hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
1415 num_speeds++;
1416 }
1417 if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
1418 goto error_invalid_config;
1419
1420 /* Set/reset the mac.autoneg based on the link speed,
1421 * fixed or not
1422 */
1423 if (!autoneg) {
1424 hw->mac.autoneg = 0;
1425 hw->mac.forced_speed_duplex =
1426 hw->phy.autoneg_advertised;
1427 } else {
1428 hw->mac.autoneg = 1;
1429 }
1430 }
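/*
 * Worked example (illustrative): if the application sets
 * dev_conf.link_speeds = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED, the loop
 * above leaves num_speeds == 1 with autoneg false, so mac.autoneg is cleared
 * and forced_speed_duplex becomes ADVERTISE_1000_FULL.
 */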
1431
1432 e1000_setup_link(hw);
1433
1434 if (rte_intr_allow_others(intr_handle)) {
1435 /* check if lsc interrupt is enabled */
1436 if (dev->data->dev_conf.intr_conf.lsc != 0)
1437 eth_igb_lsc_interrupt_setup(dev, TRUE);
1438 else
1439 eth_igb_lsc_interrupt_setup(dev, FALSE);
1440 } else {
1441 rte_intr_callback_unregister(intr_handle,
1442 eth_igb_interrupt_handler,
1443 (void *)dev);
1444 if (dev->data->dev_conf.intr_conf.lsc != 0)
1445 PMD_INIT_LOG(INFO, "lsc won't enable because of"
1446 " no intr multiplex");
1447 }
1448
1449 /* check if rxq interrupt is enabled */
1450 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
1451 rte_intr_dp_is_en(intr_handle))
1452 eth_igb_rxq_interrupt_setup(dev);
1453
1454 /* enable uio/vfio intr/eventfd mapping */
1455 rte_intr_enable(intr_handle);
1456
1457 /* resume enabled intr since hw reset */
1458 igb_intr_enable(dev);
1459
1460 /* restore all types filter */
1461 igb_filter_restore(dev);
1462
1463 eth_igb_rxtx_control(dev, true);
1464 eth_igb_link_update(dev, 0);
1465
1466 PMD_INIT_LOG(DEBUG, "<<");
1467
1468 return 0;
1469
1470 error_invalid_config:
1471 PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
1472 dev->data->dev_conf.link_speeds, dev->data->port_id);
1473 igb_dev_clear_queues(dev);
1474 return -EINVAL;
1475 }
1476
1477 /*********************************************************************
1478 *
1479 * This routine disables all traffic on the adapter by issuing a
1480 * global reset on the MAC.
1481 *
1482 **********************************************************************/
1483 static void
1484 eth_igb_stop(struct rte_eth_dev *dev)
1485 {
1486 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1487 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1488 struct rte_eth_link link;
1489 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1490
1491 eth_igb_rxtx_control(dev, false);
1492
1493 igb_intr_disable(hw);
1494
1495 /* disable intr eventfd mapping */
1496 rte_intr_disable(intr_handle);
1497
1498 igb_pf_reset_hw(hw);
1499 E1000_WRITE_REG(hw, E1000_WUC, 0);
1500
1501 /* Set bit for Go Link disconnect */
1502 if (hw->mac.type >= e1000_82580) {
1503 uint32_t phpm_reg;
1504
1505 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
1506 phpm_reg |= E1000_82580_PM_GO_LINKD;
1507 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
1508 }
1509
1510 /* Power down the phy. Needed to make the link go Down */
1511 eth_igb_dev_set_link_down(dev);
1512
1513 igb_dev_clear_queues(dev);
1514
1515 /* clear the recorded link status */
1516 memset(&link, 0, sizeof(link));
1517 rte_eth_linkstatus_set(dev, &link);
1518
1519 if (!rte_intr_allow_others(intr_handle))
1520 /* resume to the default handler */
1521 rte_intr_callback_register(intr_handle,
1522 eth_igb_interrupt_handler,
1523 (void *)dev);
1524
1525 /* Clean datapath event and queue/vec mapping */
1526 rte_intr_efd_disable(intr_handle);
1527 if (intr_handle->intr_vec != NULL) {
1528 rte_free(intr_handle->intr_vec);
1529 intr_handle->intr_vec = NULL;
1530 }
1531 }
1532
1533 static int
1534 eth_igb_dev_set_link_up(struct rte_eth_dev *dev)
1535 {
1536 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1537
1538 if (hw->phy.media_type == e1000_media_type_copper)
1539 e1000_power_up_phy(hw);
1540 else
1541 e1000_power_up_fiber_serdes_link(hw);
1542
1543 return 0;
1544 }
1545
1546 static int
1547 eth_igb_dev_set_link_down(struct rte_eth_dev *dev)
1548 {
1549 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1550
1551 if (hw->phy.media_type == e1000_media_type_copper)
1552 e1000_power_down_phy(hw);
1553 else
1554 e1000_shutdown_fiber_serdes_link(hw);
1555
1556 return 0;
1557 }
1558
1559 static void
1560 eth_igb_close(struct rte_eth_dev *dev)
1561 {
1562 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1563 struct e1000_adapter *adapter =
1564 E1000_DEV_PRIVATE(dev->data->dev_private);
1565 struct rte_eth_link link;
1566 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1567 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1568
1569 eth_igb_stop(dev);
1570 adapter->stopped = 1;
1571
1572 e1000_phy_hw_reset(hw);
1573 igb_release_manageability(hw);
1574 igb_hw_control_release(hw);
1575
1576 /* Clear bit for Go Link disconnect */
1577 if (hw->mac.type >= e1000_82580) {
1578 uint32_t phpm_reg;
1579
1580 phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
1581 phpm_reg &= ~E1000_82580_PM_GO_LINKD;
1582 E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
1583 }
1584
1585 igb_dev_free_queues(dev);
1586
1587 if (intr_handle->intr_vec) {
1588 rte_free(intr_handle->intr_vec);
1589 intr_handle->intr_vec = NULL;
1590 }
1591
1592 memset(&link, 0, sizeof(link));
1593 rte_eth_linkstatus_set(dev, &link);
1594 }
1595
1596 static int
1597 igb_get_rx_buffer_size(struct e1000_hw *hw)
1598 {
1599 uint32_t rx_buf_size;
1600 if (hw->mac.type == e1000_82576) {
1601 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
1602 } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
1603 /* PBS needs to be translated according to a lookup table */
1604 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
1605 rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
1606 rx_buf_size = (rx_buf_size << 10);
1607 } else if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
1608 rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0x3f) << 10;
1609 } else {
1610 rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
1611 }
1612
1613 return rx_buf_size;
1614 }
1615
1616 /*********************************************************************
1617 *
1618 * Initialize the hardware
1619 *
1620 **********************************************************************/
1621 static int
1622 igb_hardware_init(struct e1000_hw *hw)
1623 {
1624 uint32_t rx_buf_size;
1625 int diag;
1626
1627 /* Let the firmware know the OS is in control */
1628 igb_hw_control_acquire(hw);
1629
1630 /*
1631 * These parameters control the automatic generation (Tx) and
1632 * response (Rx) to Ethernet PAUSE frames.
1633 * - High water mark should allow for at least two standard size (1518)
1634 * frames to be received after sending an XOFF.
1635 * - Low water mark works best when it is very near the high water mark.
1636 * This allows the receiver to restart by sending XON when it has
1637 * drained a bit. Here we use an arbitrary value of 1500 which will
1638 * restart after one full frame is pulled from the buffer. There
1639 * could be several smaller frames in the buffer and if so they will
1640 * not trigger the XON until their total number reduces the buffer
1641 * by 1500.
1642 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1643 */
1644 rx_buf_size = igb_get_rx_buffer_size(hw);
1645
1646 hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
1647 hw->fc.low_water = hw->fc.high_water - 1500;
1648 hw->fc.pause_time = IGB_FC_PAUSE_TIME;
1649 hw->fc.send_xon = 1;
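/*
 * Worked example (values are hypothetical, assuming ETHER_MAX_LEN = 1518):
 * on an 82576 whose RXPBS field reads 0x40, rx_buf_size = 0x40 << 10 =
 * 65536 bytes, so high_water = 65536 - 2 * 1518 = 62500 and
 * low_water = 62500 - 1500 = 61000.
 */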
1650
1651 /* Set Flow control, use the tunable location if sane */
1652 if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
1653 hw->fc.requested_mode = igb_fc_setting;
1654 else
1655 hw->fc.requested_mode = e1000_fc_none;
1656
1657 /* Issue a global reset */
1658 igb_pf_reset_hw(hw);
1659 E1000_WRITE_REG(hw, E1000_WUC, 0);
1660
1661 diag = e1000_init_hw(hw);
1662 if (diag < 0)
1663 return diag;
1664
1665 E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
1666 e1000_get_phy_info(hw);
1667 e1000_check_for_link(hw);
1668
1669 return 0;
1670 }
1671
1672 /* This function is based on igb_update_stats_counters() in igb/if_igb.c */
1673 static void
1674 igb_read_stats_registers(struct e1000_hw *hw, struct e1000_hw_stats *stats)
1675 {
1676 int pause_frames;
1677
1678 uint64_t old_gprc = stats->gprc;
1679 uint64_t old_gptc = stats->gptc;
1680 uint64_t old_tpr = stats->tpr;
1681 uint64_t old_tpt = stats->tpt;
1682 uint64_t old_rpthc = stats->rpthc;
1683 uint64_t old_hgptc = stats->hgptc;
1684
1685 if(hw->phy.media_type == e1000_media_type_copper ||
1686 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1687 stats->symerrs +=
1688 E1000_READ_REG(hw,E1000_SYMERRS);
1689 stats->sec += E1000_READ_REG(hw, E1000_SEC);
1690 }
1691
1692 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
1693 stats->mpc += E1000_READ_REG(hw, E1000_MPC);
1694 stats->scc += E1000_READ_REG(hw, E1000_SCC);
1695 stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
1696
1697 stats->mcc += E1000_READ_REG(hw, E1000_MCC);
1698 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
1699 stats->colc += E1000_READ_REG(hw, E1000_COLC);
1700 stats->dc += E1000_READ_REG(hw, E1000_DC);
1701 stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
1702 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
1703 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
1704 /*
1705 ** For watchdog management we need to know if we have been
1706 ** paused during the last interval, so capture that here.
1707 */
1708 pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
1709 stats->xoffrxc += pause_frames;
1710 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
1711 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
1712 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
1713 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
1714 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
1715 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
1716 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
1717 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
1718 stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
1719 stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
1720 stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
1721 stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
1722
1723 /* For the 64-bit byte counters the low dword must be read first. */
1724 /* Both registers clear on the read of the high dword */
1725
1726 /* Workaround CRC bytes included in size, take away 4 bytes/packet */
1727 stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
1728 stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
1729 stats->gorc -= (stats->gprc - old_gprc) * ETHER_CRC_LEN;
1730 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
1731 stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
1732 stats->gotc -= (stats->gptc - old_gptc) * ETHER_CRC_LEN;
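/*
 * For example (illustrative): if 10 good packets were counted since the
 * last read, 10 * ETHER_CRC_LEN = 40 bytes are subtracted so the byte
 * counters reflect frame sizes without the CRC.
 */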
1733
1734 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
1735 stats->ruc += E1000_READ_REG(hw, E1000_RUC);
1736 stats->rfc += E1000_READ_REG(hw, E1000_RFC);
1737 stats->roc += E1000_READ_REG(hw, E1000_ROC);
1738 stats->rjc += E1000_READ_REG(hw, E1000_RJC);
1739
1740 stats->tpr += E1000_READ_REG(hw, E1000_TPR);
1741 stats->tpt += E1000_READ_REG(hw, E1000_TPT);
1742
1743 stats->tor += E1000_READ_REG(hw, E1000_TORL);
1744 stats->tor += ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
1745 stats->tor -= (stats->tpr - old_tpr) * ETHER_CRC_LEN;
1746 stats->tot += E1000_READ_REG(hw, E1000_TOTL);
1747 stats->tot += ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);
1748 stats->tot -= (stats->tpt - old_tpt) * ETHER_CRC_LEN;
1749
1750 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
1751 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
1752 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
1753 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
1754 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
1755 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
1756 stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
1757 stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
1758
1759 /* Interrupt Counts */
1760
1761 stats->iac += E1000_READ_REG(hw, E1000_IAC);
1762 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
1763 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
1764 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
1765 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
1766 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
1767 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
1768 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
1769 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
1770
1771 /* Host to Card Statistics */
1772
1773 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
1774 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
1775 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
1776 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
1777 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
1778 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
1779 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
1780 stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
1781 stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
1782 stats->hgorc -= (stats->rpthc - old_rpthc) * ETHER_CRC_LEN;
1783 stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
1784 stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
1785 stats->hgotc -= (stats->hgptc - old_hgptc) * ETHER_CRC_LEN;
1786 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
1787 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
1788 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
1789
1790 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
1791 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
1792 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
1793 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
1794 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
1795 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
1796 }
1797
1798 static int
1799 eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1800 {
1801 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1802 struct e1000_hw_stats *stats =
1803 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1804
1805 igb_read_stats_registers(hw, stats);
1806
1807 if (rte_stats == NULL)
1808 return -EINVAL;
1809
1810 /* Rx Errors */
1811 rte_stats->imissed = stats->mpc;
1812 rte_stats->ierrors = stats->crcerrs +
1813 stats->rlec + stats->ruc + stats->roc +
1814 stats->rxerrc + stats->algnerrc + stats->cexterr;
1815
1816 /* Tx Errors */
1817 rte_stats->oerrors = stats->ecol + stats->latecol;
1818
1819 rte_stats->ipackets = stats->gprc;
1820 rte_stats->opackets = stats->gptc;
1821 rte_stats->ibytes = stats->gorc;
1822 rte_stats->obytes = stats->gotc;
1823 return 0;
1824 }
1825
1826 static void
1827 eth_igb_stats_reset(struct rte_eth_dev *dev)
1828 {
1829 struct e1000_hw_stats *hw_stats =
1830 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1831
1832 /* HW registers are cleared on read */
1833 eth_igb_stats_get(dev, NULL);
1834
1835 /* Reset software totals */
1836 memset(hw_stats, 0, sizeof(*hw_stats));
1837 }
1838
1839 static void
1840 eth_igb_xstats_reset(struct rte_eth_dev *dev)
1841 {
1842 struct e1000_hw_stats *stats =
1843 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1844
1845 /* HW registers are cleared on read */
1846 eth_igb_xstats_get(dev, NULL, IGB_NB_XSTATS);
1847
1848 /* Reset software totals */
1849 memset(stats, 0, sizeof(*stats));
1850 }
1851
1852 static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1853 struct rte_eth_xstat_name *xstats_names,
1854 __rte_unused unsigned int size)
1855 {
1856 unsigned i;
1857
1858 if (xstats_names == NULL)
1859 return IGB_NB_XSTATS;
1860
1861 /* Note: limit checked in rte_eth_xstats_names() */
1862
1863 for (i = 0; i < IGB_NB_XSTATS; i++) {
1864 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
1865 "%s", rte_igb_stats_strings[i].name);
1866 }
1867
1868 return IGB_NB_XSTATS;
1869 }
1870
1871 static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
1872 struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
1873 unsigned int limit)
1874 {
1875 unsigned int i;
1876
1877 if (!ids) {
1878 if (xstats_names == NULL)
1879 return IGB_NB_XSTATS;
1880
1881 for (i = 0; i < IGB_NB_XSTATS; i++)
1882 snprintf(xstats_names[i].name,
1883 sizeof(xstats_names[i].name),
1884 "%s", rte_igb_stats_strings[i].name);
1885
1886 return IGB_NB_XSTATS;
1887
1888 } else {
1889 struct rte_eth_xstat_name xstats_names_copy[IGB_NB_XSTATS];
1890
1891 eth_igb_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
1892 IGB_NB_XSTATS);
1893
1894 for (i = 0; i < limit; i++) {
1895 if (ids[i] >= IGB_NB_XSTATS) {
1896 PMD_INIT_LOG(ERR, "id value isn't valid");
1897 return -1;
1898 }
1899 strcpy(xstats_names[i].name,
1900 xstats_names_copy[ids[i]].name);
1901 }
1902 return limit;
1903 }
1904 }
1905
1906 static int
1907 eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1908 unsigned n)
1909 {
1910 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1911 struct e1000_hw_stats *hw_stats =
1912 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1913 unsigned i;
1914
1915 if (n < IGB_NB_XSTATS)
1916 return IGB_NB_XSTATS;
1917
1918 igb_read_stats_registers(hw, hw_stats);
1919
1920 /* If this is a reset xstats is NULL, and we have cleared the
1921 * registers by reading them.
1922 */
1923 if (!xstats)
1924 return 0;
1925
1926 /* Extended stats */
1927 for (i = 0; i < IGB_NB_XSTATS; i++) {
1928 xstats[i].id = i;
1929 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
1930 rte_igb_stats_strings[i].offset);
1931 }
1932
1933 return IGB_NB_XSTATS;
1934 }
1935
1936 static int
1937 eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1938 uint64_t *values, unsigned int n)
1939 {
1940 unsigned int i;
1941
1942 if (!ids) {
1943 struct e1000_hw *hw =
1944 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1945 struct e1000_hw_stats *hw_stats =
1946 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1947
1948 if (n < IGB_NB_XSTATS)
1949 return IGB_NB_XSTATS;
1950
1951 igb_read_stats_registers(hw, hw_stats);
1952
1953 /* If this is a reset xstats is NULL, and we have cleared the
1954 * registers by reading them.
1955 */
1956 if (!values)
1957 return 0;
1958
1959 /* Extended stats */
1960 for (i = 0; i < IGB_NB_XSTATS; i++)
1961 values[i] = *(uint64_t *)(((char *)hw_stats) +
1962 rte_igb_stats_strings[i].offset);
1963
1964 return IGB_NB_XSTATS;
1965
1966 } else {
1967 uint64_t values_copy[IGB_NB_XSTATS];
1968
1969 eth_igb_xstats_get_by_id(dev, NULL, values_copy,
1970 IGB_NB_XSTATS);
1971
1972 for (i = 0; i < n; i++) {
1973 if (ids[i] >= IGB_NB_XSTATS) {
1974 PMD_INIT_LOG(ERR, "id value isn't valid");
1975 return -1;
1976 }
1977 values[i] = values_copy[ids[i]];
1978 }
1979 return n;
1980 }
1981 }
1982
1983 static void
1984 igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
1985 {
1986 /* Good Rx packets, include VF loopback */
1987 UPDATE_VF_STAT(E1000_VFGPRC,
1988 hw_stats->last_gprc, hw_stats->gprc);
1989
1990 /* Good Rx octets, include VF loopback */
1991 UPDATE_VF_STAT(E1000_VFGORC,
1992 hw_stats->last_gorc, hw_stats->gorc);
1993
1994 /* Good Tx packets, include VF loopback */
1995 UPDATE_VF_STAT(E1000_VFGPTC,
1996 hw_stats->last_gptc, hw_stats->gptc);
1997
1998 /* Good Tx octets, include VF loopback */
1999 UPDATE_VF_STAT(E1000_VFGOTC,
2000 hw_stats->last_gotc, hw_stats->gotc);
2001
2002 /* Rx Multicast packets */
2003 UPDATE_VF_STAT(E1000_VFMPRC,
2004 hw_stats->last_mprc, hw_stats->mprc);
2005
2006 /* Good Rx loopback packets */
2007 UPDATE_VF_STAT(E1000_VFGPRLBC,
2008 hw_stats->last_gprlbc, hw_stats->gprlbc);
2009
2010 /* Good Rx loopback octets */
2011 UPDATE_VF_STAT(E1000_VFGORLBC,
2012 hw_stats->last_gorlbc, hw_stats->gorlbc);
2013
2014 /* Good Tx loopback packets */
2015 UPDATE_VF_STAT(E1000_VFGPTLBC,
2016 hw_stats->last_gptlbc, hw_stats->gptlbc);
2017
2018 /* Good Tx loopback octets */
2019 UPDATE_VF_STAT(E1000_VFGOTLBC,
2020 hw_stats->last_gotlbc, hw_stats->gotlbc);
2021 }
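/*
 * Note on UPDATE_VF_STAT (a sketch; the macro lives in e1000_ethdev.h and
 * may differ in detail): the VF statistics registers are 32-bit and wrap,
 * so the macro presumably keeps the last raw reading and accumulates the
 * wrap-safe delta into the 64-bit software counter, roughly:
 *
 *   latest = E1000_READ_REG(hw, reg);
 *   cur   += (latest - last) & UINT_MAX;
 *   last   = latest;
 */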
2022
2023 static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
2024 struct rte_eth_xstat_name *xstats_names,
2025 __rte_unused unsigned limit)
2026 {
2027 unsigned i;
2028
2029 if (xstats_names != NULL)
2030 for (i = 0; i < IGBVF_NB_XSTATS; i++) {
2031 snprintf(xstats_names[i].name,
2032 sizeof(xstats_names[i].name), "%s",
2033 rte_igbvf_stats_strings[i].name);
2034 }
2035 return IGBVF_NB_XSTATS;
2036 }
2037
2038 static int
2039 eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
2040 unsigned n)
2041 {
2042 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2043 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
2044 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2045 unsigned i;
2046
2047 if (n < IGBVF_NB_XSTATS)
2048 return IGBVF_NB_XSTATS;
2049
2050 igbvf_read_stats_registers(hw, hw_stats);
2051
2052 if (!xstats)
2053 return 0;
2054
2055 for (i = 0; i < IGBVF_NB_XSTATS; i++) {
2056 xstats[i].id = i;
2057 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
2058 rte_igbvf_stats_strings[i].offset);
2059 }
2060
2061 return IGBVF_NB_XSTATS;
2062 }
2063
2064 static int
2065 eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
2066 {
2067 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2068 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats *)
2069 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2070
2071 igbvf_read_stats_registers(hw, hw_stats);
2072
2073 if (rte_stats == NULL)
2074 return -EINVAL;
2075
2076 rte_stats->ipackets = hw_stats->gprc;
2077 rte_stats->ibytes = hw_stats->gorc;
2078 rte_stats->opackets = hw_stats->gptc;
2079 rte_stats->obytes = hw_stats->gotc;
2080 return 0;
2081 }
2082
2083 static void
2084 eth_igbvf_stats_reset(struct rte_eth_dev *dev)
2085 {
2086 struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
2087 E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
2088
2089 /* Sync HW registers to the last stats */
2090 eth_igbvf_stats_get(dev, NULL);
2091
2092 /* reset HW current stats */
2093 memset(&hw_stats->gprc, 0, sizeof(*hw_stats) -
2094 offsetof(struct e1000_vf_stats, gprc));
2095 }
2096
2097 static int
2098 eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
2099 size_t fw_size)
2100 {
2101 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2102 struct e1000_fw_version fw;
2103 int ret;
2104
2105 e1000_get_fw_version(hw, &fw);
2106
2107 switch (hw->mac.type) {
2108 case e1000_i210:
2109 case e1000_i211:
2110 if (!(e1000_get_flash_presence_i210(hw))) {
2111 ret = snprintf(fw_version, fw_size,
2112 "%2d.%2d-%d",
2113 fw.invm_major, fw.invm_minor,
2114 fw.invm_img_type);
2115 break;
2116 }
2117 /* fall through */
2118 default:
2119 /* if option rom is valid, display its version too */
2120 if (fw.or_valid) {
2121 ret = snprintf(fw_version, fw_size,
2122 "%d.%d, 0x%08x, %d.%d.%d",
2123 fw.eep_major, fw.eep_minor, fw.etrack_id,
2124 fw.or_major, fw.or_build, fw.or_patch);
2125 /* no option rom */
2126 } else {
2127 if (fw.etrack_id != 0X0000) {
2128 ret = snprintf(fw_version, fw_size,
2129 "%d.%d, 0x%08x",
2130 fw.eep_major, fw.eep_minor,
2131 fw.etrack_id);
2132 } else {
2133 ret = snprintf(fw_version, fw_size,
2134 "%d.%d.%d",
2135 fw.eep_major, fw.eep_minor,
2136 fw.eep_build);
2137 }
2138 }
2139 break;
2140 }
2141
2142 ret += 1; /* add the size of '\0' */
2143 if (fw_size < (u32)ret)
2144 return ret;
2145 else
2146 return 0;
2147 }
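/*
 * Usage sketch (illustrative, not part of the driver): applications reach
 * this handler through the generic ethdev API; a zero return means the
 * string fit, a positive return is the buffer size the caller should have
 * provided.
 *
 *   char fw[64];                                  // size chosen arbitrarily
 *   if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
 *           printf("port %u firmware: %s\n", port_id, fw);
 */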
2148
2149 static void
2150 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2151 {
2152 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2153
2154 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
2155 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
2156 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
2157 dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
2158 dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
2159 dev_info->rx_queue_offload_capa;
2160 dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
2161 dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
2162 dev_info->tx_queue_offload_capa;
2163
2164 switch (hw->mac.type) {
2165 case e1000_82575:
2166 dev_info->max_rx_queues = 4;
2167 dev_info->max_tx_queues = 4;
2168 dev_info->max_vmdq_pools = 0;
2169 break;
2170
2171 case e1000_82576:
2172 dev_info->max_rx_queues = 16;
2173 dev_info->max_tx_queues = 16;
2174 dev_info->max_vmdq_pools = ETH_8_POOLS;
2175 dev_info->vmdq_queue_num = 16;
2176 break;
2177
2178 case e1000_82580:
2179 dev_info->max_rx_queues = 8;
2180 dev_info->max_tx_queues = 8;
2181 dev_info->max_vmdq_pools = ETH_8_POOLS;
2182 dev_info->vmdq_queue_num = 8;
2183 break;
2184
2185 case e1000_i350:
2186 dev_info->max_rx_queues = 8;
2187 dev_info->max_tx_queues = 8;
2188 dev_info->max_vmdq_pools = ETH_8_POOLS;
2189 dev_info->vmdq_queue_num = 8;
2190 break;
2191
2192 case e1000_i354:
2193 dev_info->max_rx_queues = 8;
2194 dev_info->max_tx_queues = 8;
2195 break;
2196
2197 case e1000_i210:
2198 dev_info->max_rx_queues = 4;
2199 dev_info->max_tx_queues = 4;
2200 dev_info->max_vmdq_pools = 0;
2201 break;
2202
2203 case e1000_i211:
2204 dev_info->max_rx_queues = 2;
2205 dev_info->max_tx_queues = 2;
2206 dev_info->max_vmdq_pools = 0;
2207 break;
2208
2209 default:
2210 /* Should not happen */
2211 break;
2212 }
2213 dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
2214 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
2215 dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
2216
2217 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2218 .rx_thresh = {
2219 .pthresh = IGB_DEFAULT_RX_PTHRESH,
2220 .hthresh = IGB_DEFAULT_RX_HTHRESH,
2221 .wthresh = IGB_DEFAULT_RX_WTHRESH,
2222 },
2223 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
2224 .rx_drop_en = 0,
2225 .offloads = 0,
2226 };
2227
2228 dev_info->default_txconf = (struct rte_eth_txconf) {
2229 .tx_thresh = {
2230 .pthresh = IGB_DEFAULT_TX_PTHRESH,
2231 .hthresh = IGB_DEFAULT_TX_HTHRESH,
2232 .wthresh = IGB_DEFAULT_TX_WTHRESH,
2233 },
2234 .offloads = 0,
2235 };
2236
2237 dev_info->rx_desc_lim = rx_desc_lim;
2238 dev_info->tx_desc_lim = tx_desc_lim;
2239
2240 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
2241 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
2242 ETH_LINK_SPEED_1G;
2243 }
2244
2245 static const uint32_t *
2246 eth_igb_supported_ptypes_get(struct rte_eth_dev *dev)
2247 {
2248 static const uint32_t ptypes[] = {
2249 /* refers to igb_rxd_pkt_info_to_pkt_type() */
2250 RTE_PTYPE_L2_ETHER,
2251 RTE_PTYPE_L3_IPV4,
2252 RTE_PTYPE_L3_IPV4_EXT,
2253 RTE_PTYPE_L3_IPV6,
2254 RTE_PTYPE_L3_IPV6_EXT,
2255 RTE_PTYPE_L4_TCP,
2256 RTE_PTYPE_L4_UDP,
2257 RTE_PTYPE_L4_SCTP,
2258 RTE_PTYPE_TUNNEL_IP,
2259 RTE_PTYPE_INNER_L3_IPV6,
2260 RTE_PTYPE_INNER_L3_IPV6_EXT,
2261 RTE_PTYPE_INNER_L4_TCP,
2262 RTE_PTYPE_INNER_L4_UDP,
2263 RTE_PTYPE_UNKNOWN
2264 };
2265
2266 if (dev->rx_pkt_burst == eth_igb_recv_pkts ||
2267 dev->rx_pkt_burst == eth_igb_recv_scattered_pkts)
2268 return ptypes;
2269 return NULL;
2270 }
2271
2272 static void
2273 eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2274 {
2275 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2276
2277 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
2278 dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
2279 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
2280 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
2281 DEV_TX_OFFLOAD_IPV4_CKSUM |
2282 DEV_TX_OFFLOAD_UDP_CKSUM |
2283 DEV_TX_OFFLOAD_TCP_CKSUM |
2284 DEV_TX_OFFLOAD_SCTP_CKSUM |
2285 DEV_TX_OFFLOAD_TCP_TSO;
2286 switch (hw->mac.type) {
2287 case e1000_vfadapt:
2288 dev_info->max_rx_queues = 2;
2289 dev_info->max_tx_queues = 2;
2290 break;
2291 case e1000_vfadapt_i350:
2292 dev_info->max_rx_queues = 1;
2293 dev_info->max_tx_queues = 1;
2294 break;
2295 default:
2296 /* Should not happen */
2297 break;
2298 }
2299
2300 dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
2301 dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
2302 dev_info->rx_queue_offload_capa;
2303 dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
2304 dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
2305 dev_info->tx_queue_offload_capa;
2306
2307 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2308 .rx_thresh = {
2309 .pthresh = IGB_DEFAULT_RX_PTHRESH,
2310 .hthresh = IGB_DEFAULT_RX_HTHRESH,
2311 .wthresh = IGB_DEFAULT_RX_WTHRESH,
2312 },
2313 .rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
2314 .rx_drop_en = 0,
2315 .offloads = 0,
2316 };
2317
2318 dev_info->default_txconf = (struct rte_eth_txconf) {
2319 .tx_thresh = {
2320 .pthresh = IGB_DEFAULT_TX_PTHRESH,
2321 .hthresh = IGB_DEFAULT_TX_HTHRESH,
2322 .wthresh = IGB_DEFAULT_TX_WTHRESH,
2323 },
2324 .offloads = 0,
2325 };
2326
2327 dev_info->rx_desc_lim = rx_desc_lim;
2328 dev_info->tx_desc_lim = tx_desc_lim;
2329 }
2330
2331 /* return 0 means link status changed, -1 means not changed */
2332 static int
2333 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2334 {
2335 struct e1000_hw *hw =
2336 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2337 struct rte_eth_link link;
2338 int link_check, count;
2339
2340 link_check = 0;
2341 hw->mac.get_link_status = 1;
2342
2343 /* possible wait-to-complete in up to 9 seconds */
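/*
 * The 9 second figure presumably comes from IGB_LINK_UPDATE_CHECK_TIMEOUT
 * iterations of IGB_LINK_UPDATE_CHECK_INTERVAL milliseconds each
 * (90 * 100 ms in the usual e1000_ethdev.h definitions).
 */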
2344 for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
2345 /* Read the real link status */
2346 switch (hw->phy.media_type) {
2347 case e1000_media_type_copper:
2348 /* Do the work to read phy */
2349 e1000_check_for_link(hw);
2350 link_check = !hw->mac.get_link_status;
2351 break;
2352
2353 case e1000_media_type_fiber:
2354 e1000_check_for_link(hw);
2355 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2356 E1000_STATUS_LU);
2357 break;
2358
2359 case e1000_media_type_internal_serdes:
2360 e1000_check_for_link(hw);
2361 link_check = hw->mac.serdes_has_link;
2362 break;
2363
2364 /* VF device is type_unknown */
2365 case e1000_media_type_unknown:
2366 eth_igbvf_link_update(hw);
2367 link_check = !hw->mac.get_link_status;
2368 break;
2369
2370 default:
2371 break;
2372 }
2373 if (link_check || wait_to_complete == 0)
2374 break;
2375 rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
2376 }
2377 memset(&link, 0, sizeof(link));
2378
2379 /* Now we check if a transition has happened */
2380 if (link_check) {
2381 uint16_t duplex, speed;
2382 hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
2383 link.link_duplex = (duplex == FULL_DUPLEX) ?
2384 ETH_LINK_FULL_DUPLEX :
2385 ETH_LINK_HALF_DUPLEX;
2386 link.link_speed = speed;
2387 link.link_status = ETH_LINK_UP;
2388 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2389 ETH_LINK_SPEED_FIXED);
2390 } else if (!link_check) {
2391 link.link_speed = 0;
2392 link.link_duplex = ETH_LINK_HALF_DUPLEX;
2393 link.link_status = ETH_LINK_DOWN;
2394 link.link_autoneg = ETH_LINK_FIXED;
2395 }
2396
2397 return rte_eth_linkstatus_set(dev, &link);
2398 }
2399
2400 /*
2401 * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
2402 * For ASF and Pass Through versions of f/w this means
2403 * that the driver is loaded.
2404 */
2405 static void
2406 igb_hw_control_acquire(struct e1000_hw *hw)
2407 {
2408 uint32_t ctrl_ext;
2409
2410 /* Let firmware know the driver has taken over */
2411 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2412 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2413 }
2414
2415 /*
2416 * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
2417 * For ASF and Pass Through versions of f/w this means that the
2418 * driver is no longer loaded.
2419 */
2420 static void
2421 igb_hw_control_release(struct e1000_hw *hw)
2422 {
2423 uint32_t ctrl_ext;
2424
2425 /* Let firmware take over control of h/w */
2426 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2427 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
2428 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2429 }
2430
2431 /*
2432 * Bit of a misnomer, what this really means is
2433 * to enable OS management of the system... aka
2434 * to disable special hardware management features.
2435 */
2436 static void
2437 igb_init_manageability(struct e1000_hw *hw)
2438 {
2439 if (e1000_enable_mng_pass_thru(hw)) {
2440 uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H);
2441 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
2442
2443 /* disable hardware interception of ARP */
2444 manc &= ~(E1000_MANC_ARP_EN);
2445
2446 /* enable receiving management packets to the host */
2447 manc |= E1000_MANC_EN_MNG2HOST;
2448 manc2h |= 1 << 5; /* Mng Port 623 */
2449 manc2h |= 1 << 6; /* Mng Port 664 */
2450 E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
2451 E1000_WRITE_REG(hw, E1000_MANC, manc);
2452 }
2453 }
2454
2455 static void
2456 igb_release_manageability(struct e1000_hw *hw)
2457 {
2458 if (e1000_enable_mng_pass_thru(hw)) {
2459 uint32_t manc = E1000_READ_REG(hw, E1000_MANC);
2460
2461 manc |= E1000_MANC_ARP_EN;
2462 manc &= ~E1000_MANC_EN_MNG2HOST;
2463
2464 E1000_WRITE_REG(hw, E1000_MANC, manc);
2465 }
2466 }
2467
2468 static void
2469 eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
2470 {
2471 struct e1000_hw *hw =
2472 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2473 uint32_t rctl;
2474
2475 rctl = E1000_READ_REG(hw, E1000_RCTL);
2476 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2477 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2478 }
2479
2480 static void
2481 eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
2482 {
2483 struct e1000_hw *hw =
2484 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2485 uint32_t rctl;
2486
2487 rctl = E1000_READ_REG(hw, E1000_RCTL);
2488 rctl &= (~E1000_RCTL_UPE);
2489 if (dev->data->all_multicast == 1)
2490 rctl |= E1000_RCTL_MPE;
2491 else
2492 rctl &= (~E1000_RCTL_MPE);
2493 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2494 }
2495
2496 static void
2497 eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
2498 {
2499 struct e1000_hw *hw =
2500 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2501 uint32_t rctl;
2502
2503 rctl = E1000_READ_REG(hw, E1000_RCTL);
2504 rctl |= E1000_RCTL_MPE;
2505 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2506 }
2507
2508 static void
2509 eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
2510 {
2511 struct e1000_hw *hw =
2512 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2513 uint32_t rctl;
2514
2515 if (dev->data->promiscuous == 1)
2516 return; /* must remain in all_multicast mode */
2517 rctl = E1000_READ_REG(hw, E1000_RCTL);
2518 rctl &= (~E1000_RCTL_MPE);
2519 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2520 }
2521
2522 static int
2523 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2524 {
2525 struct e1000_hw *hw =
2526 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2527 struct e1000_vfta * shadow_vfta =
2528 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2529 uint32_t vfta;
2530 uint32_t vid_idx;
2531 uint32_t vid_bit;
2532
2533 vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
2534 E1000_VFTA_ENTRY_MASK);
2535 vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
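/*
 * Worked example (assuming the usual 5 / 0x7F / 0x1F shift and mask
 * values): vlan_id 100 gives vid_idx = (100 >> 5) & 0x7F = 3 and
 * vid_bit = 1 << (100 & 0x1F) = 1 << 4, i.e. bit 4 of VFTA[3].
 */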
2536 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx);
2537 if (on)
2538 vfta |= vid_bit;
2539 else
2540 vfta &= ~vid_bit;
2541 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta);
2542
2543 /* update local VFTA copy */
2544 shadow_vfta->vfta[vid_idx] = vfta;
2545
2546 return 0;
2547 }
2548
2549 static int
2550 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
2551 enum rte_vlan_type vlan_type,
2552 uint16_t tpid)
2553 {
2554 struct e1000_hw *hw =
2555 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2556 uint32_t reg, qinq;
2557
2558 qinq = E1000_READ_REG(hw, E1000_CTRL_EXT);
2559 qinq &= E1000_CTRL_EXT_EXT_VLAN;
2560
2561 /* only outer TPID of double VLAN can be configured */
2562 if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
2563 reg = E1000_READ_REG(hw, E1000_VET);
2564 reg = (reg & (~E1000_VET_VET_EXT)) |
2565 ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
2566 E1000_WRITE_REG(hw, E1000_VET, reg);
2567
2568 return 0;
2569 }
2570
2571 /* all other TPID values are read-only */
2572 PMD_DRV_LOG(ERR, "Not supported");
2573
2574 return -ENOTSUP;
2575 }
2576
2577 static void
2578 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
2579 {
2580 struct e1000_hw *hw =
2581 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2582 uint32_t reg;
2583
2584 /* Filter Table Disable */
2585 reg = E1000_READ_REG(hw, E1000_RCTL);
2586 reg &= ~E1000_RCTL_CFIEN;
2587 reg &= ~E1000_RCTL_VFE;
2588 E1000_WRITE_REG(hw, E1000_RCTL, reg);
2589 }
2590
2591 static void
2592 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
2593 {
2594 struct e1000_hw *hw =
2595 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2596 struct e1000_vfta * shadow_vfta =
2597 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
2598 uint32_t reg;
2599 int i;
2600
2601 /* Filter Table Enable, CFI not used for packet acceptance */
2602 reg = E1000_READ_REG(hw, E1000_RCTL);
2603 reg &= ~E1000_RCTL_CFIEN;
2604 reg |= E1000_RCTL_VFE;
2605 E1000_WRITE_REG(hw, E1000_RCTL, reg);
2606
2607 /* restore VFTA table */
2608 for (i = 0; i < IGB_VFTA_SIZE; i++)
2609 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
2610 }
2611
2612 static void
2613 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
2614 {
2615 struct e1000_hw *hw =
2616 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2617 uint32_t reg;
2618
2619 /* VLAN Mode Disable */
2620 reg = E1000_READ_REG(hw, E1000_CTRL);
2621 reg &= ~E1000_CTRL_VME;
2622 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2623 }
2624
2625 static void
2626 igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
2627 {
2628 struct e1000_hw *hw =
2629 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2630 uint32_t reg;
2631
2632 /* VLAN Mode Enable */
2633 reg = E1000_READ_REG(hw, E1000_CTRL);
2634 reg |= E1000_CTRL_VME;
2635 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2636 }
2637
2638 static void
2639 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
2640 {
2641 struct e1000_hw *hw =
2642 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2643 uint32_t reg;
2644
2645 /* CTRL_EXT: Extended VLAN */
2646 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
2647 reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
2648 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2649
2650 /* Update maximum packet length */
2651 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
2652 E1000_WRITE_REG(hw, E1000_RLPML,
2653 dev->data->dev_conf.rxmode.max_rx_pkt_len +
2654 VLAN_TAG_SIZE);
2655 }
2656
2657 static void
2658 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
2659 {
2660 struct e1000_hw *hw =
2661 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2662 uint32_t reg;
2663
2664 /* CTRL_EXT: Extended VLAN */
2665 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
2666 reg |= E1000_CTRL_EXT_EXTEND_VLAN;
2667 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
2668
2669 /* Update maximum packet length */
2670 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
2671 E1000_WRITE_REG(hw, E1000_RLPML,
2672 dev->data->dev_conf.rxmode.max_rx_pkt_len +
2673 2 * VLAN_TAG_SIZE);
2674 }
2675
2676 static int
2677 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2678 {
2679 struct rte_eth_rxmode *rxmode;
2680
2681 rxmode = &dev->data->dev_conf.rxmode;
2682 if(mask & ETH_VLAN_STRIP_MASK){
2683 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2684 igb_vlan_hw_strip_enable(dev);
2685 else
2686 igb_vlan_hw_strip_disable(dev);
2687 }
2688
2689 if(mask & ETH_VLAN_FILTER_MASK){
2690 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2691 igb_vlan_hw_filter_enable(dev);
2692 else
2693 igb_vlan_hw_filter_disable(dev);
2694 }
2695
2696 if(mask & ETH_VLAN_EXTEND_MASK){
2697 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2698 igb_vlan_hw_extend_enable(dev);
2699 else
2700 igb_vlan_hw_extend_disable(dev);
2701 }
2702
2703 return 0;
2704 }
2705
2706
2707 /**
2708 * It enables the interrupt mask and then enables the interrupt.
2709 *
2710 * @param dev
2711 * Pointer to struct rte_eth_dev.
2712 * @param on
2713 * Enable or Disable
2714 *
2715 * @return
2716 * - On success, zero.
2717 * - On failure, a negative value.
2718 */
2719 static int
2720 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
2721 {
2722 struct e1000_interrupt *intr =
2723 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2724
2725 if (on)
2726 intr->mask |= E1000_ICR_LSC;
2727 else
2728 intr->mask &= ~E1000_ICR_LSC;
2729
2730 return 0;
2731 }
2732
2733 /* It clears the interrupt causes and enables the interrupt.
2734 * It will be called only once, during NIC initialization.
2735 *
2736 * @param dev
2737 * Pointer to struct rte_eth_dev.
2738 *
2739 * @return
2740 * - On success, zero.
2741 * - On failure, a negative value.
2742 */
2743 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
2744 {
2745 uint32_t mask, regval;
2746 struct e1000_hw *hw =
2747 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2748 struct rte_eth_dev_info dev_info;
2749
2750 memset(&dev_info, 0, sizeof(dev_info));
2751 eth_igb_infos_get(dev, &dev_info);
2752
2753 mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues);
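/*
 * Example (illustrative): with max_rx_queues = 16 (e.g. an 82576) this is
 * 0xFFFFFFFF >> 16 = 0x0000FFFF, i.e. one EIMS enable bit per Rx queue.
 */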
2754 regval = E1000_READ_REG(hw, E1000_EIMS);
2755 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
2756
2757 return 0;
2758 }
2759
2760 /*
2761 * It reads ICR and gets the interrupt causes, checks them and sets a bit flag
2762 * to update link status.
2763 *
2764 * @param dev
2765 * Pointer to struct rte_eth_dev.
2766 *
2767 * @return
2768 * - On success, zero.
2769 * - On failure, a negative value.
2770 */
2771 static int
2772 eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
2773 {
2774 uint32_t icr;
2775 struct e1000_hw *hw =
2776 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2777 struct e1000_interrupt *intr =
2778 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2779
2780 igb_intr_disable(hw);
2781
2782 /* read-on-clear nic registers here */
2783 icr = E1000_READ_REG(hw, E1000_ICR);
2784
2785 intr->flags = 0;
2786 if (icr & E1000_ICR_LSC) {
2787 intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
2788 }
2789
2790 if (icr & E1000_ICR_VMMB)
2791 intr->flags |= E1000_FLAG_MAILBOX;
2792
2793 return 0;
2794 }
2795
2796 /*
2797 * It executes link_update after knowing an interrupt is present.
2798 *
2799 * @param dev
2800 * Pointer to struct rte_eth_dev.
2801 *
2802 * @return
2803 * - On success, zero.
2804 * - On failure, a negative value.
2805 */
2806 static int
2807 eth_igb_interrupt_action(struct rte_eth_dev *dev,
2808 struct rte_intr_handle *intr_handle)
2809 {
2810 struct e1000_hw *hw =
2811 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2812 struct e1000_interrupt *intr =
2813 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2814 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2815 struct rte_eth_link link;
2816 int ret;
2817
2818 if (intr->flags & E1000_FLAG_MAILBOX) {
2819 igb_pf_mbx_process(dev);
2820 intr->flags &= ~E1000_FLAG_MAILBOX;
2821 }
2822
2823 igb_intr_enable(dev);
2824 rte_intr_enable(intr_handle);
2825
2826 if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
2827 intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
2828
2829 /* set get_link_status to check register later */
2830 hw->mac.get_link_status = 1;
2831 ret = eth_igb_link_update(dev, 0);
2832
2833 /* check if link has changed */
2834 if (ret < 0)
2835 return 0;
2836
2837 rte_eth_linkstatus_get(dev, &link);
2838 if (link.link_status) {
2839 PMD_INIT_LOG(INFO,
2840 " Port %d: Link Up - speed %u Mbps - %s",
2841 dev->data->port_id,
2842 (unsigned)link.link_speed,
2843 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
2844 "full-duplex" : "half-duplex");
2845 } else {
2846 PMD_INIT_LOG(INFO, " Port %d: Link Down",
2847 dev->data->port_id);
2848 }
2849
2850 PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
2851 pci_dev->addr.domain,
2852 pci_dev->addr.bus,
2853 pci_dev->addr.devid,
2854 pci_dev->addr.function);
2855 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
2856 NULL);
2857 }
2858
2859 return 0;
2860 }
2861
2862 /**
2863 * Interrupt handler which shall be registered first.
2864 *
2865 * @param handle
2866 * Pointer to interrupt handle.
2867 * @param param
2868 * The address of parameter (struct rte_eth_dev *) registered before.
2869 *
2870 * @return
2871 * void
2872 */
2873 static void
2874 eth_igb_interrupt_handler(void *param)
2875 {
2876 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2877
2878 eth_igb_interrupt_get_status(dev);
2879 eth_igb_interrupt_action(dev, dev->intr_handle);
2880 }
2881
2882 static int
2883 eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev)
2884 {
2885 uint32_t eicr;
2886 struct e1000_hw *hw =
2887 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2888 struct e1000_interrupt *intr =
2889 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2890
2891 igbvf_intr_disable(hw);
2892
2893 /* read-on-clear nic registers here */
2894 eicr = E1000_READ_REG(hw, E1000_EICR);
2895 intr->flags = 0;
2896
2897 if (eicr == E1000_VTIVAR_MISC_MAILBOX)
2898 intr->flags |= E1000_FLAG_MAILBOX;
2899
2900 return 0;
2901 }
2902
2903 void igbvf_mbx_process(struct rte_eth_dev *dev)
2904 {
2905 struct e1000_hw *hw =
2906 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2907 struct e1000_mbx_info *mbx = &hw->mbx;
2908 u32 in_msg = 0;
2909
2910 /* peek the message first */
2911 in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0));
2912
2913 /* PF reset VF event */
2914 if (in_msg == E1000_PF_CONTROL_MSG) {
2915 /* dummy mbx read to ack pf */
2916 if (mbx->ops.read(hw, &in_msg, 1, 0))
2917 return;
2918 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
2919 NULL);
2920 }
2921 }
2922
2923 static int
2924 eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle)
2925 {
2926 struct e1000_interrupt *intr =
2927 E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
2928
2929 if (intr->flags & E1000_FLAG_MAILBOX) {
2930 igbvf_mbx_process(dev);
2931 intr->flags &= ~E1000_FLAG_MAILBOX;
2932 }
2933
2934 igbvf_intr_enable(dev);
2935 rte_intr_enable(intr_handle);
2936
2937 return 0;
2938 }
2939
2940 static void
2941 eth_igbvf_interrupt_handler(void *param)
2942 {
2943 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
2944
2945 eth_igbvf_interrupt_get_status(dev);
2946 eth_igbvf_interrupt_action(dev, dev->intr_handle);
2947 }
2948
2949 static int
2950 eth_igb_led_on(struct rte_eth_dev *dev)
2951 {
2952 struct e1000_hw *hw;
2953
2954 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2955 return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
2956 }
2957
2958 static int
2959 eth_igb_led_off(struct rte_eth_dev *dev)
2960 {
2961 struct e1000_hw *hw;
2962
2963 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2964 return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
2965 }
2966
2967 static int
2968 eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2969 {
2970 struct e1000_hw *hw;
2971 uint32_t ctrl;
2972 int tx_pause;
2973 int rx_pause;
2974
2975 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2976 fc_conf->pause_time = hw->fc.pause_time;
2977 fc_conf->high_water = hw->fc.high_water;
2978 fc_conf->low_water = hw->fc.low_water;
2979 fc_conf->send_xon = hw->fc.send_xon;
2980 fc_conf->autoneg = hw->mac.autoneg;
2981
2982 /*
2983 * Return rx_pause and tx_pause status according to actual setting of
2984 * the TFCE and RFCE bits in the CTRL register.
2985 */
2986 ctrl = E1000_READ_REG(hw, E1000_CTRL);
2987 if (ctrl & E1000_CTRL_TFCE)
2988 tx_pause = 1;
2989 else
2990 tx_pause = 0;
2991
2992 if (ctrl & E1000_CTRL_RFCE)
2993 rx_pause = 1;
2994 else
2995 rx_pause = 0;
2996
2997 if (rx_pause && tx_pause)
2998 fc_conf->mode = RTE_FC_FULL;
2999 else if (rx_pause)
3000 fc_conf->mode = RTE_FC_RX_PAUSE;
3001 else if (tx_pause)
3002 fc_conf->mode = RTE_FC_TX_PAUSE;
3003 else
3004 fc_conf->mode = RTE_FC_NONE;
3005
3006 return 0;
3007 }
3008
3009 static int
3010 eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3011 {
3012 struct e1000_hw *hw;
3013 int err;
3014 enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
3015 e1000_fc_none,
3016 e1000_fc_rx_pause,
3017 e1000_fc_tx_pause,
3018 e1000_fc_full
3019 };
3020 uint32_t rx_buf_size;
3021 uint32_t max_high_water;
3022 uint32_t rctl;
3023
3024 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3025 if (fc_conf->autoneg != hw->mac.autoneg)
3026 return -ENOTSUP;
3027 rx_buf_size = igb_get_rx_buffer_size(hw);
3028 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
3029
3030 /* At least reserve one Ethernet frame for watermark */
3031 max_high_water = rx_buf_size - ETHER_MAX_LEN;
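/*
 * Example (values are hypothetical, assuming ETHER_MAX_LEN = 1518): with a
 * 64 KiB Rx packet buffer, max_high_water = 65536 - 1518 = 64018 bytes.
 */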
3032 if ((fc_conf->high_water > max_high_water) ||
3033 (fc_conf->high_water < fc_conf->low_water)) {
3034 PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
3035 PMD_INIT_LOG(ERR, "high water must be <= 0x%x", max_high_water);
3036 return -EINVAL;
3037 }
3038
3039 hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
3040 hw->fc.pause_time = fc_conf->pause_time;
3041 hw->fc.high_water = fc_conf->high_water;
3042 hw->fc.low_water = fc_conf->low_water;
3043 hw->fc.send_xon = fc_conf->send_xon;
3044
3045 err = e1000_setup_link_generic(hw);
3046 if (err == E1000_SUCCESS) {
3047
3048 /* check if we want to forward MAC frames - driver doesn't have native
3049 * capability to do that, so we'll write the registers ourselves */
3050
3051 rctl = E1000_READ_REG(hw, E1000_RCTL);
3052
3053 /* set or clear RCTL.PMCF bit depending on configuration */
3054 if (fc_conf->mac_ctrl_frame_fwd != 0)
3055 rctl |= E1000_RCTL_PMCF;
3056 else
3057 rctl &= ~E1000_RCTL_PMCF;
3058
3059 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
3060 E1000_WRITE_FLUSH(hw);
3061
3062 return 0;
3063 }
3064
3065 PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
3066 return -EIO;
3067 }
3068
3069 #define E1000_RAH_POOLSEL_SHIFT (18)
3070 static int
3071 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
3072 uint32_t index, uint32_t pool)
3073 {
3074 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3075 uint32_t rah;
3076
3077 e1000_rar_set(hw, mac_addr->addr_bytes, index);
3078 rah = E1000_READ_REG(hw, E1000_RAH(index));
3079 rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
3080 E1000_WRITE_REG(hw, E1000_RAH(index), rah);
3081 return 0;
3082 }
3083
3084 static void
3085 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
3086 {
3087 uint8_t addr[ETHER_ADDR_LEN];
3088 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3089
3090 memset(addr, 0, sizeof(addr));
3091
3092 e1000_rar_set(hw, addr, index);
3093 }
3094
3095 static int
3096 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
3097 struct ether_addr *addr)
3098 {
3099 eth_igb_rar_clear(dev, 0);
3100 eth_igb_rar_set(dev, (void *)addr, 0, 0);
3101
3102 return 0;
3103 }
3104 /*
3105 * Virtual Function operations
3106 */
3107 static void
3108 igbvf_intr_disable(struct e1000_hw *hw)
3109 {
3110 PMD_INIT_FUNC_TRACE();
3111
3112 /* Clear interrupt mask to stop interrupts from being generated */
3113 E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF);
3114
3115 E1000_WRITE_FLUSH(hw);
3116 }
3117
3118 static void
3119 igbvf_stop_adapter(struct rte_eth_dev *dev)
3120 {
3121 u32 reg_val;
3122 u16 i;
3123 struct rte_eth_dev_info dev_info;
3124 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3125
3126 memset(&dev_info, 0, sizeof(dev_info));
3127 eth_igbvf_infos_get(dev, &dev_info);
3128
3129 /* Clear interrupt mask to stop interrupts from being generated */
3130 igbvf_intr_disable(hw);
3131
3132 /* Clear any pending interrupts, flush previous writes */
3133 E1000_READ_REG(hw, E1000_EICR);
3134
3135 /* Disable the transmit unit. Each queue must be disabled. */
3136 for (i = 0; i < dev_info.max_tx_queues; i++)
3137 E1000_WRITE_REG(hw, E1000_TXDCTL(i), E1000_TXDCTL_SWFLSH);
3138
3139 /* Disable the receive unit by stopping each queue */
3140 for (i = 0; i < dev_info.max_rx_queues; i++) {
3141 reg_val = E1000_READ_REG(hw, E1000_RXDCTL(i));
3142 reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
3143 E1000_WRITE_REG(hw, E1000_RXDCTL(i), reg_val);
3144 while (E1000_READ_REG(hw, E1000_RXDCTL(i)) & E1000_RXDCTL_QUEUE_ENABLE)
3145 ;
3146 }
3147
3148 /* flush all queue disables */
3149 E1000_WRITE_FLUSH(hw);
3150 msec_delay(2);
3151 }
3152
3153 static int eth_igbvf_link_update(struct e1000_hw *hw)
3154 {
3155 struct e1000_mbx_info *mbx = &hw->mbx;
3156 struct e1000_mac_info *mac = &hw->mac;
3157 int ret_val = E1000_SUCCESS;
3158
3159 PMD_INIT_LOG(DEBUG, "e1000_check_for_link_vf");
3160
3161 /*
3162 * We only want to run this if there has been a reset asserted.
3163 * In this case that could mean a link change, a device reset,
3164 * or a virtual function reset.
3165 */
3166
3167 /* If we were hit with a reset or timeout drop the link */
3168 if (!e1000_check_for_rst(hw, 0) || !mbx->timeout)
3169 mac->get_link_status = TRUE;
3170
3171 if (!mac->get_link_status)
3172 goto out;
3173
3174 /* if link status is down no point in checking to see if pf is up */
3175 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
3176 goto out;
3177
3178 /* if we passed all the tests above then the link is up and we no
3179 * longer need to check for link */
3180 mac->get_link_status = FALSE;
3181
3182 out:
3183 return ret_val;
3184 }
3185
3186
3187 static int
3188 igbvf_dev_configure(struct rte_eth_dev *dev)
3189 {
3190 struct rte_eth_conf* conf = &dev->data->dev_conf;
3191
3192 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
3193 dev->data->port_id);
3194
3195 /*
3196 * VF has no ability to enable/disable HW CRC
3197 * Keep the persistent behavior the same as Host PF
3198 */
3199 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
3200 if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
3201 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
3202 conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
3203 }
3204 #else
3205 if (!rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
3206 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
3207 conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
3208 }
3209 #endif
3210
3211 return 0;
3212 }
3213
3214 static int
3215 igbvf_dev_start(struct rte_eth_dev *dev)
3216 {
3217 struct e1000_hw *hw =
3218 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3219 struct e1000_adapter *adapter =
3220 E1000_DEV_PRIVATE(dev->data->dev_private);
3221 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3222 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3223 int ret;
3224 uint32_t intr_vector = 0;
3225
3226 PMD_INIT_FUNC_TRACE();
3227
3228 hw->mac.ops.reset_hw(hw);
3229 adapter->stopped = 0;
3230
3231 /* Set all vfta */
3232 igbvf_set_vfta_all(dev,1);
3233
3234 eth_igbvf_tx_init(dev);
3235
3236 /* This can fail when allocating mbufs for descriptor rings */
3237 ret = eth_igbvf_rx_init(dev);
3238 if (ret) {
3239 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
3240 igb_dev_clear_queues(dev);
3241 return ret;
3242 }
3243
3244 /* check and configure queue intr-vector mapping */
3245 if (rte_intr_cap_multiple(intr_handle) &&
3246 dev->data->dev_conf.intr_conf.rxq) {
3247 intr_vector = dev->data->nb_rx_queues;
3248 ret = rte_intr_efd_enable(intr_handle, intr_vector);
3249 if (ret)
3250 return ret;
3251 }
3252
3253 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
3254 intr_handle->intr_vec =
3255 rte_zmalloc("intr_vec",
3256 dev->data->nb_rx_queues * sizeof(int), 0);
3257 if (!intr_handle->intr_vec) {
3258 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
3259 " intr_vec", dev->data->nb_rx_queues);
3260 return -ENOMEM;
3261 }
3262 }
3263
3264 eth_igbvf_configure_msix_intr(dev);
3265
3266 /* enable uio/vfio intr/eventfd mapping */
3267 rte_intr_enable(intr_handle);
3268
3269 /* resume enabled intr since hw reset */
3270 igbvf_intr_enable(dev);
3271
3272 return 0;
3273 }
3274
3275 static void
3276 igbvf_dev_stop(struct rte_eth_dev *dev)
3277 {
3278 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3279 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
3280
3281 PMD_INIT_FUNC_TRACE();
3282
3283 igbvf_stop_adapter(dev);
3284
3285 /*
3286 * Clear what we set, but keep shadow_vfta so it can be
3287 * restored after the device starts again
3288 */
3289 igbvf_set_vfta_all(dev,0);
3290
3291 igb_dev_clear_queues(dev);
3292
3293 /* disable intr eventfd mapping */
3294 rte_intr_disable(intr_handle);
3295
3296 /* Clean datapath event and queue/vec mapping */
3297 rte_intr_efd_disable(intr_handle);
3298 if (intr_handle->intr_vec) {
3299 rte_free(intr_handle->intr_vec);
3300 intr_handle->intr_vec = NULL;
3301 }
3302 }
3303
3304 static void
3305 igbvf_dev_close(struct rte_eth_dev *dev)
3306 {
3307 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3308 struct e1000_adapter *adapter =
3309 E1000_DEV_PRIVATE(dev->data->dev_private);
3310 struct ether_addr addr;
3311
3312 PMD_INIT_FUNC_TRACE();
3313
3314 e1000_reset_hw(hw);
3315
3316 igbvf_dev_stop(dev);
3317 adapter->stopped = 1;
3318 igb_dev_free_queues(dev);
3319
3320 /**
3321 * reprogram the RAR with a zero mac address,
3322 * to ensure that the VF traffic goes to the PF
3323 * after stop, close and detach of the VF.
3324 **/
3325
3326 memset(&addr, 0, sizeof(addr));
3327 igbvf_default_mac_addr_set(dev, &addr);
3328 }
3329
3330 static void
3331 igbvf_promiscuous_enable(struct rte_eth_dev *dev)
3332 {
3333 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3334
3335 /* Set both unicast and multicast promisc */
3336 e1000_promisc_set_vf(hw, e1000_promisc_enabled);
3337 }
3338
3339 static void
3340 igbvf_promiscuous_disable(struct rte_eth_dev *dev)
3341 {
3342 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3343
3344 /* If in allmulticast mode leave multicast promisc */
3345 if (dev->data->all_multicast == 1)
3346 e1000_promisc_set_vf(hw, e1000_promisc_multicast);
3347 else
3348 e1000_promisc_set_vf(hw, e1000_promisc_disabled);
3349 }
3350
3351 static void
3352 igbvf_allmulticast_enable(struct rte_eth_dev *dev)
3353 {
3354 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3355
3356 /* In promiscuous mode multicast promisc already set */
3357 if (dev->data->promiscuous == 0)
3358 e1000_promisc_set_vf(hw, e1000_promisc_multicast);
3359 }
3360
3361 static void
3362 igbvf_allmulticast_disable(struct rte_eth_dev *dev)
3363 {
3364 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3365
3366 /* In promiscuous mode leave multicast promisc enabled */
3367 if (dev->data->promiscuous == 0)
3368 e1000_promisc_set_vf(hw, e1000_promisc_disabled);
3369 }
3370
3371 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
3372 {
3373 struct e1000_mbx_info *mbx = &hw->mbx;
3374 uint32_t msgbuf[2];
3375 s32 err;
3376
3377 /* After setting a VLAN, VLAN stripping will also be enabled in the igb driver */
3378 msgbuf[0] = E1000_VF_SET_VLAN;
3379 msgbuf[1] = vid;
3380 /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
3381 if (on)
3382 msgbuf[0] |= E1000_VF_SET_VLAN_ADD;
3383
3384 err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
3385 if (err)
3386 goto mbx_err;
3387
3388 err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
3389 if (err)
3390 goto mbx_err;
3391
3392 msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
3393 if (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))
3394 err = -EINVAL;
3395
3396 mbx_err:
3397 return err;
3398 }
3399
3400 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
3401 {
3402 struct e1000_hw *hw =
3403 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3404 struct e1000_vfta * shadow_vfta =
3405 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3406 int i = 0, j = 0, vfta = 0, mask = 1;
3407
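/*
 * Each shadow_vfta word covers 32 consecutive VLAN IDs, so bit j of word i
 * maps to VLAN (i << 5) + j; e.g. bit 4 of word 3 is VLAN 100
 * (illustrative).
 */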
3408 for (i = 0; i < IGB_VFTA_SIZE; i++){
3409 vfta = shadow_vfta->vfta[i];
3410 if(vfta){
3411 mask = 1;
3412 for (j = 0; j < 32; j++){
3413 if(vfta & mask)
3414 igbvf_set_vfta(hw,
3415 (uint16_t)((i<<5)+j), on);
3416 mask<<=1;
3417 }
3418 }
3419 }
3420
3421 }
3422
3423 static int
3424 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3425 {
3426 struct e1000_hw *hw =
3427 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3428 struct e1000_vfta * shadow_vfta =
3429 E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
3430 uint32_t vid_idx = 0;
3431 uint32_t vid_bit = 0;
3432 int ret = 0;
3433
3434 PMD_INIT_FUNC_TRACE();
3435
3436 /* vind is not used in the VF driver, set to 0; check ixgbe_set_vfta_vf */
3437 ret = igbvf_set_vfta(hw, vlan_id, !!on);
3438 if(ret){
3439 PMD_INIT_LOG(ERR, "Unable to set VF vlan");
3440 return ret;
3441 }
3442 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
3443 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F));
3444
3445 /* Save what we set and restore it after device reset */
3446 if (on)
3447 shadow_vfta->vfta[vid_idx] |= vid_bit;
3448 else
3449 shadow_vfta->vfta[vid_idx] &= ~vid_bit;
3450
3451 return 0;
3452 }
3453
3454 static int
3455 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
3456 {
3457 struct e1000_hw *hw =
3458 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3459
3460 /* index is not used by rar_set() */
3461 hw->mac.ops.rar_set(hw, (void *)addr, 0);
3462 return 0;
3463 }
3464
3465
3466 static int
3467 eth_igb_rss_reta_update(struct rte_eth_dev *dev,
3468 struct rte_eth_rss_reta_entry64 *reta_conf,
3469 uint16_t reta_size)
3470 {
3471 uint8_t i, j, mask;
3472 uint32_t reta, r;
3473 uint16_t idx, shift;
3474 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3475
3476 if (reta_size != ETH_RSS_RETA_SIZE_128) {
3477 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
3478 "(%d) doesn't match the number hardware can supported "
3479 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3480 return -EINVAL;
3481 }
3482
3483 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
3484 idx = i / RTE_RETA_GROUP_SIZE;
3485 shift = i % RTE_RETA_GROUP_SIZE;
3486 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3487 IGB_4_BIT_MASK);
3488 if (!mask)
3489 continue;
3490 if (mask == IGB_4_BIT_MASK)
3491 r = 0;
3492 else
3493 r = E1000_READ_REG(hw, E1000_RETA(i >> 2));
3494 for (j = 0, reta = 0; j < IGB_4_BIT_WIDTH; j++) {
3495 if (mask & (0x1 << j))
3496 reta |= reta_conf[idx].reta[shift + j] <<
3497 (CHAR_BIT * j);
3498 else
3499 reta |= r & (IGB_8_BIT_MASK << (CHAR_BIT * j));
3500 }
3501 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
3502 }
3503
3504 return 0;
3505 }
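
/*
 * Illustrative usage sketch (not part of the upstream driver): applications
 * normally reach eth_igb_rss_reta_update() through the generic ethdev call
 * rte_eth_dev_rss_reta_update().  A minimal sketch for the 128-entry igb
 * table, assuming <rte_ethdev.h> is included and that "port_id",
 * "nb_rx_queues" and "ret" are placeholders supplied by the application:
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
 *						   RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
 *		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
 *		uint16_t shift = i % RTE_RETA_GROUP_SIZE;
 *
 *		reta_conf[idx].mask |= 1ULL << shift;   // update this entry
 *		reta_conf[idx].reta[shift] = i % nb_rx_queues;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf,
 *					  ETH_RSS_RETA_SIZE_128);
 *
 * reta_size must be exactly ETH_RSS_RETA_SIZE_128 here, otherwise the
 * function above rejects the request with -EINVAL.
 */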
3506
3507 static int
3508 eth_igb_rss_reta_query(struct rte_eth_dev *dev,
3509 struct rte_eth_rss_reta_entry64 *reta_conf,
3510 uint16_t reta_size)
3511 {
3512 uint8_t i, j, mask;
3513 uint32_t reta;
3514 uint16_t idx, shift;
3515 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3516
3517 if (reta_size != ETH_RSS_RETA_SIZE_128) {
3518 PMD_DRV_LOG(ERR, "The size of the configured hash lookup table "
3519 "(%d) doesn't match the number hardware can support "
3520 "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
3521 return -EINVAL;
3522 }
3523
3524 for (i = 0; i < reta_size; i += IGB_4_BIT_WIDTH) {
3525 idx = i / RTE_RETA_GROUP_SIZE;
3526 shift = i % RTE_RETA_GROUP_SIZE;
3527 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
3528 IGB_4_BIT_MASK);
3529 if (!mask)
3530 continue;
3531 reta = E1000_READ_REG(hw, E1000_RETA(i >> 2));
3532 for (j = 0; j < IGB_4_BIT_WIDTH; j++) {
3533 if (mask & (0x1 << j))
3534 reta_conf[idx].reta[shift + j] =
3535 ((reta >> (CHAR_BIT * j)) &
3536 IGB_8_BIT_MASK);
3537 }
3538 }
3539
3540 return 0;
3541 }
3542
3543 int
3544 eth_igb_syn_filter_set(struct rte_eth_dev *dev,
3545 struct rte_eth_syn_filter *filter,
3546 bool add)
3547 {
3548 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3549 struct e1000_filter_info *filter_info =
3550 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3551 uint32_t synqf, rfctl;
3552
3553 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
3554 return -EINVAL;
3555
3556 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
3557
3558 if (add) {
3559 if (synqf & E1000_SYN_FILTER_ENABLE)
3560 return -EINVAL;
3561
3562 synqf = (uint32_t)(((filter->queue << E1000_SYN_FILTER_QUEUE_SHIFT) &
3563 E1000_SYN_FILTER_QUEUE) | E1000_SYN_FILTER_ENABLE);
3564
3565 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
3566 if (filter->hig_pri)
3567 rfctl |= E1000_RFCTL_SYNQFP;
3568 else
3569 rfctl &= ~E1000_RFCTL_SYNQFP;
3570
3571 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
3572 } else {
3573 if (!(synqf & E1000_SYN_FILTER_ENABLE))
3574 return -ENOENT;
3575 synqf = 0;
3576 }
3577
3578 filter_info->syn_info = synqf;
3579 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
3580 E1000_WRITE_FLUSH(hw);
3581 return 0;
3582 }
3583
3584 static int
3585 eth_igb_syn_filter_get(struct rte_eth_dev *dev,
3586 struct rte_eth_syn_filter *filter)
3587 {
3588 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3589 uint32_t synqf, rfctl;
3590
3591 synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
3592 if (synqf & E1000_SYN_FILTER_ENABLE) {
3593 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
3594 filter->hig_pri = (rfctl & E1000_RFCTL_SYNQFP) ? 1 : 0;
3595 filter->queue = (uint8_t)((synqf & E1000_SYN_FILTER_QUEUE) >>
3596 E1000_SYN_FILTER_QUEUE_SHIFT);
3597 return 0;
3598 }
3599
3600 return -ENOENT;
3601 }
3602
3603 static int
3604 eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
3605 enum rte_filter_op filter_op,
3606 void *arg)
3607 {
3608 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3609 int ret;
3610
3611 MAC_TYPE_FILTER_SUP(hw->mac.type);
3612
3613 if (filter_op == RTE_ETH_FILTER_NOP)
3614 return 0;
3615
3616 if (arg == NULL) {
3617 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
3618 filter_op);
3619 return -EINVAL;
3620 }
3621
3622 switch (filter_op) {
3623 case RTE_ETH_FILTER_ADD:
3624 ret = eth_igb_syn_filter_set(dev,
3625 (struct rte_eth_syn_filter *)arg,
3626 TRUE);
3627 break;
3628 case RTE_ETH_FILTER_DELETE:
3629 ret = eth_igb_syn_filter_set(dev,
3630 (struct rte_eth_syn_filter *)arg,
3631 FALSE);
3632 break;
3633 case RTE_ETH_FILTER_GET:
3634 ret = eth_igb_syn_filter_get(dev,
3635 (struct rte_eth_syn_filter *)arg);
3636 break;
3637 default:
3638 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
3639 ret = -EINVAL;
3640 break;
3641 }
3642
3643 return ret;
3644 }
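
/*
 * Illustrative usage sketch (not part of the upstream driver): the SYN
 * filter is reached through the legacy filter_ctrl API.  A minimal sketch,
 * assuming <rte_ethdev.h> is included and "port_id", "dest_queue" and
 * "ret" are placeholders chosen by the application:
 *
 *	struct rte_eth_syn_filter syn;
 *
 *	memset(&syn, 0, sizeof(syn));
 *	syn.hig_pri = 1;           // give the SYN match priority (RFCTL.SYNQFP)
 *	syn.queue = dest_queue;    // RX queue receiving TCP SYN packets
 *	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_SYN,
 *				      RTE_ETH_FILTER_ADD, &syn);
 *
 * Only one SYN filter exists per port (SYNQF(0)), so a second ADD while it
 * is enabled fails with -EINVAL, as eth_igb_syn_filter_set() shows.
 */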
3645
3646 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info */
3647 static inline int
3648 ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
3649 struct e1000_2tuple_filter_info *filter_info)
3650 {
3651 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
3652 return -EINVAL;
3653 if (filter->priority > E1000_2TUPLE_MAX_PRI)
3654 return -EINVAL; /* priority is out of range. */
3655 if (filter->tcp_flags > TCP_FLAG_ALL)
3656 return -EINVAL; /* flags are invalid. */
3657
3658 switch (filter->dst_port_mask) {
3659 case UINT16_MAX:
3660 filter_info->dst_port_mask = 0;
3661 filter_info->dst_port = filter->dst_port;
3662 break;
3663 case 0:
3664 filter_info->dst_port_mask = 1;
3665 break;
3666 default:
3667 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
3668 return -EINVAL;
3669 }
3670
3671 switch (filter->proto_mask) {
3672 case UINT8_MAX:
3673 filter_info->proto_mask = 0;
3674 filter_info->proto = filter->proto;
3675 break;
3676 case 0:
3677 filter_info->proto_mask = 1;
3678 break;
3679 default:
3680 PMD_DRV_LOG(ERR, "invalid protocol mask.");
3681 return -EINVAL;
3682 }
3683
3684 filter_info->priority = (uint8_t)filter->priority;
3685 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
3686 filter_info->tcp_flags = filter->tcp_flags;
3687 else
3688 filter_info->tcp_flags = 0;
3689
3690 return 0;
3691 }
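
/*
 * Clarifying note (not part of the upstream source): the mask convention is
 * inverted between the two structures.  In rte_eth_ntuple_filter a mask of
 * UINT16_MAX/UINT8_MAX means "compare this field" and 0 means "ignore it",
 * while in e1000_2tuple_filter_info a mask bit of 0 means "compare" and 1
 * means "ignore" (it ends up in the hardware bypass bits, see
 * igb_inject_2uple_filter() below).  Partial masks are rejected.
 */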
3692
3693 static inline struct e1000_2tuple_filter *
3694 igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
3695 struct e1000_2tuple_filter_info *key)
3696 {
3697 struct e1000_2tuple_filter *it;
3698
3699 TAILQ_FOREACH(it, filter_list, entries) {
3700 if (memcmp(key, &it->filter_info,
3701 sizeof(struct e1000_2tuple_filter_info)) == 0) {
3702 return it;
3703 }
3704 }
3705 return NULL;
3706 }
3707
3708 /* inject an igb 2tuple filter into the HW */
3709 static inline void
3710 igb_inject_2uple_filter(struct rte_eth_dev *dev,
3711 struct e1000_2tuple_filter *filter)
3712 {
3713 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3714 uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
3715 uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
3716 int i;
3717
3718 i = filter->index;
3719 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
3720 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
3721 imir |= E1000_IMIR_PORT_BP;
3722 else
3723 imir &= ~E1000_IMIR_PORT_BP;
3724
3725 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
3726
3727 ttqf |= E1000_TTQF_QUEUE_ENABLE;
3728 ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
3729 ttqf |= (uint32_t)(filter->filter_info.proto &
3730 E1000_TTQF_PROTOCOL_MASK);
3731 if (filter->filter_info.proto_mask == 0)
3732 ttqf &= ~E1000_TTQF_MASK_ENABLE;
3733
3734 /* tcp flags bits setting. */
3735 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
3736 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
3737 imir_ext |= E1000_IMIREXT_CTRL_URG;
3738 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
3739 imir_ext |= E1000_IMIREXT_CTRL_ACK;
3740 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
3741 imir_ext |= E1000_IMIREXT_CTRL_PSH;
3742 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
3743 imir_ext |= E1000_IMIREXT_CTRL_RST;
3744 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
3745 imir_ext |= E1000_IMIREXT_CTRL_SYN;
3746 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
3747 imir_ext |= E1000_IMIREXT_CTRL_FIN;
3748 } else {
3749 imir_ext |= E1000_IMIREXT_CTRL_BP;
3750 }
3751 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
3752 E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
3753 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
3754 }
3755
3756 /*
3757 * igb_add_2tuple_filter - add a 2tuple filter
3758 *
3759 * @param
3760 * dev: Pointer to struct rte_eth_dev.
3761 * ntuple_filter: pointer to the filter that will be added.
3762 *
3763 * @return
3764 * - On success, zero.
3765 * - On failure, a negative value.
3766 */
3767 static int
3768 igb_add_2tuple_filter(struct rte_eth_dev *dev,
3769 struct rte_eth_ntuple_filter *ntuple_filter)
3770 {
3771 struct e1000_filter_info *filter_info =
3772 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3773 struct e1000_2tuple_filter *filter;
3774 int i, ret;
3775
3776 filter = rte_zmalloc("e1000_2tuple_filter",
3777 sizeof(struct e1000_2tuple_filter), 0);
3778 if (filter == NULL)
3779 return -ENOMEM;
3780
3781 ret = ntuple_filter_to_2tuple(ntuple_filter,
3782 &filter->filter_info);
3783 if (ret < 0) {
3784 rte_free(filter);
3785 return ret;
3786 }
3787 if (igb_2tuple_filter_lookup(&filter_info->twotuple_list,
3788 &filter->filter_info) != NULL) {
3789 PMD_DRV_LOG(ERR, "filter exists.");
3790 rte_free(filter);
3791 return -EEXIST;
3792 }
3793 filter->queue = ntuple_filter->queue;
3794
3795 /*
3796 * look for an unused 2tuple filter index,
3797 * and insert the filter into the list.
3798 */
3799 for (i = 0; i < E1000_MAX_TTQF_FILTERS; i++) {
3800 if (!(filter_info->twotuple_mask & (1 << i))) {
3801 filter_info->twotuple_mask |= 1 << i;
3802 filter->index = i;
3803 TAILQ_INSERT_TAIL(&filter_info->twotuple_list,
3804 filter,
3805 entries);
3806 break;
3807 }
3808 }
3809 if (i >= E1000_MAX_TTQF_FILTERS) {
3810 PMD_DRV_LOG(ERR, "2tuple filters are full.");
3811 rte_free(filter);
3812 return -ENOSYS;
3813 }
3814
3815 igb_inject_2uple_filter(dev, filter);
3816 return 0;
3817 }
3818
3819 int
3820 igb_delete_2tuple_filter(struct rte_eth_dev *dev,
3821 struct e1000_2tuple_filter *filter)
3822 {
3823 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3824 struct e1000_filter_info *filter_info =
3825 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3826
3827 filter_info->twotuple_mask &= ~(1 << filter->index);
3828 TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries);
3829 rte_free(filter);
3830
3831 E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK);
3832 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
3833 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
3834 return 0;
3835 }
3836
3837 /*
3838 * igb_remove_2tuple_filter - remove a 2tuple filter
3839 *
3840 * @param
3841 * dev: Pointer to struct rte_eth_dev.
3842 * ntuple_filter: pointer to the filter that will be removed.
3843 *
3844 * @return
3845 * - On success, zero.
3846 * - On failure, a negative value.
3847 */
3848 static int
3849 igb_remove_2tuple_filter(struct rte_eth_dev *dev,
3850 struct rte_eth_ntuple_filter *ntuple_filter)
3851 {
3852 struct e1000_filter_info *filter_info =
3853 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3854 struct e1000_2tuple_filter_info filter_2tuple;
3855 struct e1000_2tuple_filter *filter;
3856 int ret;
3857
3858 memset(&filter_2tuple, 0, sizeof(struct e1000_2tuple_filter_info));
3859 ret = ntuple_filter_to_2tuple(ntuple_filter,
3860 &filter_2tuple);
3861 if (ret < 0)
3862 return ret;
3863
3864 filter = igb_2tuple_filter_lookup(&filter_info->twotuple_list,
3865 &filter_2tuple);
3866 if (filter == NULL) {
3867 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3868 return -ENOENT;
3869 }
3870
3871 igb_delete_2tuple_filter(dev, filter);
3872
3873 return 0;
3874 }
3875
3876 /* inject an igb flex filter into the HW */
3877 static inline void
3878 igb_inject_flex_filter(struct rte_eth_dev *dev,
3879 struct e1000_flex_filter *filter)
3880 {
3881 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3882 uint32_t wufc, queueing;
3883 uint32_t reg_off;
3884 uint8_t i, j = 0;
3885
3886 wufc = E1000_READ_REG(hw, E1000_WUFC);
3887 if (filter->index < E1000_MAX_FHFT)
3888 reg_off = E1000_FHFT(filter->index);
3889 else
3890 reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
3891
3892 E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
3893 (E1000_WUFC_FLX0 << filter->index));
3894 queueing = filter->filter_info.len |
3895 (filter->queue << E1000_FHFT_QUEUEING_QUEUE_SHIFT) |
3896 (filter->filter_info.priority <<
3897 E1000_FHFT_QUEUEING_PRIO_SHIFT);
3898 E1000_WRITE_REG(hw, reg_off + E1000_FHFT_QUEUEING_OFFSET,
3899 queueing);
3900
3901 for (i = 0; i < E1000_FLEX_FILTERS_MASK_SIZE; i++) {
3902 E1000_WRITE_REG(hw, reg_off,
3903 filter->filter_info.dwords[j]);
3904 reg_off += sizeof(uint32_t);
3905 E1000_WRITE_REG(hw, reg_off,
3906 filter->filter_info.dwords[++j]);
3907 reg_off += sizeof(uint32_t);
3908 E1000_WRITE_REG(hw, reg_off,
3909 (uint32_t)filter->filter_info.mask[i]);
3910 reg_off += sizeof(uint32_t) * 2;
3911 ++j;
3912 }
3913 }
3914
3915 static inline struct e1000_flex_filter *
3916 eth_igb_flex_filter_lookup(struct e1000_flex_filter_list *filter_list,
3917 struct e1000_flex_filter_info *key)
3918 {
3919 struct e1000_flex_filter *it;
3920
3921 TAILQ_FOREACH(it, filter_list, entries) {
3922 if (memcmp(key, &it->filter_info,
3923 sizeof(struct e1000_flex_filter_info)) == 0)
3924 return it;
3925 }
3926
3927 return NULL;
3928 }
3929
3930 /* remove a flex byte filter
3931 * @param
3932 * dev: Pointer to struct rte_eth_dev.
3933 * filter: pointer to the filter that will be removed.
3934 */
3935 void
3936 igb_remove_flex_filter(struct rte_eth_dev *dev,
3937 struct e1000_flex_filter *filter)
3938 {
3939 struct e1000_filter_info *filter_info =
3940 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3941 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3942 uint32_t wufc, i;
3943 uint32_t reg_off;
3944
3945 wufc = E1000_READ_REG(hw, E1000_WUFC);
3946 if (filter->index < E1000_MAX_FHFT)
3947 reg_off = E1000_FHFT(filter->index);
3948 else
3949 reg_off = E1000_FHFT_EXT(filter->index - E1000_MAX_FHFT);
3950
3951 for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
3952 E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
3953
3954 E1000_WRITE_REG(hw, E1000_WUFC, wufc &
3955 (~(E1000_WUFC_FLX0 << filter->index)));
3956
3957 filter_info->flex_mask &= ~(1 << filter->index);
3958 TAILQ_REMOVE(&filter_info->flex_list, filter, entries);
3959 rte_free(filter);
3960 }
3961
3962 int
3963 eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
3964 struct rte_eth_flex_filter *filter,
3965 bool add)
3966 {
3967 struct e1000_filter_info *filter_info =
3968 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3969 struct e1000_flex_filter *flex_filter, *it;
3970 uint32_t mask;
3971 uint8_t shift, i;
3972
3973 flex_filter = rte_zmalloc("e1000_flex_filter",
3974 sizeof(struct e1000_flex_filter), 0);
3975 if (flex_filter == NULL)
3976 return -ENOMEM;
3977
3978 flex_filter->filter_info.len = filter->len;
3979 flex_filter->filter_info.priority = filter->priority;
3980 memcpy(flex_filter->filter_info.dwords, filter->bytes, filter->len);
3981 for (i = 0; i < RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT; i++) {
3982 mask = 0;
3983 /* reverse bits in flex filter's mask*/
3984 for (shift = 0; shift < CHAR_BIT; shift++) {
3985 if (filter->mask[i] & (0x01 << shift))
3986 mask |= (0x80 >> shift);
3987 }
3988 flex_filter->filter_info.mask[i] = mask;
3989 }
3990
3991 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
3992 &flex_filter->filter_info);
3993 if (it == NULL && !add) {
3994 PMD_DRV_LOG(ERR, "filter doesn't exist.");
3995 rte_free(flex_filter);
3996 return -ENOENT;
3997 }
3998 if (it != NULL && add) {
3999 PMD_DRV_LOG(ERR, "filter exists.");
4000 rte_free(flex_filter);
4001 return -EEXIST;
4002 }
4003
4004 if (add) {
4005 flex_filter->queue = filter->queue;
4006 /*
4007 * look for an unused flex filter index
4008 * and insert the filter into the list.
4009 */
4010 for (i = 0; i < E1000_MAX_FLEX_FILTERS; i++) {
4011 if (!(filter_info->flex_mask & (1 << i))) {
4012 filter_info->flex_mask |= 1 << i;
4013 flex_filter->index = i;
4014 TAILQ_INSERT_TAIL(&filter_info->flex_list,
4015 flex_filter,
4016 entries);
4017 break;
4018 }
4019 }
4020 if (i >= E1000_MAX_FLEX_FILTERS) {
4021 PMD_DRV_LOG(ERR, "flex filters are full.");
4022 rte_free(flex_filter);
4023 return -ENOSYS;
4024 }
4025
4026 igb_inject_flex_filter(dev, flex_filter);
4027
4028 } else {
4029 igb_remove_flex_filter(dev, it);
4030 rte_free(flex_filter);
4031 }
4032
4033 return 0;
4034 }
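
/*
 * Illustrative usage sketch (not part of the upstream driver): a flex
 * filter matches the first filter->len bytes of the packet against
 * filter->bytes under the per-byte filter->mask.  A minimal sketch through
 * the legacy filter_ctrl API, assuming <rte_ethdev.h> is included,
 * "port_id"/"dest_queue"/"ret" are placeholders and "pattern" is a 16-byte
 * application buffer:
 *
 *	struct rte_eth_flex_filter flex;
 *
 *	memset(&flex, 0, sizeof(flex));
 *	flex.len = 16;                   // must be a non-zero multiple of 8
 *	memcpy(flex.bytes, pattern, 16);
 *	flex.mask[0] = 0xFF;             // compare bytes 0..7
 *	flex.mask[1] = 0x0F;             // compare bytes 8..11 only
 *	flex.priority = 1;
 *	flex.queue = dest_queue;
 *	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FLEXIBLE,
 *				      RTE_ETH_FILTER_ADD, &flex);
 *
 * This assumes bit i of mask[j] covers byte (j * 8 + i), which is how
 * eth_igb_add_del_flex_filter() interprets the mask before reversing the
 * bit order of each byte for the FHFT registers.
 */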
4035
4036 static int
4037 eth_igb_get_flex_filter(struct rte_eth_dev *dev,
4038 struct rte_eth_flex_filter *filter)
4039 {
4040 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4041 struct e1000_filter_info *filter_info =
4042 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4043 struct e1000_flex_filter flex_filter, *it;
4044 uint32_t wufc, queueing, wufc_en = 0;
4045
4046 memset(&flex_filter, 0, sizeof(struct e1000_flex_filter));
4047 flex_filter.filter_info.len = filter->len;
4048 flex_filter.filter_info.priority = filter->priority;
4049 memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
4050 memcpy(flex_filter.filter_info.mask, filter->mask,
4051 RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT);
4052
4053 it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
4054 &flex_filter.filter_info);
4055 if (it == NULL) {
4056 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4057 return -ENOENT;
4058 }
4059
4060 wufc = E1000_READ_REG(hw, E1000_WUFC);
4061 wufc_en = E1000_WUFC_FLEX_HQ | (E1000_WUFC_FLX0 << it->index);
4062
4063 if ((wufc & wufc_en) == wufc_en) {
4064 uint32_t reg_off = 0;
4065 if (it->index < E1000_MAX_FHFT)
4066 reg_off = E1000_FHFT(it->index);
4067 else
4068 reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
4069
4070 queueing = E1000_READ_REG(hw,
4071 reg_off + E1000_FHFT_QUEUEING_OFFSET);
4072 filter->len = queueing & E1000_FHFT_QUEUEING_LEN;
4073 filter->priority = (queueing & E1000_FHFT_QUEUEING_PRIO) >>
4074 E1000_FHFT_QUEUEING_PRIO_SHIFT;
4075 filter->queue = (queueing & E1000_FHFT_QUEUEING_QUEUE) >>
4076 E1000_FHFT_QUEUEING_QUEUE_SHIFT;
4077 return 0;
4078 }
4079 return -ENOENT;
4080 }
4081
4082 static int
4083 eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
4084 enum rte_filter_op filter_op,
4085 void *arg)
4086 {
4087 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4088 struct rte_eth_flex_filter *filter;
4089 int ret = 0;
4090
4091 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
4092
4093 if (filter_op == RTE_ETH_FILTER_NOP)
4094 return ret;
4095
4096 if (arg == NULL) {
4097 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
4098 filter_op);
4099 return -EINVAL;
4100 }
4101
4102 filter = (struct rte_eth_flex_filter *)arg;
4103 if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
4104 || filter->len % sizeof(uint64_t) != 0) {
4105 PMD_DRV_LOG(ERR, "filter's length is out of range");
4106 return -EINVAL;
4107 }
4108 if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
4109 PMD_DRV_LOG(ERR, "filter's priority is out of range");
4110 return -EINVAL;
4111 }
4112
4113 switch (filter_op) {
4114 case RTE_ETH_FILTER_ADD:
4115 ret = eth_igb_add_del_flex_filter(dev, filter, TRUE);
4116 break;
4117 case RTE_ETH_FILTER_DELETE:
4118 ret = eth_igb_add_del_flex_filter(dev, filter, FALSE);
4119 break;
4120 case RTE_ETH_FILTER_GET:
4121 ret = eth_igb_get_flex_filter(dev, filter);
4122 break;
4123 default:
4124 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
4125 ret = -EINVAL;
4126 break;
4127 }
4128
4129 return ret;
4130 }
4131
4132 /* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info */
4133 static inline int
4134 ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
4135 struct e1000_5tuple_filter_info *filter_info)
4136 {
4137 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
4138 return -EINVAL;
4139 if (filter->priority > E1000_2TUPLE_MAX_PRI)
4140 return -EINVAL; /* priority is out of range. */
4141 if (filter->tcp_flags > TCP_FLAG_ALL)
4142 return -EINVAL; /* flags are invalid. */
4143
4144 switch (filter->dst_ip_mask) {
4145 case UINT32_MAX:
4146 filter_info->dst_ip_mask = 0;
4147 filter_info->dst_ip = filter->dst_ip;
4148 break;
4149 case 0:
4150 filter_info->dst_ip_mask = 1;
4151 break;
4152 default:
4153 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
4154 return -EINVAL;
4155 }
4156
4157 switch (filter->src_ip_mask) {
4158 case UINT32_MAX:
4159 filter_info->src_ip_mask = 0;
4160 filter_info->src_ip = filter->src_ip;
4161 break;
4162 case 0:
4163 filter_info->src_ip_mask = 1;
4164 break;
4165 default:
4166 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
4167 return -EINVAL;
4168 }
4169
4170 switch (filter->dst_port_mask) {
4171 case UINT16_MAX:
4172 filter_info->dst_port_mask = 0;
4173 filter_info->dst_port = filter->dst_port;
4174 break;
4175 case 0:
4176 filter_info->dst_port_mask = 1;
4177 break;
4178 default:
4179 PMD_DRV_LOG(ERR, "invalid dst_port mask.");
4180 return -EINVAL;
4181 }
4182
4183 switch (filter->src_port_mask) {
4184 case UINT16_MAX:
4185 filter_info->src_port_mask = 0;
4186 filter_info->src_port = filter->src_port;
4187 break;
4188 case 0:
4189 filter_info->src_port_mask = 1;
4190 break;
4191 default:
4192 PMD_DRV_LOG(ERR, "invalid src_port mask.");
4193 return -EINVAL;
4194 }
4195
4196 switch (filter->proto_mask) {
4197 case UINT8_MAX:
4198 filter_info->proto_mask = 0;
4199 filter_info->proto = filter->proto;
4200 break;
4201 case 0:
4202 filter_info->proto_mask = 1;
4203 break;
4204 default:
4205 PMD_DRV_LOG(ERR, "invalid protocol mask.");
4206 return -EINVAL;
4207 }
4208
4209 filter_info->priority = (uint8_t)filter->priority;
4210 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
4211 filter_info->tcp_flags = filter->tcp_flags;
4212 else
4213 filter_info->tcp_flags = 0;
4214
4215 return 0;
4216 }
4217
4218 static inline struct e1000_5tuple_filter *
4219 igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
4220 struct e1000_5tuple_filter_info *key)
4221 {
4222 struct e1000_5tuple_filter *it;
4223
4224 TAILQ_FOREACH(it, filter_list, entries) {
4225 if (memcmp(key, &it->filter_info,
4226 sizeof(struct e1000_5tuple_filter_info)) == 0) {
4227 return it;
4228 }
4229 }
4230 return NULL;
4231 }
4232
4233 /* inject an igb 5-tuple filter into the HW */
4234 static inline void
4235 igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
4236 struct e1000_5tuple_filter *filter)
4237 {
4238 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4239 uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
4240 uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
4241 uint8_t i;
4242
4243 i = filter->index;
4244 ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
4245 if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
4246 ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
4247 if (filter->filter_info.dst_ip_mask == 0)
4248 ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
4249 if (filter->filter_info.src_port_mask == 0)
4250 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
4251 if (filter->filter_info.proto_mask == 0)
4252 ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
4253 ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
4254 E1000_FTQF_QUEUE_MASK;
4255 ftqf |= E1000_FTQF_QUEUE_ENABLE;
4256 E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
4257 E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
4258 E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
4259
4260 spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
4261 E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
4262
4263 imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
4264 if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
4265 imir |= E1000_IMIR_PORT_BP;
4266 else
4267 imir &= ~E1000_IMIR_PORT_BP;
4268 imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
4269
4270 /* tcp flags bits setting. */
4271 if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
4272 if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
4273 imir_ext |= E1000_IMIREXT_CTRL_URG;
4274 if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
4275 imir_ext |= E1000_IMIREXT_CTRL_ACK;
4276 if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
4277 imir_ext |= E1000_IMIREXT_CTRL_PSH;
4278 if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
4279 imir_ext |= E1000_IMIREXT_CTRL_RST;
4280 if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
4281 imir_ext |= E1000_IMIREXT_CTRL_SYN;
4282 if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
4283 imir_ext |= E1000_IMIREXT_CTRL_FIN;
4284 } else {
4285 imir_ext |= E1000_IMIREXT_CTRL_BP;
4286 }
4287 E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
4288 E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
4289 }
4290
4291 /*
4292 * igb_add_5tuple_filter_82576 - add a 5tuple filter
4293 *
4294 * @param
4295 * dev: Pointer to struct rte_eth_dev.
4296 * ntuple_filter: pointer to the filter that will be added.
4297 *
4298 * @return
4299 * - On success, zero.
4300 * - On failure, a negative value.
4301 */
4302 static int
4303 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
4304 struct rte_eth_ntuple_filter *ntuple_filter)
4305 {
4306 struct e1000_filter_info *filter_info =
4307 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4308 struct e1000_5tuple_filter *filter;
4309 uint8_t i;
4310 int ret;
4311
4312 filter = rte_zmalloc("e1000_5tuple_filter",
4313 sizeof(struct e1000_5tuple_filter), 0);
4314 if (filter == NULL)
4315 return -ENOMEM;
4316
4317 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4318 &filter->filter_info);
4319 if (ret < 0) {
4320 rte_free(filter);
4321 return ret;
4322 }
4323
4324 if (igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
4325 &filter->filter_info) != NULL) {
4326 PMD_DRV_LOG(ERR, "filter exists.");
4327 rte_free(filter);
4328 return -EEXIST;
4329 }
4330 filter->queue = ntuple_filter->queue;
4331
4332 /*
4333 * look for an unused 5tuple filter index,
4334 * and insert the filter into the list.
4335 */
4336 for (i = 0; i < E1000_MAX_FTQF_FILTERS; i++) {
4337 if (!(filter_info->fivetuple_mask & (1 << i))) {
4338 filter_info->fivetuple_mask |= 1 << i;
4339 filter->index = i;
4340 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
4341 filter,
4342 entries);
4343 break;
4344 }
4345 }
4346 if (i >= E1000_MAX_FTQF_FILTERS) {
4347 PMD_DRV_LOG(ERR, "5tuple filters are full.");
4348 rte_free(filter);
4349 return -ENOSYS;
4350 }
4351
4352 igb_inject_5tuple_filter_82576(dev, filter);
4353 return 0;
4354 }
4355
4356 int
4357 igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
4358 struct e1000_5tuple_filter *filter)
4359 {
4360 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4361 struct e1000_filter_info *filter_info =
4362 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4363
4364 filter_info->fivetuple_mask &= ~(1 << filter->index);
4365 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
4366 rte_free(filter);
4367
4368 E1000_WRITE_REG(hw, E1000_FTQF(filter->index),
4369 E1000_FTQF_VF_BP | E1000_FTQF_MASK);
4370 E1000_WRITE_REG(hw, E1000_DAQF(filter->index), 0);
4371 E1000_WRITE_REG(hw, E1000_SAQF(filter->index), 0);
4372 E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0);
4373 E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0);
4374 E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0);
4375 return 0;
4376 }
4377
4378 /*
4379 * igb_remove_5tuple_filter_82576 - remove a 5tuple filter
4380 *
4381 * @param
4382 * dev: Pointer to struct rte_eth_dev.
4383 * ntuple_filter: pointer to the filter that will be removed.
4384 *
4385 * @return
4386 * - On success, zero.
4387 * - On failure, a negative value.
4388 */
4389 static int
4390 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
4391 struct rte_eth_ntuple_filter *ntuple_filter)
4392 {
4393 struct e1000_filter_info *filter_info =
4394 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4395 struct e1000_5tuple_filter_info filter_5tuple;
4396 struct e1000_5tuple_filter *filter;
4397 int ret;
4398
4399 memset(&filter_5tuple, 0, sizeof(struct e1000_5tuple_filter_info));
4400 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4401 &filter_5tuple);
4402 if (ret < 0)
4403 return ret;
4404
4405 filter = igb_5tuple_filter_lookup_82576(&filter_info->fivetuple_list,
4406 &filter_5tuple);
4407 if (filter == NULL) {
4408 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4409 return -ENOENT;
4410 }
4411
4412 igb_delete_5tuple_filter_82576(dev, filter);
4413
4414 return 0;
4415 }
4416
4417 static int
4418 eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
4419 {
4420 uint32_t rctl;
4421 struct e1000_hw *hw;
4422 struct rte_eth_dev_info dev_info;
4423 uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
4424 VLAN_TAG_SIZE);
4425
4426 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4427
4428 #ifdef RTE_LIBRTE_82571_SUPPORT
4429 /* XXX: not bigger than max_rx_pktlen */
4430 if (hw->mac.type == e1000_82571)
4431 return -ENOTSUP;
4432 #endif
4433 eth_igb_infos_get(dev, &dev_info);
4434
4435 /* check that mtu is within the allowed range */
4436 if ((mtu < ETHER_MIN_MTU) ||
4437 (frame_size > dev_info.max_rx_pktlen))
4438 return -EINVAL;
4439
4440 /* refuse an mtu that requires scattered packet support when this
4441 * feature has not been enabled before. */
4442 if (!dev->data->scattered_rx &&
4443 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
4444 return -EINVAL;
4445
4446 rctl = E1000_READ_REG(hw, E1000_RCTL);
4447
4448 /* switch to jumbo mode if needed */
4449 if (frame_size > ETHER_MAX_LEN) {
4450 dev->data->dev_conf.rxmode.offloads |=
4451 DEV_RX_OFFLOAD_JUMBO_FRAME;
4452 rctl |= E1000_RCTL_LPE;
4453 } else {
4454 dev->data->dev_conf.rxmode.offloads &=
4455 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
4456 rctl &= ~E1000_RCTL_LPE;
4457 }
4458 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
4459
4460 /* update max frame size */
4461 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
4462
4463 E1000_WRITE_REG(hw, E1000_RLPML,
4464 dev->data->dev_conf.rxmode.max_rx_pkt_len);
4465
4466 return 0;
4467 }
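
/*
 * Worked example (not part of the upstream source): the driver reserves
 * room for the Ethernet header, CRC and one VLAN tag on top of the MTU,
 * so for the default MTU of 1500 bytes:
 *
 *	frame_size = 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4)
 *		     + VLAN_TAG_SIZE (4) = 1522
 *
 * which is larger than ETHER_MAX_LEN (1518), hence jumbo mode (RCTL.LPE)
 * ends up enabled for any MTU above 1496 bytes.
 */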
4468
4469 /*
4470 * igb_add_del_ntuple_filter - add or delete a ntuple filter
4471 *
4472 * @param
4473 * dev: Pointer to struct rte_eth_dev.
4474 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
4475 * add: if true, add filter, if false, remove filter
4476 *
4477 * @return
4478 * - On success, zero.
4479 * - On failure, a negative value.
4480 */
4481 int
4482 igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
4483 struct rte_eth_ntuple_filter *ntuple_filter,
4484 bool add)
4485 {
4486 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4487 int ret;
4488
4489 switch (ntuple_filter->flags) {
4490 case RTE_5TUPLE_FLAGS:
4491 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4492 if (hw->mac.type != e1000_82576)
4493 return -ENOTSUP;
4494 if (add)
4495 ret = igb_add_5tuple_filter_82576(dev,
4496 ntuple_filter);
4497 else
4498 ret = igb_remove_5tuple_filter_82576(dev,
4499 ntuple_filter);
4500 break;
4501 case RTE_2TUPLE_FLAGS:
4502 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4503 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 &&
4504 hw->mac.type != e1000_i210 &&
4505 hw->mac.type != e1000_i211)
4506 return -ENOTSUP;
4507 if (add)
4508 ret = igb_add_2tuple_filter(dev, ntuple_filter);
4509 else
4510 ret = igb_remove_2tuple_filter(dev, ntuple_filter);
4511 break;
4512 default:
4513 ret = -EINVAL;
4514 break;
4515 }
4516
4517 return ret;
4518 }
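
/*
 * Illustrative usage sketch (not part of the upstream driver): a 5-tuple
 * filter (82576 only) is added through the legacy filter_ctrl API.  A
 * minimal sketch, assuming <rte_ethdev.h> is included, "port_id",
 * "dest_queue" and "ret" are placeholders, and that addresses/ports are
 * given in big endian as the ntuple API documents:
 *
 *	struct rte_eth_ntuple_filter ntuple;
 *
 *	memset(&ntuple, 0, sizeof(ntuple));
 *	ntuple.flags = RTE_5TUPLE_FLAGS;
 *	ntuple.dst_ip = rte_cpu_to_be_32(0xC0A80001);   // 192.168.0.1
 *	ntuple.dst_ip_mask = UINT32_MAX;                // compare dst_ip
 *	ntuple.src_ip_mask = 0;                         // ignore src_ip
 *	ntuple.dst_port = rte_cpu_to_be_16(80);
 *	ntuple.dst_port_mask = UINT16_MAX;
 *	ntuple.src_port_mask = 0;
 *	ntuple.proto = 6;                               // TCP
 *	ntuple.proto_mask = UINT8_MAX;
 *	ntuple.priority = 1;
 *	ntuple.queue = dest_queue;
 *	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *				      RTE_ETH_FILTER_ADD, &ntuple);
 *
 * 2-tuple filters (RTE_2TUPLE_FLAGS, dst_port + proto only) follow the
 * same pattern on 82580/i350/i210/i211.
 */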
4519
4520 /*
4521 * igb_get_ntuple_filter - get a ntuple filter
4522 *
4523 * @param
4524 * dev: Pointer to struct rte_eth_dev.
4525 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
4526 *
4527 * @return
4528 * - On success, zero.
4529 * - On failure, a negative value.
4530 */
4531 static int
4532 igb_get_ntuple_filter(struct rte_eth_dev *dev,
4533 struct rte_eth_ntuple_filter *ntuple_filter)
4534 {
4535 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4536 struct e1000_filter_info *filter_info =
4537 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4538 struct e1000_5tuple_filter_info filter_5tuple;
4539 struct e1000_2tuple_filter_info filter_2tuple;
4540 struct e1000_5tuple_filter *p_5tuple_filter;
4541 struct e1000_2tuple_filter *p_2tuple_filter;
4542 int ret;
4543
4544 switch (ntuple_filter->flags) {
4545 case RTE_5TUPLE_FLAGS:
4546 case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4547 if (hw->mac.type != e1000_82576)
4548 return -ENOTSUP;
4549 memset(&filter_5tuple,
4550 0,
4551 sizeof(struct e1000_5tuple_filter_info));
4552 ret = ntuple_filter_to_5tuple_82576(ntuple_filter,
4553 &filter_5tuple);
4554 if (ret < 0)
4555 return ret;
4556 p_5tuple_filter = igb_5tuple_filter_lookup_82576(
4557 &filter_info->fivetuple_list,
4558 &filter_5tuple);
4559 if (p_5tuple_filter == NULL) {
4560 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4561 return -ENOENT;
4562 }
4563 ntuple_filter->queue = p_5tuple_filter->queue;
4564 break;
4565 case RTE_2TUPLE_FLAGS:
4566 case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
4567 if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
4568 return -ENOTSUP;
4569 memset(&filter_2tuple,
4570 0,
4571 sizeof(struct e1000_2tuple_filter_info));
4572 ret = ntuple_filter_to_2tuple(ntuple_filter, &filter_2tuple);
4573 if (ret < 0)
4574 return ret;
4575 p_2tuple_filter = igb_2tuple_filter_lookup(
4576 &filter_info->twotuple_list,
4577 &filter_2tuple);
4578 if (p_2tuple_filter == NULL) {
4579 PMD_DRV_LOG(ERR, "filter doesn't exist.");
4580 return -ENOENT;
4581 }
4582 ntuple_filter->queue = p_2tuple_filter->queue;
4583 break;
4584 default:
4585 ret = -EINVAL;
4586 break;
4587 }
4588
4589 return ret;
4590 }
4591
4592 /*
4593 * igb_ntuple_filter_handle - Handle operations for ntuple filter.
4594 * @dev: pointer to rte_eth_dev structure
4595 * @filter_op: operation to be taken.
4596 * @arg: a pointer to specific structure corresponding to the filter_op
4597 */
4598 static int
4599 igb_ntuple_filter_handle(struct rte_eth_dev *dev,
4600 enum rte_filter_op filter_op,
4601 void *arg)
4602 {
4603 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4604 int ret;
4605
4606 MAC_TYPE_FILTER_SUP(hw->mac.type);
4607
4608 if (filter_op == RTE_ETH_FILTER_NOP)
4609 return 0;
4610
4611 if (arg == NULL) {
4612 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
4613 filter_op);
4614 return -EINVAL;
4615 }
4616
4617 switch (filter_op) {
4618 case RTE_ETH_FILTER_ADD:
4619 ret = igb_add_del_ntuple_filter(dev,
4620 (struct rte_eth_ntuple_filter *)arg,
4621 TRUE);
4622 break;
4623 case RTE_ETH_FILTER_DELETE:
4624 ret = igb_add_del_ntuple_filter(dev,
4625 (struct rte_eth_ntuple_filter *)arg,
4626 FALSE);
4627 break;
4628 case RTE_ETH_FILTER_GET:
4629 ret = igb_get_ntuple_filter(dev,
4630 (struct rte_eth_ntuple_filter *)arg);
4631 break;
4632 default:
4633 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
4634 ret = -EINVAL;
4635 break;
4636 }
4637 return ret;
4638 }
4639
4640 static inline int
4641 igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
4642 uint16_t ethertype)
4643 {
4644 int i;
4645
4646 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
4647 if (filter_info->ethertype_filters[i].ethertype == ethertype &&
4648 (filter_info->ethertype_mask & (1 << i)))
4649 return i;
4650 }
4651 return -1;
4652 }
4653
4654 static inline int
4655 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
4656 uint16_t ethertype, uint32_t etqf)
4657 {
4658 int i;
4659
4660 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
4661 if (!(filter_info->ethertype_mask & (1 << i))) {
4662 filter_info->ethertype_mask |= 1 << i;
4663 filter_info->ethertype_filters[i].ethertype = ethertype;
4664 filter_info->ethertype_filters[i].etqf = etqf;
4665 return i;
4666 }
4667 }
4668 return -1;
4669 }
4670
4671 int
4672 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
4673 uint8_t idx)
4674 {
4675 if (idx >= E1000_MAX_ETQF_FILTERS)
4676 return -1;
4677 filter_info->ethertype_mask &= ~(1 << idx);
4678 filter_info->ethertype_filters[idx].ethertype = 0;
4679 filter_info->ethertype_filters[idx].etqf = 0;
4680 return idx;
4681 }
4682
4683
4684 int
4685 igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
4686 struct rte_eth_ethertype_filter *filter,
4687 bool add)
4688 {
4689 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4690 struct e1000_filter_info *filter_info =
4691 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4692 uint32_t etqf = 0;
4693 int ret;
4694
4695 if (filter->ether_type == ETHER_TYPE_IPv4 ||
4696 filter->ether_type == ETHER_TYPE_IPv6) {
4697 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
4698 " ethertype filter.", filter->ether_type);
4699 return -EINVAL;
4700 }
4701
4702 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
4703 PMD_DRV_LOG(ERR, "mac compare is unsupported.");
4704 return -EINVAL;
4705 }
4706 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
4707 PMD_DRV_LOG(ERR, "drop option is unsupported.");
4708 return -EINVAL;
4709 }
4710
4711 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
4712 if (ret >= 0 && add) {
4713 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
4714 filter->ether_type);
4715 return -EEXIST;
4716 }
4717 if (ret < 0 && !add) {
4718 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4719 filter->ether_type);
4720 return -ENOENT;
4721 }
4722
4723 if (add) {
4724 etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
4725 etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
4726 etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
4727 ret = igb_ethertype_filter_insert(filter_info,
4728 filter->ether_type, etqf);
4729 if (ret < 0) {
4730 PMD_DRV_LOG(ERR, "ethertype filters are full.");
4731 return -ENOSYS;
4732 }
4733 } else {
4734 ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
4735 if (ret < 0)
4736 return -ENOSYS;
4737 }
4738 E1000_WRITE_REG(hw, E1000_ETQF(ret), etqf);
4739 E1000_WRITE_FLUSH(hw);
4740
4741 return 0;
4742 }
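
/*
 * Illustrative usage sketch (not part of the upstream driver): an ethertype
 * filter steers frames of one EtherType to a given RX queue through one of
 * the ETQF registers.  A minimal sketch, assuming <rte_ethdev.h> is
 * included and "port_id"/"dest_queue"/"ret" are placeholders; 0x88CC (LLDP)
 * is just an example EtherType:
 *
 *	struct rte_eth_ethertype_filter etf;
 *
 *	memset(&etf, 0, sizeof(etf));
 *	etf.ether_type = 0x88CC;   // must not be IPv4 or IPv6
 *	etf.flags = 0;             // MAC compare and DROP are unsupported here
 *	etf.queue = dest_queue;
 *	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *				      RTE_ETH_FILTER_ADD, &etf);
 */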
4743
4744 static int
4745 igb_get_ethertype_filter(struct rte_eth_dev *dev,
4746 struct rte_eth_ethertype_filter *filter)
4747 {
4748 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4749 struct e1000_filter_info *filter_info =
4750 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
4751 uint32_t etqf;
4752 int ret;
4753
4754 ret = igb_ethertype_filter_lookup(filter_info, filter->ether_type);
4755 if (ret < 0) {
4756 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
4757 filter->ether_type);
4758 return -ENOENT;
4759 }
4760
4761 etqf = E1000_READ_REG(hw, E1000_ETQF(ret));
4762 if (etqf & E1000_ETQF_FILTER_ENABLE) {
4763 filter->ether_type = etqf & E1000_ETQF_ETHERTYPE;
4764 filter->flags = 0;
4765 filter->queue = (etqf & E1000_ETQF_QUEUE) >>
4766 E1000_ETQF_QUEUE_SHIFT;
4767 return 0;
4768 }
4769
4770 return -ENOENT;
4771 }
4772
4773 /*
4774 * igb_ethertype_filter_handle - Handle operations for ethertype filter.
4775 * @dev: pointer to rte_eth_dev structure
4776 * @filter_op: operation to be taken.
4777 * @arg: a pointer to specific structure corresponding to the filter_op
4778 */
4779 static int
4780 igb_ethertype_filter_handle(struct rte_eth_dev *dev,
4781 enum rte_filter_op filter_op,
4782 void *arg)
4783 {
4784 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4785 int ret;
4786
4787 MAC_TYPE_FILTER_SUP(hw->mac.type);
4788
4789 if (filter_op == RTE_ETH_FILTER_NOP)
4790 return 0;
4791
4792 if (arg == NULL) {
4793 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
4794 filter_op);
4795 return -EINVAL;
4796 }
4797
4798 switch (filter_op) {
4799 case RTE_ETH_FILTER_ADD:
4800 ret = igb_add_del_ethertype_filter(dev,
4801 (struct rte_eth_ethertype_filter *)arg,
4802 TRUE);
4803 break;
4804 case RTE_ETH_FILTER_DELETE:
4805 ret = igb_add_del_ethertype_filter(dev,
4806 (struct rte_eth_ethertype_filter *)arg,
4807 FALSE);
4808 break;
4809 case RTE_ETH_FILTER_GET:
4810 ret = igb_get_ethertype_filter(dev,
4811 (struct rte_eth_ethertype_filter *)arg);
4812 break;
4813 default:
4814 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
4815 ret = -EINVAL;
4816 break;
4817 }
4818 return ret;
4819 }
4820
4821 static int
4822 eth_igb_filter_ctrl(struct rte_eth_dev *dev,
4823 enum rte_filter_type filter_type,
4824 enum rte_filter_op filter_op,
4825 void *arg)
4826 {
4827 int ret = 0;
4828
4829 switch (filter_type) {
4830 case RTE_ETH_FILTER_NTUPLE:
4831 ret = igb_ntuple_filter_handle(dev, filter_op, arg);
4832 break;
4833 case RTE_ETH_FILTER_ETHERTYPE:
4834 ret = igb_ethertype_filter_handle(dev, filter_op, arg);
4835 break;
4836 case RTE_ETH_FILTER_SYN:
4837 ret = eth_igb_syn_filter_handle(dev, filter_op, arg);
4838 break;
4839 case RTE_ETH_FILTER_FLEXIBLE:
4840 ret = eth_igb_flex_filter_handle(dev, filter_op, arg);
4841 break;
4842 case RTE_ETH_FILTER_GENERIC:
4843 if (filter_op != RTE_ETH_FILTER_GET)
4844 return -EINVAL;
4845 *(const void **)arg = &igb_flow_ops;
4846 break;
4847 default:
4848 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4849 filter_type);
4850 break;
4851 }
4852
4853 return ret;
4854 }
4855
4856 static int
4857 eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
4858 struct ether_addr *mc_addr_set,
4859 uint32_t nb_mc_addr)
4860 {
4861 struct e1000_hw *hw;
4862
4863 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4864 e1000_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
4865 return 0;
4866 }
4867
4868 static uint64_t
4869 igb_read_systime_cyclecounter(struct rte_eth_dev *dev)
4870 {
4871 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4872 uint64_t systime_cycles;
4873
4874 switch (hw->mac.type) {
4875 case e1000_i210:
4876 case e1000_i211:
4877 /*
4878 * Need to read System Time Residue Register to be able
4879 * to read the other two registers.
4880 */
4881 E1000_READ_REG(hw, E1000_SYSTIMR);
4882 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
4883 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4884 systime_cycles += (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
4885 * NSEC_PER_SEC;
4886 break;
4887 case e1000_82580:
4888 case e1000_i350:
4889 case e1000_i354:
4890 /*
4891 * Need to read System Time Residue Register to be able
4892 * to read the other two registers.
4893 */
4894 E1000_READ_REG(hw, E1000_SYSTIMR);
4895 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4896 /* Only the 8 LSB are valid. */
4897 systime_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_SYSTIMH)
4898 & 0xff) << 32;
4899 break;
4900 default:
4901 systime_cycles = (uint64_t)E1000_READ_REG(hw, E1000_SYSTIML);
4902 systime_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_SYSTIMH)
4903 << 32;
4904 break;
4905 }
4906
4907 return systime_cycles;
4908 }
4909
4910 static uint64_t
4911 igb_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4912 {
4913 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4914 uint64_t rx_tstamp_cycles;
4915
4916 switch (hw->mac.type) {
4917 case e1000_i210:
4918 case e1000_i211:
4919 /* RXSTMPL stores ns and RXSTMPH stores seconds. */
4920 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4921 rx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
4922 * NSEC_PER_SEC;
4923 break;
4924 case e1000_82580:
4925 case e1000_i350:
4926 case e1000_i354:
4927 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4928 /* Only the 8 LSB are valid. */
4929 rx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_RXSTMPH)
4930 & 0xff) << 32;
4931 break;
4932 default:
4933 rx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPL);
4934 rx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_RXSTMPH)
4935 << 32;
4936 break;
4937 }
4938
4939 return rx_tstamp_cycles;
4940 }
4941
4942 static uint64_t
4943 igb_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
4944 {
4945 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4946 uint64_t tx_tstamp_cycles;
4947
4948 switch (hw->mac.type) {
4949 case e1000_i210:
4950 case e1000_i211:
4951 /* TXSTMPL stores ns and TXSTMPH stores seconds. */
4952 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4953 tx_tstamp_cycles += (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
4954 * NSEC_PER_SEC;
4955 break;
4956 case e1000_82580:
4957 case e1000_i350:
4958 case e1000_i354:
4959 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4960 /* Only the 8 LSB are valid. */
4961 tx_tstamp_cycles |= (uint64_t)(E1000_READ_REG(hw, E1000_TXSTMPH)
4962 & 0xff) << 32;
4963 break;
4964 default:
4965 tx_tstamp_cycles = (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPL);
4966 tx_tstamp_cycles |= (uint64_t)E1000_READ_REG(hw, E1000_TXSTMPH)
4967 << 32;
4968 break;
4969 }
4970
4971 return tx_tstamp_cycles;
4972 }
4973
4974 static void
4975 igb_start_timecounters(struct rte_eth_dev *dev)
4976 {
4977 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4978 struct e1000_adapter *adapter =
4979 (struct e1000_adapter *)dev->data->dev_private;
4980 uint32_t incval = 1;
4981 uint32_t shift = 0;
4982 uint64_t mask = E1000_CYCLECOUNTER_MASK;
4983
4984 switch (hw->mac.type) {
4985 case e1000_82580:
4986 case e1000_i350:
4987 case e1000_i354:
4988 /* 32 LSB bits + 8 MSB bits = 40 bits */
4989 mask = (1ULL << 40) - 1;
4990 /* fall-through */
4991 case e1000_i210:
4992 case e1000_i211:
4993 /*
4994 * Start incrementing the register
4995 * used to timestamp PTP packets.
4996 */
4997 E1000_WRITE_REG(hw, E1000_TIMINCA, incval);
4998 break;
4999 case e1000_82576:
5000 incval = E1000_INCVALUE_82576;
5001 shift = IGB_82576_TSYNC_SHIFT;
5002 E1000_WRITE_REG(hw, E1000_TIMINCA,
5003 E1000_INCPERIOD_82576 | incval);
5004 break;
5005 default:
5006 /* Not supported */
5007 return;
5008 }
5009
5010 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
5011 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
5012 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
5013
5014 adapter->systime_tc.cc_mask = mask;
5015 adapter->systime_tc.cc_shift = shift;
5016 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
5017
5018 adapter->rx_tstamp_tc.cc_mask = mask;
5019 adapter->rx_tstamp_tc.cc_shift = shift;
5020 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
5021
5022 adapter->tx_tstamp_tc.cc_mask = mask;
5023 adapter->tx_tstamp_tc.cc_shift = shift;
5024 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
5025 }
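
/*
 * Clarifying note (not part of the upstream source): on 82576 the SYSTIM
 * counter runs with TIMINCA = E1000_INCPERIOD_82576 | E1000_INCVALUE_82576,
 * i.e. it advances by (16 << 16) every 16 ns, or 2^16 counter units per
 * nanosecond.  The timecounter shift of IGB_82576_TSYNC_SHIFT (16) divides
 * that back out, so rte_timecounter_update() yields plain nanoseconds.  On
 * 82580/i350/i354 the counter is treated as counting nanoseconds directly
 * (shift 0) but is only 40 bits wide, hence the (1ULL << 40) - 1 overflow
 * mask, while i210/i211 keep the full 64-bit mask with SYSTIML holding ns
 * and SYSTIMH holding seconds.
 */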
5026
5027 static int
5028 igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
5029 {
5030 struct e1000_adapter *adapter =
5031 (struct e1000_adapter *)dev->data->dev_private;
5032
5033 adapter->systime_tc.nsec += delta;
5034 adapter->rx_tstamp_tc.nsec += delta;
5035 adapter->tx_tstamp_tc.nsec += delta;
5036
5037 return 0;
5038 }
5039
5040 static int
5041 igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
5042 {
5043 uint64_t ns;
5044 struct e1000_adapter *adapter =
5045 (struct e1000_adapter *)dev->data->dev_private;
5046
5047 ns = rte_timespec_to_ns(ts);
5048
5049 /* Set the timecounters to a new value. */
5050 adapter->systime_tc.nsec = ns;
5051 adapter->rx_tstamp_tc.nsec = ns;
5052 adapter->tx_tstamp_tc.nsec = ns;
5053
5054 return 0;
5055 }
5056
5057 static int
5058 igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
5059 {
5060 uint64_t ns, systime_cycles;
5061 struct e1000_adapter *adapter =
5062 (struct e1000_adapter *)dev->data->dev_private;
5063
5064 systime_cycles = igb_read_systime_cyclecounter(dev);
5065 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
5066 *ts = rte_ns_to_timespec(ns);
5067
5068 return 0;
5069 }
5070
5071 static int
5072 igb_timesync_enable(struct rte_eth_dev *dev)
5073 {
5074 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5075 uint32_t tsync_ctl;
5076 uint32_t tsauxc;
5077
5078 /* Stop the timesync system time. */
5079 E1000_WRITE_REG(hw, E1000_TIMINCA, 0x0);
5080 /* Reset the timesync system time value. */
5081 switch (hw->mac.type) {
5082 case e1000_82580:
5083 case e1000_i350:
5084 case e1000_i354:
5085 case e1000_i210:
5086 case e1000_i211:
5087 E1000_WRITE_REG(hw, E1000_SYSTIMR, 0x0);
5088 /* fall-through */
5089 case e1000_82576:
5090 E1000_WRITE_REG(hw, E1000_SYSTIML, 0x0);
5091 E1000_WRITE_REG(hw, E1000_SYSTIMH, 0x0);
5092 break;
5093 default:
5094 /* Not supported. */
5095 return -ENOTSUP;
5096 }
5097
5098 /* Enable system time, since it isn't on by default. */
5099 tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
5100 tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
5101 E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);
5102
5103 igb_start_timecounters(dev);
5104
5105 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
5106 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
5107 (ETHER_TYPE_1588 |
5108 E1000_ETQF_FILTER_ENABLE |
5109 E1000_ETQF_1588));
5110
5111 /* Enable timestamping of received PTP packets. */
5112 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
5113 tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
5114 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
5115
5116 /* Enable Timestamping of transmitted PTP packets. */
5117 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
5118 tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
5119 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
5120
5121 return 0;
5122 }
5123
5124 static int
5125 igb_timesync_disable(struct rte_eth_dev *dev)
5126 {
5127 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5128 uint32_t tsync_ctl;
5129
5130 /* Disable timestamping of transmitted PTP packets. */
5131 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
5132 tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
5133 E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
5134
5135 /* Disable timestamping of received PTP packets. */
5136 tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
5137 tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
5138 E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
5139
5140 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
5141 E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
5142
5143 /* Stop incrementing the System Time registers. */
5144 E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
5145
5146 return 0;
5147 }
5148
5149 static int
5150 igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
5151 struct timespec *timestamp,
5152 uint32_t flags __rte_unused)
5153 {
5154 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5155 struct e1000_adapter *adapter =
5156 (struct e1000_adapter *)dev->data->dev_private;
5157 uint32_t tsync_rxctl;
5158 uint64_t rx_tstamp_cycles;
5159 uint64_t ns;
5160
5161 tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
5162 if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
5163 return -EINVAL;
5164
5165 rx_tstamp_cycles = igb_read_rx_tstamp_cyclecounter(dev);
5166 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
5167 *timestamp = rte_ns_to_timespec(ns);
5168
5169 return 0;
5170 }
5171
5172 static int
5173 igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
5174 struct timespec *timestamp)
5175 {
5176 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5177 struct e1000_adapter *adapter =
5178 (struct e1000_adapter *)dev->data->dev_private;
5179 uint32_t tsync_txctl;
5180 uint64_t tx_tstamp_cycles;
5181 uint64_t ns;
5182
5183 tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
5184 if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
5185 return -EINVAL;
5186
5187 tx_tstamp_cycles = igb_read_tx_tstamp_cyclecounter(dev);
5188 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
5189 *timestamp = rte_ns_to_timespec(ns);
5190
5191 return 0;
5192 }
5193
5194 static int
5195 eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
5196 {
5197 int count = 0;
5198 int g_ind = 0;
5199 const struct reg_info *reg_group;
5200
5201 while ((reg_group = igb_regs[g_ind++]))
5202 count += igb_reg_group_count(reg_group);
5203
5204 return count;
5205 }
5206
5207 static int
5208 igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
5209 {
5210 int count = 0;
5211 int g_ind = 0;
5212 const struct reg_info *reg_group;
5213
5214 while ((reg_group = igbvf_regs[g_ind++]))
5215 count += igb_reg_group_count(reg_group);
5216
5217 return count;
5218 }
5219
5220 static int
5221 eth_igb_get_regs(struct rte_eth_dev *dev,
5222 struct rte_dev_reg_info *regs)
5223 {
5224 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5225 uint32_t *data = regs->data;
5226 int g_ind = 0;
5227 int count = 0;
5228 const struct reg_info *reg_group;
5229
5230 if (data == NULL) {
5231 regs->length = eth_igb_get_reg_length(dev);
5232 regs->width = sizeof(uint32_t);
5233 return 0;
5234 }
5235
5236 /* Support only full register dump */
5237 if ((regs->length == 0) ||
5238 (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
5239 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
5240 hw->device_id;
5241 while ((reg_group = igb_regs[g_ind++]))
5242 count += igb_read_regs_group(dev, &data[count],
5243 reg_group);
5244 return 0;
5245 }
5246
5247 return -ENOTSUP;
5248 }
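
/*
 * Illustrative usage sketch (not part of the upstream driver): register
 * dumps follow the two-step pattern visible above, first querying the size
 * with data == NULL and then requesting the full dump.  A minimal sketch,
 * assuming <rte_ethdev.h> is included and "port_id" is a placeholder:
 *
 *	struct rte_dev_reg_info reg_info;
 *
 *	memset(&reg_info, 0, sizeof(reg_info));
 *	rte_eth_dev_get_reg_info(port_id, &reg_info);   // fills length/width
 *	reg_info.data = calloc(reg_info.length, reg_info.width);
 *	rte_eth_dev_get_reg_info(port_id, &reg_info);   // fills data[]
 *
 * Only a full dump is supported: reg_info.length must be 0 or the value
 * returned by the first call, otherwise -ENOTSUP is returned.
 */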
5249
5250 static int
5251 igbvf_get_regs(struct rte_eth_dev *dev,
5252 struct rte_dev_reg_info *regs)
5253 {
5254 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5255 uint32_t *data = regs->data;
5256 int g_ind = 0;
5257 int count = 0;
5258 const struct reg_info *reg_group;
5259
5260 if (data == NULL) {
5261 regs->length = igbvf_get_reg_length(dev);
5262 regs->width = sizeof(uint32_t);
5263 return 0;
5264 }
5265
5266 /* Support only full register dump */
5267 if ((regs->length == 0) ||
5268 (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
5269 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
5270 hw->device_id;
5271 while ((reg_group = igbvf_regs[g_ind++]))
5272 count += igb_read_regs_group(dev, &data[count],
5273 reg_group);
5274 return 0;
5275 }
5276
5277 return -ENOTSUP;
5278 }
5279
5280 static int
5281 eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
5282 {
5283 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5284
5285 /* Return unit is byte count */
5286 return hw->nvm.word_size * 2;
5287 }
5288
5289 static int
5290 eth_igb_get_eeprom(struct rte_eth_dev *dev,
5291 struct rte_dev_eeprom_info *in_eeprom)
5292 {
5293 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5294 struct e1000_nvm_info *nvm = &hw->nvm;
5295 uint16_t *data = in_eeprom->data;
5296 int first, length;
5297
5298 first = in_eeprom->offset >> 1;
5299 length = in_eeprom->length >> 1;
5300 if ((first >= hw->nvm.word_size) ||
5301 ((first + length) >= hw->nvm.word_size))
5302 return -EINVAL;
5303
5304 in_eeprom->magic = hw->vendor_id |
5305 ((uint32_t)hw->device_id << 16);
5306
5307 if ((nvm->ops.read) == NULL)
5308 return -ENOTSUP;
5309
5310 return nvm->ops.read(hw, first, length, data);
5311 }
5312
5313 static int
5314 eth_igb_set_eeprom(struct rte_eth_dev *dev,
5315 struct rte_dev_eeprom_info *in_eeprom)
5316 {
5317 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5318 struct e1000_nvm_info *nvm = &hw->nvm;
5319 uint16_t *data = in_eeprom->data;
5320 int first, length;
5321
5322 first = in_eeprom->offset >> 1;
5323 length = in_eeprom->length >> 1;
5324 if ((first >= hw->nvm.word_size) ||
5325 ((first + length) >= hw->nvm.word_size))
5326 return -EINVAL;
5327
5328 in_eeprom->magic = (uint32_t)hw->vendor_id |
5329 ((uint32_t)hw->device_id << 16);
5330
5331 if ((nvm->ops.write) == NULL)
5332 return -ENOTSUP;
5333 return nvm->ops.write(hw, first, length, data);
5334 }
5335
5336 static int
5337 eth_igb_get_module_info(struct rte_eth_dev *dev,
5338 struct rte_eth_dev_module_info *modinfo)
5339 {
5340 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5341
5342 uint32_t status = 0;
5343 uint16_t sff8472_rev, addr_mode;
5344 bool page_swap = false;
5345
5346 if (hw->phy.media_type == e1000_media_type_copper ||
5347 hw->phy.media_type == e1000_media_type_unknown)
5348 return -EOPNOTSUPP;
5349
5350 /* Check whether we support SFF-8472 or not */
5351 status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
5352 if (status)
5353 return -EIO;
5354
5355 /* Read the SFF-8472 addressing mode */
5356 status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
5357 if (status)
5358 return -EIO;
5359
5360 /* addressing mode is not supported */
5361 if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
5362 PMD_DRV_LOG(ERR,
5363 "Address change required to access page 0xA2, "
5364 "but not supported. Please report the module "
5365 "type to the driver maintainers.\n");
5366 page_swap = true;
5367 }
5368
5369 if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
5370 /* We have an SFP, but it does not support SFF-8472 */
5371 modinfo->type = RTE_ETH_MODULE_SFF_8079;
5372 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
5373 } else {
5374 /* We have an SFP which supports a revision of SFF-8472 */
5375 modinfo->type = RTE_ETH_MODULE_SFF_8472;
5376 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
5377 }
5378
5379 return 0;
5380 }
5381
5382 static int
5383 eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
5384 struct rte_dev_eeprom_info *info)
5385 {
5386 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5387
5388 uint32_t status = 0;
5389 uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1];
5390 u16 first_word, last_word;
5391 int i = 0;
5392
5393 if (info->length == 0)
5394 return -EINVAL;
5395
5396 first_word = info->offset >> 1;
5397 last_word = (info->offset + info->length - 1) >> 1;
5398
5399 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
5400 for (i = 0; i < last_word - first_word + 1; i++) {
5401 status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2,
5402 &dataword[i]);
5403 if (status) {
5404 /* Error occurred while reading module */
5405 return -EIO;
5406 }
5407
5408 dataword[i] = rte_be_to_cpu_16(dataword[i]);
5409 }
5410
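/* The words were converted from big-endian to host order above; honor an
 * odd starting byte offset when copying the requested span into the
 * caller's buffer.
 */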
5411 memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length);
5412
5413 return 0;
5414 }
5415
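/* Per-queue RX interrupt enable/disable: set or clear the queue's bit in
 * the extended interrupt mask (EIMS/EIMC). The bit position is shifted
 * past the misc vector when one is reserved (rte_intr_allow_others).
 */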
5416 static int
5417 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
5418 {
5419 struct e1000_hw *hw =
5420 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5421 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5422 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5423 uint32_t vec = E1000_MISC_VEC_ID;
5424
5425 if (rte_intr_allow_others(intr_handle))
5426 vec = E1000_RX_VEC_START;
5427
5428 uint32_t mask = 1 << (queue_id + vec);
5429
5430 E1000_WRITE_REG(hw, E1000_EIMC, mask);
5431 E1000_WRITE_FLUSH(hw);
5432
5433 return 0;
5434 }
5435
5436 static int
5437 eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
5438 {
5439 struct e1000_hw *hw =
5440 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5441 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5442 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5443 uint32_t vec = E1000_MISC_VEC_ID;
5444
5445 if (rte_intr_allow_others(intr_handle))
5446 vec = E1000_RX_VEC_START;
5447
5448 uint32_t mask = 1 << (queue_id + vec);
5449 uint32_t regval;
5450
5451 regval = E1000_READ_REG(hw, E1000_EIMS);
5452 E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
5453 E1000_WRITE_FLUSH(hw);
5454
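/* Also re-enable the interrupt at the EAL interrupt-handle level
 * (e.g. VFIO/UIO), not just in the device's mask register.
 */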
5455 rte_intr_enable(intr_handle);
5456
5457 return 0;
5458 }
5459
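/* Program one 8-bit entry of an IVAR register: clear the field at the
 * given bit offset, then write the MSI-X vector number with the valid bit.
 */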
5460 static void
5461 eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
5462 uint8_t index, uint8_t offset)
5463 {
5464 uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
5465
5466 /* clear bits */
5467 val &= ~((uint32_t)0xFF << offset);
5468
5469 /* write vector and valid bit */
5470 val |= (msix_vector | E1000_IVAR_VALID) << offset;
5471
5472 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
5473 }
5474
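/* Bind an RX (direction 0) or TX (direction 1) queue to an MSI-X vector.
 * 82575 uses the MSIXBM bitmap registers; 82576 and later NICs use IVAR
 * entries, whose field layout differs between the 82576 and newer MACs.
 */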
5475 static void
5476 eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
5477 uint8_t queue, uint8_t msix_vector)
5478 {
5479 uint32_t tmp = 0;
5480
5481 if (hw->mac.type == e1000_82575) {
5482 if (direction == 0)
5483 tmp = E1000_EICR_RX_QUEUE0 << queue;
5484 else if (direction == 1)
5485 tmp = E1000_EICR_TX_QUEUE0 << queue;
5486 E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
5487 } else if (hw->mac.type == e1000_82576) {
5488 if ((direction == 0) || (direction == 1))
5489 eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
5490 ((queue & 0x8) << 1) +
5491 8 * direction);
5492 } else if ((hw->mac.type == e1000_82580) ||
5493 (hw->mac.type == e1000_i350) ||
5494 (hw->mac.type == e1000_i354) ||
5495 (hw->mac.type == e1000_i210) ||
5496 (hw->mac.type == e1000_i211)) {
5497 if ((direction == 0) || (direction == 1))
5498 eth_igb_write_ivar(hw, msix_vector,
5499 queue >> 1,
5500 ((queue & 0x1) << 4) +
5501 8 * direction);
5502 }
5503 }
5504
5505 /* Sets up the hardware to generate MSI-X interrupts properly
5506 * @dev
5507 * pointer to the ethdev; the board private structure is taken from it
5508 */
5509 static void
5510 eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
5511 {
5512 int queue_id;
5513 uint32_t tmpval, regval, intr_mask;
5514 struct e1000_hw *hw =
5515 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5516 uint32_t vec = E1000_MISC_VEC_ID;
5517 uint32_t base = E1000_MISC_VEC_ID;
5518 uint32_t misc_shift = 0;
5519 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
5520 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
5521
5522 /* don't configure the MSI-X registers if no mapping between
5523 * interrupt vectors and event fds has been set up
5524 */
5525 if (!rte_intr_dp_is_en(intr_handle))
5526 return;
5527
5528 if (rte_intr_allow_others(intr_handle)) {
5529 vec = base = E1000_RX_VEC_START;
5530 misc_shift = 1;
5531 }
5532
5533 /* set interrupt vector for other causes */
5534 if (hw->mac.type == e1000_82575) {
5535 tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
5536 /* enable MSI-X PBA support */
5537 tmpval |= E1000_CTRL_EXT_PBA_CLR;
5538
5539 /* Auto-Mask interrupts upon ICR read */
5540 tmpval |= E1000_CTRL_EXT_EIAME;
5541 tmpval |= E1000_CTRL_EXT_IRCA;
5542
5543 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);
5544
5545 /* enable msix_other interrupt */
5546 E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
5547 regval = E1000_READ_REG(hw, E1000_EIAC);
5548 E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
5549 regval = E1000_READ_REG(hw, E1000_EIAM);
5550 E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
5551 } else if ((hw->mac.type == e1000_82576) ||
5552 (hw->mac.type == e1000_82580) ||
5553 (hw->mac.type == e1000_i350) ||
5554 (hw->mac.type == e1000_i354) ||
5555 (hw->mac.type == e1000_i210) ||
5556 (hw->mac.type == e1000_i211)) {
5557 /* turn on MSI-X capability first */
5558 E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
5559 E1000_GPIE_PBA | E1000_GPIE_EIAME |
5560 E1000_GPIE_NSICR);
5561 intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
5562 misc_shift;
5563 regval = E1000_READ_REG(hw, E1000_EIAC);
5564 E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);
5565
5566 /* enable msix_other interrupt */
5567 regval = E1000_READ_REG(hw, E1000_EIMS);
5568 E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
5569 tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8;
5570 E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
5571 }
5572
5573 /* use EIAM to auto-mask when an MSI-X interrupt
5574 * is asserted; this saves a register write for every interrupt
5575 */
5576 intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
5577 misc_shift;
5578 regval = E1000_READ_REG(hw, E1000_EIAM);
5579 E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);
5580
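/* Give each RX queue its own vector and record the mapping for the EAL;
 * if there are more queues than event fds, the last vector is shared.
 */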
5581 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
5582 eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
5583 intr_handle->intr_vec[queue_id] = vec;
5584 if (vec < base + intr_handle->nb_efd - 1)
5585 vec++;
5586 }
5587
5588 E1000_WRITE_FLUSH(hw);
5589 }
5590
5591 /* restore n-tuple (2-tuple and 5-tuple) filters */
5592 static inline void
5593 igb_ntuple_filter_restore(struct rte_eth_dev *dev)
5594 {
5595 struct e1000_filter_info *filter_info =
5596 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5597 struct e1000_5tuple_filter *p_5tuple;
5598 struct e1000_2tuple_filter *p_2tuple;
5599
5600 TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, entries) {
5601 igb_inject_5tuple_filter_82576(dev, p_5tuple);
5602 }
5603
5604 TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) {
5605 igb_inject_2uple_filter(dev, p_2tuple);
5606 }
5607 }
5608
5609 /* restore SYN filter */
5610 static inline void
5611 igb_syn_filter_restore(struct rte_eth_dev *dev)
5612 {
5613 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5614 struct e1000_filter_info *filter_info =
5615 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5616 uint32_t synqf;
5617
5618 synqf = filter_info->syn_info;
5619
5620 if (synqf & E1000_SYN_FILTER_ENABLE) {
5621 E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
5622 E1000_WRITE_FLUSH(hw);
5623 }
5624 }
5625
5626 /* restore ethertype filters */
5627 static inline void
5628 igb_ethertype_filter_restore(struct rte_eth_dev *dev)
5629 {
5630 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5631 struct e1000_filter_info *filter_info =
5632 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5633 int i;
5634
5635 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
5636 if (filter_info->ethertype_mask & (1 << i)) {
5637 E1000_WRITE_REG(hw, E1000_ETQF(i),
5638 filter_info->ethertype_filters[i].etqf);
5639 E1000_WRITE_FLUSH(hw);
5640 }
5641 }
5642 }
5643
5644 /* restore flex byte filters */
5645 static inline void
5646 igb_flex_filter_restore(struct rte_eth_dev *dev)
5647 {
5648 struct e1000_filter_info *filter_info =
5649 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5650 struct e1000_flex_filter *flex_filter;
5651
5652 TAILQ_FOREACH(flex_filter, &filter_info->flex_list, entries) {
5653 igb_inject_flex_filter(dev, flex_filter);
5654 }
5655 }
5656
5657 /* restore rss filter */
5658 static inline void
5659 igb_rss_filter_restore(struct rte_eth_dev *dev)
5660 {
5661 struct e1000_filter_info *filter_info =
5662 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
5663
5664 if (filter_info->rss_info.conf.queue_num)
5665 igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
5666 }
5667
5668 /* restore all previously configured filter types */
5669 static int
5670 igb_filter_restore(struct rte_eth_dev *dev)
5671 {
5672 igb_ntuple_filter_restore(dev);
5673 igb_ethertype_filter_restore(dev);
5674 igb_syn_filter_restore(dev);
5675 igb_flex_filter_restore(dev);
5676 igb_rss_filter_restore(dev);
5677
5678 return 0;
5679 }
5680
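/* Register the PF and VF drivers with the PCI bus, expose their supported
 * device ID tables, and declare the kernel modules they can bind through.
 */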
5681 RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
5682 RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
5683 RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
5684 RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
5685 RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
5686 RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");
5687
5688 /* see e1000_logs.c */
5689 RTE_INIT(e1000_init_log)
5690 {
5691 e1000_igb_init_log();
5692 }