]>
Commit | Line | Data |
---|---|---|
11fdf7f2 TL |
1 | /* SPDX-License-Identifier: BSD-3-Clause |
2 | * Copyright(c) 2010-2017 Intel Corporation | |
7c673cae FG |
3 | */ |
4 | ||
5 | #include <sys/queue.h> | |
6 | #include <stdio.h> | |
7 | #include <errno.h> | |
8 | #include <stdint.h> | |
9 | #include <string.h> | |
10 | #include <unistd.h> | |
11 | #include <stdarg.h> | |
12 | #include <inttypes.h> | |
13 | #include <netinet/in.h> | |
9f95a23c | 14 | #include <rte_string_fns.h> |
7c673cae FG |
15 | #include <rte_byteorder.h> |
16 | #include <rte_common.h> | |
17 | #include <rte_cycles.h> | |
18 | ||
19 | #include <rte_interrupts.h> | |
20 | #include <rte_log.h> | |
21 | #include <rte_debug.h> | |
22 | #include <rte_pci.h> | |
11fdf7f2 | 23 | #include <rte_bus_pci.h> |
7c673cae FG |
24 | #include <rte_branch_prediction.h> |
25 | #include <rte_memory.h> | |
7c673cae FG |
26 | #include <rte_eal.h> |
27 | #include <rte_alarm.h> | |
28 | #include <rte_ether.h> | |
11fdf7f2 TL |
29 | #include <rte_ethdev_driver.h> |
30 | #include <rte_ethdev_pci.h> | |
7c673cae FG |
31 | #include <rte_malloc.h> |
32 | #include <rte_random.h> | |
33 | #include <rte_dev.h> | |
11fdf7f2 TL |
34 | #include <rte_hash_crc.h> |
35 | #ifdef RTE_LIBRTE_SECURITY | |
36 | #include <rte_security_driver.h> | |
37 | #endif | |
7c673cae FG |
38 | |
39 | #include "ixgbe_logs.h" | |
40 | #include "base/ixgbe_api.h" | |
41 | #include "base/ixgbe_vf.h" | |
42 | #include "base/ixgbe_common.h" | |
43 | #include "ixgbe_ethdev.h" | |
44 | #include "ixgbe_bypass.h" | |
45 | #include "ixgbe_rxtx.h" | |
46 | #include "base/ixgbe_type.h" | |
47 | #include "base/ixgbe_phy.h" | |
48 | #include "ixgbe_regs.h" | |
49 | ||
7c673cae FG |
/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680

/* Default value of Max Rx Queue */
#define IXGBE_MAX_RX_QUEUE_NUM 128

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

/* Tx max-memory-window sizes (used when setting queue rate limits). */
#define IXGBE_MMW_SIZE_DEFAULT        0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
#define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */

/*
 * Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH  32
#define IXGBE_DEFAULT_RX_PTHRESH      8
#define IXGBE_DEFAULT_RX_HTHRESH      8
#define IXGBE_DEFAULT_RX_WTHRESH      0

#define IXGBE_DEFAULT_TX_FREE_THRESH  32
#define IXGBE_DEFAULT_TX_PTHRESH      32
#define IXGBE_DEFAULT_TX_HTHRESH      0
#define IXGBE_DEFAULT_TX_WTHRESH      0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH  CHAR_BIT
#define IXGBE_8_BIT_MASK   UINT8_MAX

#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

/* Number of per-queue statistics counters (derived from hw_stats layout). */
#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

/* Additional timesync values. */
#define NSEC_PER_SEC             1000000000L
#define IXGBE_INCVAL_10GB        0x66666666
#define IXGBE_INCVAL_1GB         0x40000000
#define IXGBE_INCVAL_100         0x50000000
#define IXGBE_INCVAL_SHIFT_10GB  28
#define IXGBE_INCVAL_SHIFT_1GB   24
#define IXGBE_INCVAL_SHIFT_100   21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24

#define IXGBE_CYCLECOUNTER_MASK  0xffffffffffffffffULL

/*
 * E-Tag / L2-tunnel related register fields.
 * NOTE(review): bit positions presumably follow the ixgbe datasheet
 * register layouts — confirm against base/ixgbe_type.h when modifying.
 */
#define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000
#define IXGBE_ETAG_ETYPE               0x00005084
#define IXGBE_ETAG_ETYPE_MASK          0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID         0x80000000
#define IXGBE_RAH_ADTYPE               0x40000000
#define IXGBE_RAL_ETAG_FILTER_MASK     0x00003fff
#define IXGBE_VMVIR_TAGA_MASK          0x18000000
#define IXGBE_VMVIR_TAGA_ETAG_INSERT   0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG            0x00000004
#define IXGBE_VTEICR_MASK              0x07

#define IXGBE_EXVET_VET_EXT_SHIFT 16
#define IXGBE_DMATXCTL_VT_MASK    0xFFFF0000
129 | ||
11fdf7f2 | 130 | static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params); |
7c673cae | 131 | static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); |
11fdf7f2 TL |
132 | static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev); |
133 | static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev); | |
134 | static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev); | |
135 | static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev); | |
136 | static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev); | |
7c673cae FG |
137 | static int ixgbe_dev_configure(struct rte_eth_dev *dev); |
138 | static int ixgbe_dev_start(struct rte_eth_dev *dev); | |
139 | static void ixgbe_dev_stop(struct rte_eth_dev *dev); | |
140 | static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev); | |
141 | static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev); | |
142 | static void ixgbe_dev_close(struct rte_eth_dev *dev); | |
11fdf7f2 | 143 | static int ixgbe_dev_reset(struct rte_eth_dev *dev); |
7c673cae FG |
144 | static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); |
145 | static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); | |
146 | static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); | |
147 | static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); | |
148 | static int ixgbe_dev_link_update(struct rte_eth_dev *dev, | |
149 | int wait_to_complete); | |
11fdf7f2 | 150 | static int ixgbe_dev_stats_get(struct rte_eth_dev *dev, |
7c673cae FG |
151 | struct rte_eth_stats *stats); |
152 | static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev, | |
153 | struct rte_eth_xstat *xstats, unsigned n); | |
154 | static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, | |
155 | struct rte_eth_xstat *xstats, unsigned n); | |
11fdf7f2 TL |
156 | static int |
157 | ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, | |
158 | uint64_t *values, unsigned int n); | |
7c673cae FG |
159 | static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev); |
160 | static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev); | |
11fdf7f2 TL |
161 | static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev, |
162 | struct rte_eth_xstat_name *xstats_names, | |
163 | unsigned int size); | |
164 | static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev, | |
165 | struct rte_eth_xstat_name *xstats_names, unsigned limit); | |
166 | static int ixgbe_dev_xstats_get_names_by_id( | |
167 | struct rte_eth_dev *dev, | |
168 | struct rte_eth_xstat_name *xstats_names, | |
169 | const uint64_t *ids, | |
170 | unsigned int limit); | |
7c673cae FG |
171 | static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, |
172 | uint16_t queue_id, | |
173 | uint8_t stat_idx, | |
174 | uint8_t is_rx); | |
11fdf7f2 TL |
175 | static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, |
176 | size_t fw_size); | |
7c673cae FG |
177 | static void ixgbe_dev_info_get(struct rte_eth_dev *dev, |
178 | struct rte_eth_dev_info *dev_info); | |
179 | static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev); | |
180 | static void ixgbevf_dev_info_get(struct rte_eth_dev *dev, | |
181 | struct rte_eth_dev_info *dev_info); | |
182 | static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); | |
183 | ||
184 | static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev, | |
185 | uint16_t vlan_id, int on); | |
186 | static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, | |
187 | enum rte_vlan_type vlan_type, | |
188 | uint16_t tpid_id); | |
189 | static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, | |
190 | uint16_t queue, bool on); | |
191 | static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, | |
192 | int on); | |
11fdf7f2 TL |
193 | static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, |
194 | int mask); | |
195 | static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask); | |
196 | static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); | |
7c673cae FG |
197 | static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue); |
198 | static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue); | |
199 | static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev); | |
200 | static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev); | |
201 | ||
202 | static int ixgbe_dev_led_on(struct rte_eth_dev *dev); | |
203 | static int ixgbe_dev_led_off(struct rte_eth_dev *dev); | |
204 | static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, | |
205 | struct rte_eth_fc_conf *fc_conf); | |
206 | static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, | |
207 | struct rte_eth_fc_conf *fc_conf); | |
208 | static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, | |
209 | struct rte_eth_pfc_conf *pfc_conf); | |
210 | static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, | |
211 | struct rte_eth_rss_reta_entry64 *reta_conf, | |
212 | uint16_t reta_size); | |
213 | static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, | |
214 | struct rte_eth_rss_reta_entry64 *reta_conf, | |
215 | uint16_t reta_size); | |
216 | static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); | |
11fdf7f2 TL |
217 | static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); |
218 | static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev); | |
7c673cae FG |
219 | static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); |
220 | static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); | |
9f95a23c | 221 | static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev); |
11fdf7f2 | 222 | static void ixgbe_dev_interrupt_handler(void *param); |
7c673cae | 223 | static void ixgbe_dev_interrupt_delayed_handler(void *param); |
9f95a23c TL |
224 | static void ixgbe_dev_setup_link_alarm_handler(void *param); |
225 | ||
11fdf7f2 TL |
226 | static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, |
227 | uint32_t index, uint32_t pool); | |
7c673cae | 228 | static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); |
11fdf7f2 | 229 | static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, |
7c673cae FG |
230 | struct ether_addr *mac_addr); |
231 | static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config); | |
11fdf7f2 TL |
232 | static bool is_device_supported(struct rte_eth_dev *dev, |
233 | struct rte_pci_driver *drv); | |
7c673cae FG |
234 | |
235 | /* For Virtual Function support */ | |
236 | static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev); | |
237 | static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev); | |
238 | static int ixgbevf_dev_configure(struct rte_eth_dev *dev); | |
239 | static int ixgbevf_dev_start(struct rte_eth_dev *dev); | |
11fdf7f2 TL |
240 | static int ixgbevf_dev_link_update(struct rte_eth_dev *dev, |
241 | int wait_to_complete); | |
7c673cae FG |
242 | static void ixgbevf_dev_stop(struct rte_eth_dev *dev); |
243 | static void ixgbevf_dev_close(struct rte_eth_dev *dev); | |
11fdf7f2 TL |
244 | static int ixgbevf_dev_reset(struct rte_eth_dev *dev); |
245 | static void ixgbevf_intr_disable(struct rte_eth_dev *dev); | |
246 | static void ixgbevf_intr_enable(struct rte_eth_dev *dev); | |
247 | static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev, | |
7c673cae FG |
248 | struct rte_eth_stats *stats); |
249 | static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); | |
250 | static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, | |
251 | uint16_t vlan_id, int on); | |
252 | static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, | |
253 | uint16_t queue, int on); | |
11fdf7f2 TL |
254 | static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask); |
255 | static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); | |
7c673cae FG |
256 | static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); |
257 | static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, | |
258 | uint16_t queue_id); | |
259 | static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, | |
260 | uint16_t queue_id); | |
261 | static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, | |
262 | uint8_t queue, uint8_t msix_vector); | |
263 | static void ixgbevf_configure_msix(struct rte_eth_dev *dev); | |
9f95a23c TL |
264 | static void ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev); |
265 | static void ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev); | |
7c673cae FG |
266 | static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev); |
267 | static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev); | |
268 | ||
269 | /* For Eth VMDQ APIs support */ | |
270 | static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct | |
271 | ether_addr * mac_addr, uint8_t on); | |
272 | static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on); | |
7c673cae FG |
273 | static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, |
274 | struct rte_eth_mirror_conf *mirror_conf, | |
275 | uint8_t rule_id, uint8_t on); | |
276 | static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, | |
277 | uint8_t rule_id); | |
278 | static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, | |
279 | uint16_t queue_id); | |
280 | static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, | |
281 | uint16_t queue_id); | |
282 | static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, | |
283 | uint8_t queue, uint8_t msix_vector); | |
284 | static void ixgbe_configure_msix(struct rte_eth_dev *dev); | |
285 | ||
11fdf7f2 TL |
286 | static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev, |
287 | struct ether_addr *mac_addr, | |
288 | uint32_t index, uint32_t pool); | |
7c673cae | 289 | static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); |
11fdf7f2 | 290 | static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, |
7c673cae | 291 | struct ether_addr *mac_addr); |
7c673cae FG |
292 | static int ixgbe_syn_filter_get(struct rte_eth_dev *dev, |
293 | struct rte_eth_syn_filter *filter); | |
294 | static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev, | |
295 | enum rte_filter_op filter_op, | |
296 | void *arg); | |
297 | static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, | |
298 | struct ixgbe_5tuple_filter *filter); | |
299 | static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, | |
300 | struct ixgbe_5tuple_filter *filter); | |
7c673cae FG |
301 | static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, |
302 | enum rte_filter_op filter_op, | |
303 | void *arg); | |
304 | static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, | |
305 | struct rte_eth_ntuple_filter *filter); | |
7c673cae FG |
306 | static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, |
307 | enum rte_filter_op filter_op, | |
308 | void *arg); | |
309 | static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, | |
310 | struct rte_eth_ethertype_filter *filter); | |
311 | static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, | |
312 | enum rte_filter_type filter_type, | |
313 | enum rte_filter_op filter_op, | |
314 | void *arg); | |
315 | static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); | |
316 | ||
317 | static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, | |
318 | struct ether_addr *mc_addr_set, | |
319 | uint32_t nb_mc_addr); | |
320 | static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, | |
321 | struct rte_eth_dcb_info *dcb_info); | |
322 | ||
323 | static int ixgbe_get_reg_length(struct rte_eth_dev *dev); | |
324 | static int ixgbe_get_regs(struct rte_eth_dev *dev, | |
325 | struct rte_dev_reg_info *regs); | |
326 | static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev); | |
327 | static int ixgbe_get_eeprom(struct rte_eth_dev *dev, | |
328 | struct rte_dev_eeprom_info *eeprom); | |
329 | static int ixgbe_set_eeprom(struct rte_eth_dev *dev, | |
330 | struct rte_dev_eeprom_info *eeprom); | |
331 | ||
11fdf7f2 TL |
332 | static int ixgbe_get_module_info(struct rte_eth_dev *dev, |
333 | struct rte_eth_dev_module_info *modinfo); | |
334 | static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev, | |
335 | struct rte_dev_eeprom_info *info); | |
336 | ||
7c673cae FG |
337 | static int ixgbevf_get_reg_length(struct rte_eth_dev *dev); |
338 | static int ixgbevf_get_regs(struct rte_eth_dev *dev, | |
339 | struct rte_dev_reg_info *regs); | |
340 | ||
341 | static int ixgbe_timesync_enable(struct rte_eth_dev *dev); | |
342 | static int ixgbe_timesync_disable(struct rte_eth_dev *dev); | |
343 | static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, | |
344 | struct timespec *timestamp, | |
345 | uint32_t flags); | |
346 | static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, | |
347 | struct timespec *timestamp); | |
348 | static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); | |
349 | static int ixgbe_timesync_read_time(struct rte_eth_dev *dev, | |
350 | struct timespec *timestamp); | |
351 | static int ixgbe_timesync_write_time(struct rte_eth_dev *dev, | |
352 | const struct timespec *timestamp); | |
11fdf7f2 | 353 | static void ixgbevf_dev_interrupt_handler(void *param); |
7c673cae FG |
354 | |
355 | static int ixgbe_dev_l2_tunnel_eth_type_conf | |
356 | (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel); | |
357 | static int ixgbe_dev_l2_tunnel_offload_set | |
358 | (struct rte_eth_dev *dev, | |
359 | struct rte_eth_l2_tunnel_conf *l2_tunnel, | |
360 | uint32_t mask, | |
361 | uint8_t en); | |
362 | static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, | |
363 | enum rte_filter_op filter_op, | |
364 | void *arg); | |
365 | ||
366 | static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, | |
367 | struct rte_eth_udp_tunnel *udp_tunnel); | |
368 | static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, | |
369 | struct rte_eth_udp_tunnel *udp_tunnel); | |
11fdf7f2 TL |
370 | static int ixgbe_filter_restore(struct rte_eth_dev *dev); |
371 | static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev); | |
7c673cae FG |
372 | |
/*
 * Define VF Stats MACRO for Non "cleared on read" register.
 * Accumulates the delta between the freshly-read register value and the
 * last value seen, so that 32-bit counter wrap-around is absorbed.
 * Relies on a local 'hw' handle being in scope at the expansion site.
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
	uint32_t latest = IXGBE_READ_REG(hw, reg);              \
	cur += (latest - last) & UINT_MAX;                      \
	last = latest;                                          \
}

/*
 * Same idea for a 36-bit counter split across two 32-bit registers
 * (lsb/msb); the +2^36 addition and 36-bit mask absorb wrap-around.
 */
#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
	u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
	u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
	u64 latest = ((new_msb << 32) | new_lsb);                \
	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
	last = latest;                                           \
}

/* Set the per-queue bit for queue 'q' in hwstrip bitmap 'h'. */
#define IXGBE_SET_HWSTRIP(h, q) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

/* Clear the per-queue bit for queue 'q' in hwstrip bitmap 'h'. */
#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

/* Read the per-queue bit for queue 'q' from bitmap 'h' into 'r' (0 or 1). */
#define IXGBE_GET_HWSTRIP(h, q, r) do {\
	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)
409 | ||
11fdf7f2 TL |
/* Dynamic log-type IDs for this PMD (init-time vs. runtime driver logs). */
int ixgbe_logtype_init;
int ixgbe_logtype_driver;
412 | ||
7c673cae FG |
/*
 * The set of PCI devices this driver supports (PF devices).
 * Matched by vendor/device ID during PCI probe; list is NUL-terminated
 * by the zero-vendor sentinel entry.
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {
	/* 82598 family */
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
	/* 82599 family */
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
	/* X540 family */
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
	/* X550 family */
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
	{ .vendor_id = 0, /* sentinel */ },
};
469 | ||
/*
 * The set of PCI devices this driver supports (for 82599 VF).
 * Covers both bare-metal VF and Hyper-V (_HV) variants; terminated by the
 * zero-vendor sentinel entry.
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};
486 | ||
/* RX descriptor ring limits advertised via dev_infos_get(). */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_RXD_ALIGN,
};
492 | ||
/* TX descriptor ring limits advertised via dev_infos_get(),
 * including the per-packet segment caps. */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_TXD_ALIGN,
	.nb_seg_max = IXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};
500 | ||
/* ethdev callback table for the ixgbe physical function (PF). */
static const struct eth_dev_ops ixgbe_eth_dev_ops = {
	/* device lifecycle */
	.dev_configure        = ixgbe_dev_configure,
	.dev_start            = ixgbe_dev_start,
	.dev_stop             = ixgbe_dev_stop,
	.dev_set_link_up      = ixgbe_dev_set_link_up,
	.dev_set_link_down    = ixgbe_dev_set_link_down,
	.dev_close            = ixgbe_dev_close,
	.dev_reset            = ixgbe_dev_reset,
	/* RX mode */
	.promiscuous_enable   = ixgbe_dev_promiscuous_enable,
	.promiscuous_disable  = ixgbe_dev_promiscuous_disable,
	.allmulticast_enable  = ixgbe_dev_allmulticast_enable,
	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
	.link_update          = ixgbe_dev_link_update,
	/* statistics */
	.stats_get            = ixgbe_dev_stats_get,
	.xstats_get           = ixgbe_dev_xstats_get,
	.xstats_get_by_id     = ixgbe_dev_xstats_get_by_id,
	.stats_reset          = ixgbe_dev_stats_reset,
	.xstats_reset         = ixgbe_dev_xstats_reset,
	.xstats_get_names     = ixgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
	/* device info */
	.fw_version_get       = ixgbe_fw_version_get,
	.dev_infos_get        = ixgbe_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set              = ixgbe_dev_mtu_set,
	/* VLAN */
	.vlan_filter_set      = ixgbe_vlan_filter_set,
	.vlan_tpid_set        = ixgbe_vlan_tpid_set,
	.vlan_offload_set     = ixgbe_vlan_offload_set,
	.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
	/* queue management */
	.rx_queue_start       = ixgbe_dev_rx_queue_start,
	.rx_queue_stop        = ixgbe_dev_rx_queue_stop,
	.tx_queue_start       = ixgbe_dev_tx_queue_start,
	.tx_queue_stop        = ixgbe_dev_tx_queue_stop,
	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
	.rx_queue_release     = ixgbe_dev_rx_queue_release,
	.rx_queue_count       = ixgbe_dev_rx_queue_count,
	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
	.rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
	.tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
	.tx_queue_release     = ixgbe_dev_tx_queue_release,
	/* LEDs and flow control */
	.dev_led_on           = ixgbe_dev_led_on,
	.dev_led_off          = ixgbe_dev_led_off,
	.flow_ctrl_get        = ixgbe_flow_ctrl_get,
	.flow_ctrl_set        = ixgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
	/* MAC address management */
	.mac_addr_add         = ixgbe_add_rar,
	.mac_addr_remove      = ixgbe_remove_rar,
	.mac_addr_set         = ixgbe_set_default_mac_addr,
	.uc_hash_table_set    = ixgbe_uc_hash_table_set,
	.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
	/* mirroring / rate limiting / RSS */
	.mirror_rule_set      = ixgbe_mirror_rule_set,
	.mirror_rule_reset    = ixgbe_mirror_rule_reset,
	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
	.reta_update          = ixgbe_dev_rss_reta_update,
	.reta_query           = ixgbe_dev_rss_reta_query,
	.rss_hash_update      = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
	/* filtering and misc */
	.filter_ctrl          = ixgbe_dev_filter_ctrl,
	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get         = ixgbe_rxq_info_get,
	.txq_info_get         = ixgbe_txq_info_get,
	/* IEEE1588 timesync */
	.timesync_enable      = ixgbe_timesync_enable,
	.timesync_disable     = ixgbe_timesync_disable,
	.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
	/* registers / EEPROM / modules */
	.get_reg              = ixgbe_get_regs,
	.get_eeprom_length    = ixgbe_get_eeprom_length,
	.get_eeprom           = ixgbe_get_eeprom,
	.set_eeprom           = ixgbe_set_eeprom,
	.get_module_info      = ixgbe_get_module_info,
	.get_module_eeprom    = ixgbe_get_module_eeprom,
	.get_dcb_info         = ixgbe_dev_get_dcb_info,
	.timesync_adjust_time = ixgbe_timesync_adjust_time,
	.timesync_read_time   = ixgbe_timesync_read_time,
	.timesync_write_time  = ixgbe_timesync_write_time,
	/* L2 tunnel / UDP tunnel / traffic management */
	.l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
	.l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
	.udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
	.tm_ops_get           = ixgbe_tm_ops_get,
};
585 | ||
/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
	/* device lifecycle */
	.dev_configure        = ixgbevf_dev_configure,
	.dev_start            = ixgbevf_dev_start,
	.dev_stop             = ixgbevf_dev_stop,
	.link_update          = ixgbevf_dev_link_update,
	/* statistics — the VF has no separate xstats clear path, so
	 * xstats_reset deliberately reuses ixgbevf_dev_stats_reset. */
	.stats_get            = ixgbevf_dev_stats_get,
	.xstats_get           = ixgbevf_dev_xstats_get,
	.stats_reset          = ixgbevf_dev_stats_reset,
	.xstats_reset         = ixgbevf_dev_stats_reset,
	.xstats_get_names     = ixgbevf_dev_xstats_get_names,
	.dev_close            = ixgbevf_dev_close,
	.dev_reset            = ixgbevf_dev_reset,
	/* RX mode */
	.promiscuous_enable   = ixgbevf_dev_promiscuous_enable,
	.promiscuous_disable  = ixgbevf_dev_promiscuous_disable,
	.allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
	.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
	.dev_infos_get        = ixgbevf_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set              = ixgbevf_dev_set_mtu,
	/* VLAN */
	.vlan_filter_set      = ixgbevf_vlan_filter_set,
	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
	.vlan_offload_set     = ixgbevf_vlan_offload_set,
	/* queue management — shares the PF queue setup/release helpers */
	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
	.rx_queue_release     = ixgbe_dev_rx_queue_release,
	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
	.rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
	.tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
	.tx_queue_release     = ixgbe_dev_tx_queue_release,
	.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
	/* MAC address management */
	.mac_addr_add         = ixgbevf_add_mac_addr,
	.mac_addr_remove      = ixgbevf_remove_mac_addr,
	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get         = ixgbe_rxq_info_get,
	.txq_info_get         = ixgbe_txq_info_get,
	.mac_addr_set         = ixgbevf_set_default_mac_addr,
	.get_reg              = ixgbevf_get_regs,
	/* RSS — shares the PF RSS helpers */
	.reta_update          = ixgbe_dev_rss_reta_update,
	.reta_query           = ixgbe_dev_rss_reta_query,
	.rss_hash_update      = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
};
633 | ||
/* store statistics names and its offset in stats structure */
struct rte_ixgbe_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE]; /* xstat name reported to the application */
	unsigned offset; /* byte offset of the counter inside the stats struct */
};
639 | ||
/*
 * MAC-level extended statistics: each entry maps an rte_eth xstat name
 * onto the offset of the matching hardware counter in struct
 * ixgbe_hw_stats.
 */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
	{"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
	{"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
	{"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
	{"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
	{"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
	{"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
	{"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
	{"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
	{"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
	/* RX/TX size histogram counters */
	{"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
	{"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
	{"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
	{"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
	{"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
	{"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
	{"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
	{"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
	{"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
	{"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
	{"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
	{"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

	/* flow director counters */
	{"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
		fdirustat_add)},
	{"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
		fdirustat_remove)},
	{"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
		fdirfstat_fadd)},
	{"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
		fdirfstat_fremove)},
	{"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
		fdirmatch)},
	{"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
		fdirmiss)},

	/* FCoE counters */
	{"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
	{"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
	{"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
		fclast)},
	{"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
	{"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
	{"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
	{"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
	{"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
		fcoe_noddp)},
	{"rx_fcoe_no_direct_data_placement_ext_buff",
		offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

	/* link-level flow control counters */
	{"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
		lxontxc)},
	{"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
		lxonrxc)},
	{"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
		lxofftxc)},
	{"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
		lxoffrxc)},
	{"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

/* Number of entries in the MAC-level xstats table above. */
#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
			   sizeof(rte_ixgbe_stats_strings[0]))
723 | ||
11fdf7f2 TL |
/* MACsec statistics: xstat name -> counter offset in struct ixgbe_macsec_stats */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
		out_pkts_untagged)},
	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
		out_pkts_encrypted)},
	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
		out_pkts_protected)},
	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
		out_octets_encrypted)},
	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
		out_octets_protected)},
	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
		in_pkts_untagged)},
	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
		in_pkts_badtag)},
	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
		in_pkts_nosci)},
	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unknownsci)},
	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
		in_octets_decrypted)},
	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
		in_octets_validated)},
	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unchecked)},
	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
		in_pkts_delayed)},
	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
		in_pkts_late)},
	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
		in_pkts_ok)},
	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
		in_pkts_invalid)},
	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
		in_pkts_notvalid)},
	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unusedsa)},
	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
		in_pkts_notusingsa)},
};

/* Number of entries in the MACsec xstats table above. */
#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
			   sizeof(rte_ixgbe_macsec_strings[0]))
768 | ||
7c673cae FG |
/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
	{"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
};

/* Number of per-priority RX queue stat entries in the table above. */
#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
			   sizeof(rte_ixgbe_rxq_strings[0]))
/* Each RX queue-priority counter is replicated across 8 priority values. */
#define IXGBE_NB_RXQ_PRIO_VALUES 8
780 | ||
/* Per-queue TX priority statistics: name -> counter offset. */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
	{"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
		pxon2offc)},
};

/* Number of per-priority TX queue stat entries in the table above. */
#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
			   sizeof(rte_ixgbe_txq_strings[0]))
/* Each TX queue-priority counter is replicated across 8 priority values. */
#define IXGBE_NB_TXQ_PRIO_VALUES 8
791 | ||
/* VF extended statistics: name -> counter offset in struct ixgbevf_hw_stats. */
static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};

/* Number of entries in the VF xstats table above. */
#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
		sizeof(rte_ixgbevf_stats_strings[0]))
798 | ||
7c673cae FG |
799 | /* |
800 | * This function is the same as ixgbe_is_sfp() in base/ixgbe.h. | |
801 | */ | |
802 | static inline int | |
803 | ixgbe_is_sfp(struct ixgbe_hw *hw) | |
804 | { | |
805 | switch (hw->phy.type) { | |
806 | case ixgbe_phy_sfp_avago: | |
807 | case ixgbe_phy_sfp_ftl: | |
808 | case ixgbe_phy_sfp_intel: | |
809 | case ixgbe_phy_sfp_unknown: | |
810 | case ixgbe_phy_sfp_passive_tyco: | |
811 | case ixgbe_phy_sfp_passive_unknown: | |
812 | return 1; | |
813 | default: | |
814 | return 0; | |
815 | } | |
816 | } | |
817 | ||
/*
 * Reset the PF MAC/PHY via the base driver, then set the "PF Reset
 * Done" bit so PF/VF mailbox operations can resume.  A missing SFP
 * module is deliberately not treated as an error.
 */
static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = ixgbe_reset_hw(hw);

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* SFP not plugged in is not fatal for the reset itself */
	if (status == IXGBE_ERR_SFP_NOT_PRESENT)
		status = IXGBE_SUCCESS;
	return status;
}
836 | ||
837 | static inline void | |
838 | ixgbe_enable_intr(struct rte_eth_dev *dev) | |
839 | { | |
840 | struct ixgbe_interrupt *intr = | |
841 | IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
842 | struct ixgbe_hw *hw = | |
843 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
844 | ||
845 | IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask); | |
846 | IXGBE_WRITE_FLUSH(hw); | |
847 | } | |
848 | ||
849 | /* | |
850 | * This function is based on ixgbe_disable_intr() in base/ixgbe.h. | |
851 | */ | |
852 | static void | |
853 | ixgbe_disable_intr(struct ixgbe_hw *hw) | |
854 | { | |
855 | PMD_INIT_FUNC_TRACE(); | |
856 | ||
857 | if (hw->mac.type == ixgbe_mac_82598EB) { | |
858 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0); | |
859 | } else { | |
860 | IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000); | |
861 | IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0); | |
862 | IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0); | |
863 | } | |
864 | IXGBE_WRITE_FLUSH(hw); | |
865 | } | |
866 | ||
867 | /* | |
868 | * This function resets queue statistics mapping registers. | |
869 | * From Niantic datasheet, Initialization of Statistics section: | |
870 | * "...if software requires the queue counters, the RQSMR and TQSM registers | |
871 | * must be re-programmed following a device reset. | |
872 | */ | |
873 | static void | |
874 | ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw) | |
875 | { | |
876 | uint32_t i; | |
877 | ||
878 | for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) { | |
879 | IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0); | |
880 | IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0); | |
881 | } | |
882 | } | |
883 | ||
884 | ||
/*
 * Map a queue's packet/byte counters onto one of the 16 stat indices.
 *
 * Each RQSMR/TQSM register holds four 8-bit mapping fields; the shadow
 * copies in stat_mappings are updated first so the mapping survives a
 * device reset (see ixgbe_restore_statistics_mapping()), then the live
 * register is written.
 *
 * Returns 0 on success, -ENOSYS when the MAC type has no mapping
 * registers, -EIO when queue_id addresses a register beyond the table.
 */
static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	/* only 82599 and later MACs implement RQSMR/TQSM */
	if ((hw->mac.type != ixgbe_mac_82599EB) &&
		(hw->mac.type != ixgbe_mac_X540) &&
		(hw->mac.type != ixgbe_mac_X550) &&
		(hw->mac.type != ixgbe_mac_X550EM_x) &&
		(hw->mac.type != ixgbe_mac_X550EM_a))
		return -ENOSYS;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	/* n = register index, offset = 8-bit field within that register */
	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsmr[n] &= ~clearing_mask;

	/* install the new 4-bit stat index into the shadow copy */
	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsmr[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

	/* Now write the mapping in the appropriate register */
	if (is_rx) {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
			     stat_mappings->rqsmr[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
	} else {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
			     stat_mappings->tqsm[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
	}
	return 0;
}
954 | ||
955 | static void | |
956 | ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev) | |
957 | { | |
958 | struct ixgbe_stat_mapping_registers *stat_mappings = | |
959 | IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private); | |
960 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
961 | int i; | |
962 | ||
963 | /* write whatever was in stat mapping table to the NIC */ | |
964 | for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) { | |
965 | /* rx */ | |
966 | IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]); | |
967 | ||
968 | /* tx */ | |
969 | IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]); | |
970 | } | |
971 | } | |
972 | ||
/*
 * Initialize the DCB configuration with driver defaults: all traffic
 * classes enabled, bandwidth spread evenly across TCs/BW groups, PFC
 * disabled, and every user priority mapped to TC0.
 */
static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
	uint8_t i;
	struct ixgbe_dcb_tc_config *tc;
	uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

	dcb_config->num_tcs.pg_tcs = dcb_max_tc;
	dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
	for (i = 0; i < dcb_max_tc; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
		/* distribute 100% evenly; the (i & 1) term soaks up rounding */
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
				 (uint8_t)(100/dcb_max_tc + (i & 1));
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
				 (uint8_t)(100/dcb_max_tc + (i & 1));
		tc->pfc = ixgbe_dcb_pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
	}
	dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities in 82599 */
	dcb_config->support.capabilities = 0xFF;

	/*we only support 4 Tcs for X540, X550 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x ||
	    hw->mac.type == ixgbe_mac_X550EM_a) {
		dcb_config->num_tcs.pg_tcs = 4;
		dcb_config->num_tcs.pfc_tcs = 4;
	}
}
1017 | ||
1018 | /* | |
1019 | * Ensure that all locks are released before first NVM or PHY access | |
1020 | */ | |
1021 | static void | |
1022 | ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) | |
1023 | { | |
1024 | uint16_t mask; | |
1025 | ||
1026 | /* | |
1027 | * Phy lock should not fail in this early stage. If this is the case, | |
1028 | * it is due to an improper exit of the application. | |
1029 | * So force the release of the faulty lock. Release of common lock | |
1030 | * is done automatically by swfw_sync function. | |
1031 | */ | |
1032 | mask = IXGBE_GSSR_PHY0_SM << hw->bus.func; | |
1033 | if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { | |
1034 | PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func); | |
1035 | } | |
1036 | ixgbe_release_swfw_semaphore(hw, mask); | |
1037 | ||
1038 | /* | |
1039 | * These ones are more tricky since they are common to all ports; but | |
1040 | * swfw_sync retries last long enough (1s) to be almost sure that if | |
1041 | * lock can not be taken it is due to an improper lock of the | |
1042 | * semaphore. | |
1043 | */ | |
1044 | mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM; | |
1045 | if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { | |
1046 | PMD_DRV_LOG(DEBUG, "SWFW common locks released"); | |
1047 | } | |
1048 | ixgbe_release_swfw_semaphore(hw, mask); | |
1049 | } | |
1050 | ||
1051 | /* | |
1052 | * This function is based on code in ixgbe_attach() in base/ixgbe.c. | |
1053 | * It returns 0 on success. | |
1054 | */ | |
1055 | static int | |
11fdf7f2 | 1056 | eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) |
7c673cae | 1057 | { |
11fdf7f2 TL |
1058 | struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); |
1059 | struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; | |
7c673cae FG |
1060 | struct ixgbe_hw *hw = |
1061 | IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); | |
1062 | struct ixgbe_vfta *shadow_vfta = | |
1063 | IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); | |
1064 | struct ixgbe_hwstrip *hwstrip = | |
1065 | IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); | |
1066 | struct ixgbe_dcb_config *dcb_config = | |
1067 | IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private); | |
1068 | struct ixgbe_filter_info *filter_info = | |
1069 | IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); | |
11fdf7f2 TL |
1070 | struct ixgbe_bw_conf *bw_conf = |
1071 | IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private); | |
7c673cae FG |
1072 | uint32_t ctrl_ext; |
1073 | uint16_t csum; | |
1074 | int diag, i; | |
1075 | ||
1076 | PMD_INIT_FUNC_TRACE(); | |
1077 | ||
1078 | eth_dev->dev_ops = &ixgbe_eth_dev_ops; | |
1079 | eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; | |
1080 | eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; | |
11fdf7f2 | 1081 | eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts; |
7c673cae FG |
1082 | |
1083 | /* | |
1084 | * For secondary processes, we don't initialise any further as primary | |
1085 | * has already done this work. Only check we don't need a different | |
1086 | * RX and TX function. | |
1087 | */ | |
1088 | if (rte_eal_process_type() != RTE_PROC_PRIMARY) { | |
1089 | struct ixgbe_tx_queue *txq; | |
1090 | /* TX queue function in primary, set by last queue initialized | |
1091 | * Tx queue may not initialized by primary process | |
1092 | */ | |
1093 | if (eth_dev->data->tx_queues) { | |
1094 | txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1]; | |
1095 | ixgbe_set_tx_function(eth_dev, txq); | |
1096 | } else { | |
1097 | /* Use default TX function if we get here */ | |
1098 | PMD_INIT_LOG(NOTICE, "No TX queues configured yet. " | |
1099 | "Using default TX function."); | |
1100 | } | |
1101 | ||
1102 | ixgbe_set_rx_function(eth_dev); | |
1103 | ||
1104 | return 0; | |
1105 | } | |
7c673cae FG |
1106 | |
1107 | rte_eth_copy_pci_info(eth_dev, pci_dev); | |
1108 | ||
1109 | /* Vendor and Device ID need to be set before init of shared code */ | |
1110 | hw->device_id = pci_dev->id.device_id; | |
1111 | hw->vendor_id = pci_dev->id.vendor_id; | |
1112 | hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; | |
1113 | hw->allow_unsupported_sfp = 1; | |
1114 | ||
1115 | /* Initialize the shared code (base driver) */ | |
11fdf7f2 | 1116 | #ifdef RTE_LIBRTE_IXGBE_BYPASS |
7c673cae FG |
1117 | diag = ixgbe_bypass_init_shared_code(hw); |
1118 | #else | |
1119 | diag = ixgbe_init_shared_code(hw); | |
11fdf7f2 | 1120 | #endif /* RTE_LIBRTE_IXGBE_BYPASS */ |
7c673cae FG |
1121 | |
1122 | if (diag != IXGBE_SUCCESS) { | |
1123 | PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag); | |
1124 | return -EIO; | |
1125 | } | |
1126 | ||
9f95a23c TL |
1127 | if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { |
1128 | PMD_INIT_LOG(ERR, "\nERROR: " | |
1129 | "Firmware recovery mode detected. Limiting functionality.\n" | |
1130 | "Refer to the Intel(R) Ethernet Adapters and Devices " | |
1131 | "User Guide for details on firmware recovery mode."); | |
1132 | return -EIO; | |
1133 | } | |
1134 | ||
7c673cae FG |
1135 | /* pick up the PCI bus settings for reporting later */ |
1136 | ixgbe_get_bus_info(hw); | |
1137 | ||
1138 | /* Unlock any pending hardware semaphore */ | |
1139 | ixgbe_swfw_lock_reset(hw); | |
1140 | ||
11fdf7f2 TL |
1141 | #ifdef RTE_LIBRTE_SECURITY |
1142 | /* Initialize security_ctx only for primary process*/ | |
1143 | if (ixgbe_ipsec_ctx_create(eth_dev)) | |
1144 | return -ENOMEM; | |
1145 | #endif | |
1146 | ||
7c673cae FG |
1147 | /* Initialize DCB configuration*/ |
1148 | memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config)); | |
1149 | ixgbe_dcb_init(hw, dcb_config); | |
1150 | /* Get Hardware Flow Control setting */ | |
1151 | hw->fc.requested_mode = ixgbe_fc_full; | |
1152 | hw->fc.current_mode = ixgbe_fc_full; | |
1153 | hw->fc.pause_time = IXGBE_FC_PAUSE; | |
1154 | for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { | |
1155 | hw->fc.low_water[i] = IXGBE_FC_LO; | |
1156 | hw->fc.high_water[i] = IXGBE_FC_HI; | |
1157 | } | |
1158 | hw->fc.send_xon = 1; | |
1159 | ||
1160 | /* Make sure we have a good EEPROM before we read from it */ | |
1161 | diag = ixgbe_validate_eeprom_checksum(hw, &csum); | |
1162 | if (diag != IXGBE_SUCCESS) { | |
1163 | PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag); | |
1164 | return -EIO; | |
1165 | } | |
1166 | ||
11fdf7f2 | 1167 | #ifdef RTE_LIBRTE_IXGBE_BYPASS |
7c673cae FG |
1168 | diag = ixgbe_bypass_init_hw(hw); |
1169 | #else | |
1170 | diag = ixgbe_init_hw(hw); | |
11fdf7f2 | 1171 | #endif /* RTE_LIBRTE_IXGBE_BYPASS */ |
7c673cae FG |
1172 | |
1173 | /* | |
1174 | * Devices with copper phys will fail to initialise if ixgbe_init_hw() | |
1175 | * is called too soon after the kernel driver unbinding/binding occurs. | |
1176 | * The failure occurs in ixgbe_identify_phy_generic() for all devices, | |
1177 | * but for non-copper devies, ixgbe_identify_sfp_module_generic() is | |
1178 | * also called. See ixgbe_identify_phy_82599(). The reason for the | |
1179 | * failure is not known, and only occuts when virtualisation features | |
1180 | * are disabled in the bios. A delay of 100ms was found to be enough by | |
1181 | * trial-and-error, and is doubled to be safe. | |
1182 | */ | |
1183 | if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { | |
1184 | rte_delay_ms(200); | |
1185 | diag = ixgbe_init_hw(hw); | |
1186 | } | |
1187 | ||
11fdf7f2 TL |
1188 | if (diag == IXGBE_ERR_SFP_NOT_PRESENT) |
1189 | diag = IXGBE_SUCCESS; | |
1190 | ||
7c673cae FG |
1191 | if (diag == IXGBE_ERR_EEPROM_VERSION) { |
1192 | PMD_INIT_LOG(ERR, "This device is a pre-production adapter/" | |
1193 | "LOM. Please be aware there may be issues associated " | |
1194 | "with your hardware."); | |
1195 | PMD_INIT_LOG(ERR, "If you are experiencing problems " | |
1196 | "please contact your Intel or hardware representative " | |
1197 | "who provided you with this hardware."); | |
1198 | } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED) | |
1199 | PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); | |
1200 | if (diag) { | |
1201 | PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); | |
1202 | return -EIO; | |
1203 | } | |
1204 | ||
1205 | /* Reset the hw statistics */ | |
1206 | ixgbe_dev_stats_reset(eth_dev); | |
1207 | ||
1208 | /* disable interrupt */ | |
1209 | ixgbe_disable_intr(hw); | |
1210 | ||
1211 | /* reset mappings for queue statistics hw counters*/ | |
1212 | ixgbe_reset_qstat_mappings(hw); | |
1213 | ||
1214 | /* Allocate memory for storing MAC addresses */ | |
1215 | eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * | |
1216 | hw->mac.num_rar_entries, 0); | |
1217 | if (eth_dev->data->mac_addrs == NULL) { | |
1218 | PMD_INIT_LOG(ERR, | |
1219 | "Failed to allocate %u bytes needed to store " | |
1220 | "MAC addresses", | |
1221 | ETHER_ADDR_LEN * hw->mac.num_rar_entries); | |
1222 | return -ENOMEM; | |
1223 | } | |
1224 | /* Copy the permanent MAC address */ | |
1225 | ether_addr_copy((struct ether_addr *) hw->mac.perm_addr, | |
1226 | ð_dev->data->mac_addrs[0]); | |
1227 | ||
1228 | /* Allocate memory for storing hash filter MAC addresses */ | |
1229 | eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * | |
1230 | IXGBE_VMDQ_NUM_UC_MAC, 0); | |
1231 | if (eth_dev->data->hash_mac_addrs == NULL) { | |
1232 | PMD_INIT_LOG(ERR, | |
1233 | "Failed to allocate %d bytes needed to store MAC addresses", | |
1234 | ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); | |
1235 | return -ENOMEM; | |
1236 | } | |
1237 | ||
1238 | /* initialize the vfta */ | |
1239 | memset(shadow_vfta, 0, sizeof(*shadow_vfta)); | |
1240 | ||
1241 | /* initialize the hw strip bitmap*/ | |
1242 | memset(hwstrip, 0, sizeof(*hwstrip)); | |
1243 | ||
1244 | /* initialize PF if max_vfs not zero */ | |
1245 | ixgbe_pf_host_init(eth_dev); | |
1246 | ||
1247 | ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); | |
1248 | /* let hardware know driver is loaded */ | |
1249 | ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; | |
1250 | /* Set PF Reset Done bit so PF/VF Mail Ops can work */ | |
1251 | ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; | |
1252 | IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); | |
1253 | IXGBE_WRITE_FLUSH(hw); | |
1254 | ||
1255 | if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) | |
1256 | PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d", | |
1257 | (int) hw->mac.type, (int) hw->phy.type, | |
1258 | (int) hw->phy.sfp_type); | |
1259 | else | |
1260 | PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d", | |
1261 | (int) hw->mac.type, (int) hw->phy.type); | |
1262 | ||
1263 | PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", | |
1264 | eth_dev->data->port_id, pci_dev->id.vendor_id, | |
1265 | pci_dev->id.device_id); | |
1266 | ||
11fdf7f2 TL |
1267 | rte_intr_callback_register(intr_handle, |
1268 | ixgbe_dev_interrupt_handler, eth_dev); | |
7c673cae FG |
1269 | |
1270 | /* enable uio/vfio intr/eventfd mapping */ | |
11fdf7f2 | 1271 | rte_intr_enable(intr_handle); |
7c673cae FG |
1272 | |
1273 | /* enable support intr */ | |
1274 | ixgbe_enable_intr(eth_dev); | |
1275 | ||
11fdf7f2 TL |
1276 | /* initialize filter info */ |
1277 | memset(filter_info, 0, | |
1278 | sizeof(struct ixgbe_filter_info)); | |
1279 | ||
7c673cae FG |
1280 | /* initialize 5tuple filter list */ |
1281 | TAILQ_INIT(&filter_info->fivetuple_list); | |
11fdf7f2 TL |
1282 | |
1283 | /* initialize flow director filter list & hash */ | |
1284 | ixgbe_fdir_filter_init(eth_dev); | |
1285 | ||
1286 | /* initialize l2 tunnel filter list & hash */ | |
1287 | ixgbe_l2_tn_filter_init(eth_dev); | |
1288 | ||
1289 | /* initialize flow filter lists */ | |
1290 | ixgbe_filterlist_init(); | |
1291 | ||
1292 | /* initialize bandwidth configuration info */ | |
1293 | memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf)); | |
1294 | ||
1295 | /* initialize Traffic Manager configuration */ | |
1296 | ixgbe_tm_conf_init(eth_dev); | |
7c673cae FG |
1297 | |
1298 | return 0; | |
1299 | } | |
1300 | ||
/*
 * Tear down a PF port at detach time: close the device if still
 * running, unregister the interrupt callback (with retries while the
 * handler may still be executing), and release every filter list and
 * TM/security resource created by eth_ixgbe_dev_init().
 * Returns 0 on success or the rte_intr_callback_unregister() error.
 */
static int
eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* only the primary process owns the hardware state */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	if (hw->adapter_stopped == 0)
		ixgbe_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* Unlock any pending hardware semaphore */
	ixgbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* -EAGAIN means the handler is still running; retry with a delay */
	do {
		ret = rte_intr_callback_unregister(intr_handle,
				ixgbe_dev_interrupt_handler, eth_dev);
		if (ret >= 0) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
			return ret;
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + IXGBE_LINK_UP_TIME));

	/* cancel the delay handler before remove dev */
	rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, eth_dev);

	/* uninitialize PF if max_vfs not zero */
	ixgbe_pf_host_uninit(eth_dev);

	/* remove all the fdir filters & hash */
	ixgbe_fdir_filter_uninit(eth_dev);

	/* remove all the L2 tunnel filters & hash */
	ixgbe_l2_tn_filter_uninit(eth_dev);

	/* Remove all ntuple filters of the device */
	ixgbe_ntuple_filter_uninit(eth_dev);

	/* clear all the filters list */
	ixgbe_filterlist_flush();

	/* Remove all Traffic Manager configuration */
	ixgbe_tm_conf_uninit(eth_dev);

#ifdef RTE_LIBRTE_SECURITY
	rte_free(eth_dev->security_ctx);
#endif

	return 0;
}
1371 | ||
1372 | static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev) | |
1373 | { | |
1374 | struct ixgbe_filter_info *filter_info = | |
1375 | IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); | |
1376 | struct ixgbe_5tuple_filter *p_5tuple; | |
1377 | ||
1378 | while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) { | |
1379 | TAILQ_REMOVE(&filter_info->fivetuple_list, | |
1380 | p_5tuple, | |
1381 | entries); | |
1382 | rte_free(p_5tuple); | |
1383 | } | |
1384 | memset(filter_info->fivetuple_mask, 0, | |
1385 | sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); | |
1386 | ||
1387 | return 0; | |
1388 | } | |
1389 | ||
1390 | static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev) | |
1391 | { | |
1392 | struct ixgbe_hw_fdir_info *fdir_info = | |
1393 | IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private); | |
1394 | struct ixgbe_fdir_filter *fdir_filter; | |
1395 | ||
1396 | if (fdir_info->hash_map) | |
1397 | rte_free(fdir_info->hash_map); | |
1398 | if (fdir_info->hash_handle) | |
1399 | rte_hash_free(fdir_info->hash_handle); | |
1400 | ||
1401 | while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) { | |
1402 | TAILQ_REMOVE(&fdir_info->fdir_list, | |
1403 | fdir_filter, | |
1404 | entries); | |
1405 | rte_free(fdir_filter); | |
1406 | } | |
1407 | ||
1408 | return 0; | |
1409 | } | |
1410 | ||
1411 | static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev) | |
1412 | { | |
1413 | struct ixgbe_l2_tn_info *l2_tn_info = | |
1414 | IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); | |
1415 | struct ixgbe_l2_tn_filter *l2_tn_filter; | |
1416 | ||
1417 | if (l2_tn_info->hash_map) | |
1418 | rte_free(l2_tn_info->hash_map); | |
1419 | if (l2_tn_info->hash_handle) | |
1420 | rte_hash_free(l2_tn_info->hash_handle); | |
1421 | ||
1422 | while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { | |
1423 | TAILQ_REMOVE(&l2_tn_info->l2_tn_list, | |
1424 | l2_tn_filter, | |
1425 | entries); | |
1426 | rte_free(l2_tn_filter); | |
1427 | } | |
1428 | ||
1429 | return 0; | |
1430 | } | |
1431 | ||
1432 | static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) | |
1433 | { | |
1434 | struct ixgbe_hw_fdir_info *fdir_info = | |
1435 | IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private); | |
1436 | char fdir_hash_name[RTE_HASH_NAMESIZE]; | |
1437 | struct rte_hash_parameters fdir_hash_params = { | |
1438 | .name = fdir_hash_name, | |
1439 | .entries = IXGBE_MAX_FDIR_FILTER_NUM, | |
1440 | .key_len = sizeof(union ixgbe_atr_input), | |
1441 | .hash_func = rte_hash_crc, | |
1442 | .hash_func_init_val = 0, | |
1443 | .socket_id = rte_socket_id(), | |
1444 | }; | |
1445 | ||
1446 | TAILQ_INIT(&fdir_info->fdir_list); | |
1447 | snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, | |
1448 | "fdir_%s", eth_dev->device->name); | |
1449 | fdir_info->hash_handle = rte_hash_create(&fdir_hash_params); | |
1450 | if (!fdir_info->hash_handle) { | |
1451 | PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); | |
1452 | return -EINVAL; | |
1453 | } | |
1454 | fdir_info->hash_map = rte_zmalloc("ixgbe", | |
1455 | sizeof(struct ixgbe_fdir_filter *) * | |
1456 | IXGBE_MAX_FDIR_FILTER_NUM, | |
1457 | 0); | |
1458 | if (!fdir_info->hash_map) { | |
1459 | PMD_INIT_LOG(ERR, | |
1460 | "Failed to allocate memory for fdir hash map!"); | |
1461 | return -ENOMEM; | |
1462 | } | |
1463 | fdir_info->mask_added = FALSE; | |
1464 | ||
7c673cae FG |
1465 | return 0; |
1466 | } | |
1467 | ||
11fdf7f2 TL |
1468 | static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) |
1469 | { | |
1470 | struct ixgbe_l2_tn_info *l2_tn_info = | |
1471 | IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); | |
1472 | char l2_tn_hash_name[RTE_HASH_NAMESIZE]; | |
1473 | struct rte_hash_parameters l2_tn_hash_params = { | |
1474 | .name = l2_tn_hash_name, | |
1475 | .entries = IXGBE_MAX_L2_TN_FILTER_NUM, | |
1476 | .key_len = sizeof(struct ixgbe_l2_tn_key), | |
1477 | .hash_func = rte_hash_crc, | |
1478 | .hash_func_init_val = 0, | |
1479 | .socket_id = rte_socket_id(), | |
1480 | }; | |
1481 | ||
1482 | TAILQ_INIT(&l2_tn_info->l2_tn_list); | |
1483 | snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE, | |
1484 | "l2_tn_%s", eth_dev->device->name); | |
1485 | l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params); | |
1486 | if (!l2_tn_info->hash_handle) { | |
1487 | PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!"); | |
1488 | return -EINVAL; | |
1489 | } | |
1490 | l2_tn_info->hash_map = rte_zmalloc("ixgbe", | |
1491 | sizeof(struct ixgbe_l2_tn_filter *) * | |
1492 | IXGBE_MAX_L2_TN_FILTER_NUM, | |
1493 | 0); | |
1494 | if (!l2_tn_info->hash_map) { | |
1495 | PMD_INIT_LOG(ERR, | |
1496 | "Failed to allocate memory for L2 TN hash map!"); | |
1497 | return -ENOMEM; | |
1498 | } | |
1499 | l2_tn_info->e_tag_en = FALSE; | |
1500 | l2_tn_info->e_tag_fwd_en = FALSE; | |
1501 | l2_tn_info->e_tag_ether_type = ETHER_TYPE_ETAG; | |
1502 | ||
1503 | return 0; | |
1504 | } | |
7c673cae FG |
1505 | /* |
1506 | * Negotiate mailbox API version with the PF. | |
1507 | * After reset API version is always set to the basic one (ixgbe_mbox_api_10). | |
1508 | * Then we try to negotiate starting with the most recent one. | |
1509 | * If all negotiation attempts fail, then we will proceed with | |
1510 | * the default one (ixgbe_mbox_api_10). | |
1511 | */ | |
1512 | static void | |
1513 | ixgbevf_negotiate_api(struct ixgbe_hw *hw) | |
1514 | { | |
1515 | int32_t i; | |
1516 | ||
1517 | /* start with highest supported, proceed down */ | |
1518 | static const enum ixgbe_pfvf_api_rev sup_ver[] = { | |
9f95a23c | 1519 | ixgbe_mbox_api_13, |
7c673cae FG |
1520 | ixgbe_mbox_api_12, |
1521 | ixgbe_mbox_api_11, | |
1522 | ixgbe_mbox_api_10, | |
1523 | }; | |
1524 | ||
1525 | for (i = 0; | |
1526 | i != RTE_DIM(sup_ver) && | |
1527 | ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0; | |
1528 | i++) | |
1529 | ; | |
1530 | } | |
1531 | ||
1532 | static void | |
1533 | generate_random_mac_addr(struct ether_addr *mac_addr) | |
1534 | { | |
1535 | uint64_t random; | |
1536 | ||
1537 | /* Set Organizationally Unique Identifier (OUI) prefix. */ | |
1538 | mac_addr->addr_bytes[0] = 0x00; | |
1539 | mac_addr->addr_bytes[1] = 0x09; | |
1540 | mac_addr->addr_bytes[2] = 0xC0; | |
1541 | /* Force indication of locally assigned MAC address. */ | |
1542 | mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR; | |
1543 | /* Generate the last 3 bytes of the MAC address with a random number. */ | |
1544 | random = rte_rand(); | |
1545 | memcpy(&mac_addr->addr_bytes[3], &random, 3); | |
1546 | } | |
1547 | ||
/*
 * Virtual Function device init
 *
 * Initializes an ixgbevf (VF) port: wires up the VF ops and burst
 * functions, resets the VF through the mailbox, negotiates the mailbox
 * API version, allocates the MAC address table, and enables interrupts.
 * In secondary processes it only selects the Rx/Tx burst functions.
 *
 * Returns 0 on success; -EIO on shared-code/start failures, -EAGAIN if
 * the VF reset fails (public code so rte_eth_dev_reset can propagate
 * it), -ENOMEM if the MAC table cannot be allocated.
 */
static int
eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
{
	int diag;
	uint32_t tc, tcs;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
	struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ixgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not initialized by primary process
		 */
		if (eth_dev->data->tx_queues) {
			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
			ixgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE,
				     "No TX queues configured yet. Using default TX function.");
		}

		ixgbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap*/
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* Initialize the shared code (base driver) */
	diag = ixgbe_init_shared_code(hw);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
		return -EIO;
	}

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* Reset the hw statistics */
	ixgbevf_dev_stats_reset(eth_dev);

	/* Disable the interrupts for VF */
	ixgbevf_intr_disable(eth_dev);

	hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
	diag = hw->mac.ops.reset_hw(hw);

	/*
	 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
	 * the underlying PF driver has not assigned a MAC address to the VF.
	 * In this case, assign a random MAC address.
	 */
	if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
		/*
		 * This error code will be propagated to the app by
		 * rte_eth_dev_reset, so use a public error code rather than
		 * the internal-only IXGBE_ERR_RESET_FAILED
		 */
		return -EAGAIN;
	}

	/* negotiate mailbox API version to use with the PF. */
	ixgbevf_negotiate_api(hw);

	/* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
	ixgbevf_get_queues(hw, &tcs, &tc);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Generate a random MAC address, if none was assigned by PF. */
	if (is_zero_ether_addr(perm_addr)) {
		generate_random_mac_addr(perm_addr);
		diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
		if (diag) {
			/* Undo the MAC table allocation on failure. */
			rte_free(eth_dev->data->mac_addrs);
			eth_dev->data->mac_addrs = NULL;
			return diag;
		}
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			     "%02x:%02x:%02x:%02x:%02x:%02x",
			     perm_addr->addr_bytes[0],
			     perm_addr->addr_bytes[1],
			     perm_addr->addr_bytes[2],
			     perm_addr->addr_bytes[3],
			     perm_addr->addr_bytes[4],
			     perm_addr->addr_bytes[5]);
	}

	/* Copy the permanent MAC address */
	ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);

	/* reset the hardware with the new settings */
	diag = hw->mac.ops.start_hw(hw);
	switch (diag) {
	case 0:
		break;

	default:
		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
		return -EIO;
	}

	/* Hook up and enable the VF mailbox/misc interrupt. */
	rte_intr_callback_register(intr_handle,
				   ixgbevf_dev_interrupt_handler, eth_dev);
	rte_intr_enable(intr_handle);
	ixgbevf_intr_enable(eth_dev);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");

	return 0;
}
1703 | ||
/* Virtual Function device uninit */

/*
 * Release everything acquired by eth_ixgbevf_dev_init().
 * Primary-process only; closes the port if still running, then masks
 * and unhooks the VF interrupt. Always returns 0.
 */
static int
eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw;

	PMD_INIT_FUNC_TRACE();

	/* Only the primary process owns and may free device resources. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	/* Close the port first if the application has not done so. */
	if (hw->adapter_stopped == 0)
		ixgbevf_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* Disable the interrupts for VF */
	ixgbevf_intr_disable(eth_dev);

	/* Mask the interrupt before dropping the callback. */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ixgbevf_dev_interrupt_handler, eth_dev);

	return 0;
}
1736 | ||
/*
 * PCI probe for the ixgbe PF driver.
 *
 * Creates the physical-function ethdev, then — if the devargs request
 * representors — creates one VF representor port per requested VF id.
 * A representor creation failure is logged but does not fail the probe
 * of the PF port itself.
 *
 * Returns 0 on success (PF created), or a negative errno from devargs
 * parsing / PF creation; -ENODEV if the freshly created PF ethdev
 * cannot be looked up.
 */
static int
eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_eth_dev *pf_ethdev;
	struct rte_eth_devargs eth_da;
	int i, retval;

	/* Parse devargs (representor list); default to none. */
	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
				&eth_da);
		if (retval)
			return retval;
	} else
		memset(&eth_da, 0, sizeof(eth_da));

	/* Create the physical-function port first. */
	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
		sizeof(struct ixgbe_adapter),
		eth_dev_pci_specific_init, pci_dev,
		eth_ixgbe_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (pf_ethdev == NULL)
		return -ENODEV;

	/* probe VF representor ports */
	for (i = 0; i < eth_da.nb_representor_ports; i++) {
		struct ixgbe_vf_info *vfinfo;
		struct ixgbe_vf_representor representor;

		vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
			pf_ethdev->data->dev_private);
		if (vfinfo == NULL) {
			/* PF has no VFs; nothing to represent. */
			PMD_DRV_LOG(ERR,
				"no virtual functions supported by PF");
			break;
		}

		representor.vf_id = eth_da.representor_ports[i];
		representor.switch_domain_id = vfinfo->switch_domain_id;
		representor.pf_ethdev = pf_ethdev;

		/* representor port net_bdf_port */
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			pci_dev->device.name,
			eth_da.representor_ports[i]);

		retval = rte_eth_dev_create(&pci_dev->device, name,
			sizeof(struct ixgbe_vf_representor), NULL, NULL,
			ixgbe_vf_representor_init, &representor);

		/* Best effort: log and continue with the next representor. */
		if (retval)
			PMD_DRV_LOG(ERR, "failed to create ixgbe vf "
				"representor %s.", name);
	}

	return 0;
}
1799 | ||
11fdf7f2 TL |
1800 | static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) |
1801 | { | |
1802 | struct rte_eth_dev *ethdev; | |
1803 | ||
1804 | ethdev = rte_eth_dev_allocated(pci_dev->device.name); | |
1805 | if (!ethdev) | |
1806 | return -ENODEV; | |
1807 | ||
1808 | if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) | |
1809 | return rte_eth_dev_destroy(ethdev, ixgbe_vf_representor_uninit); | |
1810 | else | |
1811 | return rte_eth_dev_destroy(ethdev, eth_ixgbe_dev_uninit); | |
1812 | } | |
1813 | ||
/* PCI driver entry points for the ixgbe physical-function PMD. */
static struct rte_pci_driver rte_ixgbe_pmd = {
	.id_table = pci_id_ixgbe_map,
	/* Needs BAR mapping, link-state interrupts, and IOVA-as-VA mode. */
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_ixgbe_pci_probe,
	.remove = eth_ixgbe_pci_remove,
};
1821 | ||
11fdf7f2 TL |
1822 | static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, |
1823 | struct rte_pci_device *pci_dev) | |
1824 | { | |
1825 | return rte_eth_dev_pci_generic_probe(pci_dev, | |
1826 | sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init); | |
1827 | } | |
1828 | ||
1829 | static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev) | |
1830 | { | |
1831 | return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit); | |
1832 | } | |
1833 | ||
7c673cae FG |
/*
 * virtual function driver struct
 */
/* PCI driver entry points for the ixgbe virtual-function PMD. */
static struct rte_pci_driver rte_ixgbevf_pmd = {
	.id_table = pci_id_ixgbevf_map,
	/* VF needs BAR mapping and IOVA-as-VA; no link-state interrupt. */
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_ixgbevf_pci_probe,
	.remove = eth_ixgbevf_pci_remove,
};
1843 | ||
1844 | static int | |
1845 | ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) | |
1846 | { | |
1847 | struct ixgbe_hw *hw = | |
1848 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1849 | struct ixgbe_vfta *shadow_vfta = | |
1850 | IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); | |
1851 | uint32_t vfta; | |
1852 | uint32_t vid_idx; | |
1853 | uint32_t vid_bit; | |
1854 | ||
1855 | vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); | |
1856 | vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); | |
1857 | vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx)); | |
1858 | if (on) | |
1859 | vfta |= vid_bit; | |
1860 | else | |
1861 | vfta &= ~vid_bit; | |
1862 | IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta); | |
1863 | ||
1864 | /* update local VFTA copy */ | |
1865 | shadow_vfta->vfta[vid_idx] = vfta; | |
1866 | ||
1867 | return 0; | |
1868 | } | |
1869 | ||
/*
 * Enable or disable VLAN stripping on a single Rx queue by dispatching
 * to the corresponding per-queue helper.
 */
static void
ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
	if (!on) {
		ixgbe_vlan_hw_strip_disable(dev, queue);
		return;
	}
	ixgbe_vlan_hw_strip_enable(dev, queue);
}
1878 | ||
/*
 * Program the TPID (ether type) used for inner or outer VLAN tags.
 *
 * Inner TPID can only be set when double VLAN (QinQ, DMATXCTL.GDV) is
 * enabled. With QinQ on, the outer TPID goes in EXVET; with QinQ off,
 * the single TPID is programmed into VLNCTRL.VET and DMATXCTL.VT.
 *
 * Returns 0 on success, -ENOTSUP for inner TPID without QinQ,
 * -EINVAL for an unknown vlan_type.
 */
static int
ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;
	uint32_t reg;
	uint32_t qinq;

	/* GDV bit tells us whether double VLAN (QinQ) is enabled. */
	qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
	qinq &= IXGBE_DMATXCTL_GDV;

	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		if (qinq) {
			/* Program both Rx (VLNCTRL) and Tx (DMATXCTL) TPID. */
			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
			reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
			reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
				| ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
			IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
		} else {
			ret = -ENOTSUP;
			PMD_DRV_LOG(ERR, "Inner type is not supported"
				    " by single VLAN");
		}
		break;
	case ETH_VLAN_TYPE_OUTER:
		if (qinq) {
			/* Only the high 16-bits is valid */
			IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
				IXGBE_EXVET_VET_EXT_SHIFT);
		} else {
			/* Single VLAN: outer TPID is the only TPID. */
			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
			reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
			reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
				| ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
			IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
		}

		break;
	default:
		ret = -EINVAL;
		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		break;
	}

	return ret;
}
1933 | ||
1934 | void | |
1935 | ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev) | |
1936 | { | |
1937 | struct ixgbe_hw *hw = | |
1938 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1939 | uint32_t vlnctrl; | |
1940 | ||
1941 | PMD_INIT_FUNC_TRACE(); | |
1942 | ||
1943 | /* Filter Table Disable */ | |
1944 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | |
1945 | vlnctrl &= ~IXGBE_VLNCTRL_VFE; | |
1946 | ||
1947 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | |
1948 | } | |
1949 | ||
1950 | void | |
1951 | ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) | |
1952 | { | |
1953 | struct ixgbe_hw *hw = | |
1954 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
1955 | struct ixgbe_vfta *shadow_vfta = | |
1956 | IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); | |
1957 | uint32_t vlnctrl; | |
1958 | uint16_t i; | |
1959 | ||
1960 | PMD_INIT_FUNC_TRACE(); | |
1961 | ||
1962 | /* Filter Table Enable */ | |
1963 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | |
1964 | vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; | |
1965 | vlnctrl |= IXGBE_VLNCTRL_VFE; | |
1966 | ||
1967 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | |
1968 | ||
1969 | /* write whatever is in local vfta copy */ | |
1970 | for (i = 0; i < IXGBE_VFTA_SIZE; i++) | |
1971 | IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); | |
1972 | } | |
1973 | ||
/*
 * Record the per-queue VLAN-strip state in the driver's bitmap and, if
 * the queue is already configured, sync the queue's mbuf vlan_flags
 * and offload bits so the Rx path reports stripping correctly.
 */
static void
ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
{
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
	struct ixgbe_rx_queue *rxq;

	/* Queue index must be within hardware limits. */
	if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
		return;

	if (on)
		IXGBE_SET_HWSTRIP(hwstrip, queue);
	else
		IXGBE_CLEAR_HWSTRIP(hwstrip, queue);

	/* Nothing more to do if the queue is not configured yet. */
	if (queue >= dev->data->nb_rx_queues)
		return;

	rxq = dev->data->rx_queues[queue];

	if (on) {
		/* Stripped queues report the VLAN TCI in the mbuf. */
		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	} else {
		rxq->vlan_flags = PKT_RX_VLAN;
		rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	}
}
2002 | ||
2003 | static void | |
2004 | ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) | |
2005 | { | |
2006 | struct ixgbe_hw *hw = | |
2007 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
2008 | uint32_t ctrl; | |
2009 | ||
2010 | PMD_INIT_FUNC_TRACE(); | |
2011 | ||
2012 | if (hw->mac.type == ixgbe_mac_82598EB) { | |
2013 | /* No queue level support */ | |
2014 | PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); | |
2015 | return; | |
2016 | } | |
2017 | ||
2018 | /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ | |
2019 | ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); | |
2020 | ctrl &= ~IXGBE_RXDCTL_VME; | |
2021 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); | |
2022 | ||
2023 | /* record those setting for HW strip per queue */ | |
2024 | ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); | |
2025 | } | |
2026 | ||
2027 | static void | |
2028 | ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) | |
2029 | { | |
2030 | struct ixgbe_hw *hw = | |
2031 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
2032 | uint32_t ctrl; | |
2033 | ||
2034 | PMD_INIT_FUNC_TRACE(); | |
2035 | ||
2036 | if (hw->mac.type == ixgbe_mac_82598EB) { | |
2037 | /* No queue level supported */ | |
2038 | PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); | |
2039 | return; | |
2040 | } | |
2041 | ||
2042 | /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ | |
2043 | ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); | |
2044 | ctrl |= IXGBE_RXDCTL_VME; | |
2045 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); | |
2046 | ||
2047 | /* record those setting for HW strip per queue */ | |
2048 | ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); | |
2049 | } | |
2050 | ||
7c673cae FG |
2051 | static void |
2052 | ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) | |
2053 | { | |
2054 | struct ixgbe_hw *hw = | |
2055 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
2056 | uint32_t ctrl; | |
2057 | ||
2058 | PMD_INIT_FUNC_TRACE(); | |
2059 | ||
2060 | /* DMATXCTRL: Geric Double VLAN Disable */ | |
2061 | ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); | |
2062 | ctrl &= ~IXGBE_DMATXCTL_GDV; | |
2063 | IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); | |
2064 | ||
2065 | /* CTRL_EXT: Global Double VLAN Disable */ | |
2066 | ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); | |
2067 | ctrl &= ~IXGBE_EXTENDED_VLAN; | |
2068 | IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); | |
2069 | ||
2070 | } | |
2071 | ||
/*
 * Enable extended (double/QinQ) VLAN mode: set the generic double VLAN
 * bit in DMATXCTL and the global extended-VLAN bit in CTRL_EXT. On
 * X550-family parts the PFVTCTL pooling mode must also be cleared.
 */
static void
ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	/* DMATXCTRL: Generic Double VLAN Enable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
	ctrl |= IXGBE_DMATXCTL_GDV;
	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);

	/* CTRL_EXT: Global Double VLAN Enable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl |= IXGBE_EXTENDED_VLAN;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);

	/* Clear pooling mode of PFVTCTL. It's required by X550. */
	if (hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x ||
	    hw->mac.type == ixgbe_mac_X550EM_a) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
		ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
	}

	/*
	 * VET EXT field in the EXVET register = 0x8100 by default
	 * So no need to change. Same to VT field of DMATXCTL register
	 */
}
2105 | ||
11fdf7f2 TL |
/*
 * Program hardware VLAN stripping from the configured offloads.
 *
 * 82598EB only supports a device-wide strip bit in VLNCTRL; all other
 * MACs support per-queue control through RXDCTL.VME, in which case
 * each queue's own offload flag decides and the per-queue bitmap is
 * updated via ixgbe_vlan_hw_strip_bitmap_set().
 */
void
ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	uint32_t ctrl;
	uint16_t i;
	struct ixgbe_rx_queue *rxq;
	bool on;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* Device-wide strip control only. */
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
			ctrl |= IXGBE_VLNCTRL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
		} else {
			ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
			ctrl &= ~IXGBE_VLNCTRL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
		}
	} else {
		/*
		 * Other 10G NIC, the VLAN strip can be setup
		 * per queue in RXDCTL
		 */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
			if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
				ctrl |= IXGBE_RXDCTL_VME;
				on = TRUE;
			} else {
				ctrl &= ~IXGBE_RXDCTL_VME;
				on = FALSE;
			}
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);

			/* record those setting for HW strip per queue */
			ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
		}
	}
}
2151 | ||
7c673cae | 2152 | static void |
11fdf7f2 | 2153 | ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) |
7c673cae | 2154 | { |
11fdf7f2 TL |
2155 | uint16_t i; |
2156 | struct rte_eth_rxmode *rxmode; | |
2157 | struct ixgbe_rx_queue *rxq; | |
2158 | ||
7c673cae | 2159 | if (mask & ETH_VLAN_STRIP_MASK) { |
11fdf7f2 TL |
2160 | rxmode = &dev->data->dev_conf.rxmode; |
2161 | if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) | |
2162 | for (i = 0; i < dev->data->nb_rx_queues; i++) { | |
2163 | rxq = dev->data->rx_queues[i]; | |
2164 | rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; | |
2165 | } | |
7c673cae | 2166 | else |
11fdf7f2 TL |
2167 | for (i = 0; i < dev->data->nb_rx_queues; i++) { |
2168 | rxq = dev->data->rx_queues[i]; | |
2169 | rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; | |
2170 | } | |
2171 | } | |
2172 | } | |
2173 | ||
/*
 * Apply the VLAN offload configuration selected in dev_conf.rxmode.offloads
 * to the hardware, for each offload category selected in @mask:
 *   - ETH_VLAN_STRIP_MASK:  reprogram per-queue HW VLAN stripping
 *   - ETH_VLAN_FILTER_MASK: enable/disable the VLAN filter table
 *   - ETH_VLAN_EXTEND_MASK: enable/disable double-VLAN (QinQ) support
 * Always returns 0 (kept for ops-table signature compatibility).
 */
static int
ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;
	rxmode = &dev->data->dev_conf.rxmode;

	if (mask & ETH_VLAN_STRIP_MASK) {
		/* strip setting is per-queue; helper walks all Rx queues */
		ixgbe_vlan_hw_strip_config(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			ixgbe_vlan_hw_filter_enable(dev);
		else
			ixgbe_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			ixgbe_vlan_hw_extend_enable(dev);
		else
			ixgbe_vlan_hw_extend_disable(dev);
	}

	return 0;
}
2200 | ||
/*
 * eth_dev_ops vlan_offload_set callback: sync the per-queue strip flags
 * with the device-level setting, then program the hardware.
 *
 * Fix: the original ignored the status of ixgbe_vlan_offload_config()
 * and unconditionally returned 0; propagate it instead. Backward
 * compatible — the callee currently always returns 0, but a future
 * failure path will no longer be silently swallowed.
 */
static int
ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	ixgbe_config_vlan_strip_on_all_queues(dev, mask);

	return ixgbe_vlan_offload_config(dev, mask);
}
2210 | ||
/*
 * Enable hardware VLAN filtering for VMDq operation.
 * Read-modify-write of VLNCTRL: sets only the VFE (VLAN Filter Enable)
 * bit, leaving the rest of the register intact so all VLAN tags still
 * pass through (the VFTA table is programmed elsewhere).
 */
static void
ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
	uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);

	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
}
2222 | ||
2223 | static int | |
2224 | ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) | |
2225 | { | |
11fdf7f2 TL |
2226 | struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); |
2227 | ||
7c673cae FG |
2228 | switch (nb_rx_q) { |
2229 | case 1: | |
2230 | case 2: | |
2231 | RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS; | |
2232 | break; | |
2233 | case 4: | |
2234 | RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS; | |
2235 | break; | |
2236 | default: | |
2237 | return -EINVAL; | |
2238 | } | |
2239 | ||
11fdf7f2 TL |
2240 | RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = |
2241 | IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; | |
2242 | RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = | |
2243 | pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; | |
7c673cae FG |
2244 | return 0; |
2245 | } | |
2246 | ||
/*
 * Validate (and where allowed, normalize) the requested Rx/Tx multi-queue
 * modes and queue counts in dev_conf before the device is configured.
 *
 * With SR-IOV active: only VMDq-compatible modes are accepted; RSS and
 * NONE requests are silently rewritten to their VMDq equivalents, and
 * queue counts are bounded by the queues-per-pool budget.
 * Without SR-IOV: VMDq+DCB and plain DCB configurations are checked for
 * legal pool/TC counts, and the ETH_MQ_TX_NONE queue limit is enforced
 * on all MACs except 82598EB.
 *
 * Returns 0 if the configuration is acceptable, -EINVAL otherwise.
 * NOTE: mutates dev->data->dev_conf (mode normalization) as a side effect.
 */
static int
ixgbe_check_mq_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t nb_rx_q = dev->data->nb_rx_queues;
	uint16_t nb_tx_q = dev->data->nb_tx_queues;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		/* check multi-queue mode */
		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
			break;
		case ETH_MQ_RX_VMDQ_DCB_RSS:
			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
			PMD_INIT_LOG(ERR, "SRIOV active,"
					" unsupported mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		case ETH_MQ_RX_RSS:
		case ETH_MQ_RX_VMDQ_RSS:
			/* plain RSS is promoted to VMDq+RSS under SR-IOV */
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
				if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
					PMD_INIT_LOG(ERR, "SRIOV is active,"
						" invalid queue number"
						" for VMDQ RSS, allowed"
						" value are 1, 2 or 4.");
					return -EINVAL;
				}
			break;
		case ETH_MQ_RX_VMDQ_ONLY:
		case ETH_MQ_RX_NONE:
			/* if nothing mq mode configure, use default scheme */
			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
			break;
		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
			/* SRIOV only works in VMDq enable mode */
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" wrong mq_mode rx %d.",
					dev_conf->rxmode.mq_mode);
			return -EINVAL;
		}

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
			break;
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
			break;
		}

		/* check valid queue number */
		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
			PMD_INIT_LOG(ERR, "SRIOV is active,"
					" nb_rx_q=%d nb_tx_q=%d queue number"
					" must be less than or equal to %d.",
					nb_rx_q, nb_tx_q,
					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
					  " not supported.");
			return -EINVAL;
		}
		/* check configuration for vmdb+dcb mode */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_conf *conf;

			if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
						IXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			       conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools must be %d or %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
			const struct rte_eth_vmdq_dcb_tx_conf *conf;

			if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
						 IXGBE_VMDQ_DCB_NB_QUEUES);
				return -EINVAL;
			}
			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
			       conf->nb_queue_pools == ETH_32_POOLS)) {
				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
						" nb_queue_pools != %d and"
						" nb_queue_pools != %d.",
						ETH_16_POOLS, ETH_32_POOLS);
				return -EINVAL;
			}
		}

		/* For DCB mode check our configuration before we go further */
		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
			const struct rte_eth_dcb_rx_conf *conf;

			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}

		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
			const struct rte_eth_dcb_tx_conf *conf;

			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
			if (!(conf->nb_tcs == ETH_4_TCS ||
			       conf->nb_tcs == ETH_8_TCS)) {
				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
						" and nb_tcs != %d.",
						ETH_4_TCS, ETH_8_TCS);
				return -EINVAL;
			}
		}

		/*
		 * When DCB/VT is off, maximum number of queues changes,
		 * except for 82598EB, which remains constant.
		 */
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
				hw->mac.type != ixgbe_mac_82598EB) {
			if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
				PMD_INIT_LOG(ERR,
					     "Neither VT nor DCB are enabled, "
					     "nb_tx_q > %d.",
					     IXGBE_NONE_MODE_TX_NB_QUEUES);
				return -EINVAL;
			}
		}
	}
	return 0;
}
2399 | ||
/*
 * eth_dev_ops dev_configure callback: validate the requested multi-queue
 * configuration, flag a deferred link-status update, and reset the Rx
 * fast-path capability hints for the upcoming queue setup.
 * Returns 0 on success or the negative errno from ixgbe_check_mq_mode().
 */
static int
ixgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	int ret;

	PMD_INIT_FUNC_TRACE();
	/* multiple queue mode checking */
	ret  = ixgbe_check_mq_mode(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
			    ret);
		return ret;
	}

	/* set flag to update link status after init */
	intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;

	/*
	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
	 * allocation or vector Rx preconditions we will reset it.
	 */
	adapter->rx_bulk_alloc_allowed = true;
	adapter->rx_vec_allowed = true;

	return 0;
}
2430 | ||
/*
 * Enable the external-PHY interrupt (SDP0 GPI) on X550EM_X parts.
 * On other MAC types this is a no-op. For the x550em_ext_t PHY the
 * corresponding EICR bit is also added to the driver's interrupt mask
 * so the ISR will see PHY events.
 */
static void
ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	uint32_t gpie;

	/* only set up it on X550EM_X */
	if (hw->mac.type == ixgbe_mac_X550EM_x) {
		gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
		gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
		IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t)
			intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
	}
}
2449 | ||
11fdf7f2 TL |
/*
 * Set a per-queue Tx rate limit for VF @vf on the queues selected by
 * bitmask @q_msk.
 *
 * @vf       VF index; must be below pci_dev->max_vfs.
 * @tx_rate  rate in Mbps; must not exceed the current link speed.
 * @q_msk    bit i selects queue i of the VF's pool; 0 is a no-op.
 *
 * The function first sums the rates already assigned to all other VFs,
 * then adds the new per-queue rates; if the grand total would exceed
 * the link speed, this VF's stored rates are wiped and -EINVAL is
 * returned. Otherwise RTTBCNRC is programmed for each selected queue
 * via ixgbe_set_queue_rate_limit(). Returns 0 on success, -EINVAL on
 * any validation failure.
 */
int
ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
			uint16_t tx_rate, uint64_t q_msk)
{
	struct ixgbe_hw *hw;
	struct ixgbe_vf_info *vfinfo;
	struct rte_eth_link link;
	uint8_t  nb_q_per_pool;
	uint32_t queue_stride;
	uint32_t queue_idx, idx = 0, vf_idx;
	uint32_t queue_end;
	uint16_t total_rate = 0;
	struct rte_pci_device *pci_dev;

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	rte_eth_link_get_nowait(dev->data->port_id, &link);

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (tx_rate > link.link_speed)
		return -EINVAL;

	if (q_msk == 0)
		return 0;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	queue_idx = vf * queue_stride;
	queue_end = queue_idx + nb_q_per_pool - 1;
	/* the VF's queue range must fit within the MAC's Tx queue space */
	if (queue_end >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (vfinfo) {
		/* sum rates already committed to every OTHER VF */
		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
			if (vf_idx == vf)
				continue;
			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
				idx++)
				total_rate += vfinfo[vf_idx].tx_rate[idx];
		}
	} else {
		return -EINVAL;
	}

	/* Store tx_rate for this vf. */
	for (idx = 0; idx < nb_q_per_pool; idx++) {
		if (((uint64_t)0x1 << idx) & q_msk) {
			if (vfinfo[vf].tx_rate[idx] != tx_rate)
				vfinfo[vf].tx_rate[idx] = tx_rate;
			total_rate += tx_rate;
		}
	}

	if (total_rate > dev->data->dev_link.link_speed) {
		/* Reset stored TX rate of the VF if it causes exceed
		 * link speed.
		 */
		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
		return -EINVAL;
	}

	/* Set RTTBCNRC of each queue/pool for vf X  */
	for (; queue_idx <= queue_end; queue_idx++) {
		if (0x1 & q_msk)
			ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
		q_msk = q_msk >> 1;
	}

	return 0;
}
2523 | ||
7c673cae FG |
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 *
 * Full PF bring-up sequence: reject fixed-speed requests, reset and
 * restart the adapter, configure SR-IOV/PHY interrupts and MSI-X
 * vectors, initialize Tx/Rx units, apply VLAN/DCB/FDIR configuration,
 * restore VF rate limits and statistics mapping, start the queues,
 * negotiate link speed (unless loopback is enabled), and finally wire
 * up the interrupt paths. On any failure after queue init, jumps to
 * the common error path which clears the queues and returns -EIO.
 */
static int
ixgbe_dev_start(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err, link_up = 0, negotiate = 0;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int mask = 0;
	int status;
	uint16_t vf, idx;
	uint32_t *link_speeds;
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* IXGBE devices don't support:
	 *    - half duplex (checked afterwards for valid speeds)
	 *    - fixed speed: TODO implement
	 */
	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
		PMD_INIT_LOG(ERR,
		"Invalid link_speeds for port %u, fix speed not supported",
				dev->data->port_id);
		return -EINVAL;
	}

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;
	ixgbe_stop_adapter(hw);

	/* reinitialize adapter
	 * this calls reset and start
	 */
	status = ixgbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.ops.start_hw(hw);
	hw->mac.get_link_status = true;

	/* configure PF module if SRIOV enabled */
	ixgbe_pf_host_configure(dev);

	ixgbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
			PMD_INIT_LOG(ERR, "At most %d intr queues supported",
					IXGBE_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until rx interrupt */
	ixgbe_configure_msix(dev);

	/* initialize transmission unit */
	ixgbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ixgbe_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
		goto error;
	}

	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK;
	err = ixgbe_vlan_offload_config(dev, mask);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
		goto error;
	}

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
		/* Enable vlan filtering for VMDq */
		ixgbe_vmdq_vlan_hw_filter_enable(dev);
	}

	/* Configure DCB hw */
	ixgbe_configure_dcb(dev);

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		err = ixgbe_fdir_configure(dev);
		if (err)
			goto error;
	}

	/* Restore vf rate limit */
	if (vfinfo != NULL) {
		for (vf = 0; vf < pci_dev->max_vfs; vf++)
			for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
				if (vfinfo[vf].tx_rate[idx] != 0)
					ixgbe_set_vf_rate_limit(
						dev, vf,
						vfinfo[vf].tx_rate[idx],
						1 << idx);
	}

	ixgbe_restore_statistics_mapping(dev);

	err = ixgbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	/* Skip link setup if loopback mode is enabled. */
	if (dev->data->dev_conf.lpbk_mode != 0) {
		err = ixgbe_check_supported_loopback_mode(dev);
		if (err < 0) {
			PMD_INIT_LOG(ERR, "Unsupported loopback mode");
			goto error;
		} else {
			goto skip_link_setup;
		}
	}

	if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
		err = hw->mac.ops.setup_sfp(hw);
		if (err)
			goto error;
	}

	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
		/* Turn on the copper */
		ixgbe_set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		ixgbe_enable_tx_laser(hw);
	}

	err = ixgbe_check_link(hw, &speed, &link_up, 0);
	if (err)
		goto error;
	dev->data->dev_link.link_status = link_up;

	err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
	if (err)
		goto error;

	/* build the set of speeds this MAC is allowed to advertise */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G |
			ETH_LINK_SPEED_10G;
		if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
				hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
			allowed_speeds = ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G;
		break;
	default:
		allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
			ETH_LINK_SPEED_10G;
	}

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	speed = 0x0;
	if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			speed = IXGBE_LINK_SPEED_82598_AUTONEG;
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			speed = IXGBE_LINK_SPEED_82599_AUTONEG;
			break;
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_X550EM_a:
			speed = IXGBE_LINK_SPEED_X550_AUTONEG;
			break;
		default:
			speed = IXGBE_LINK_SPEED_82599_AUTONEG;
		}
	} else {
		/* translate each requested ethdev speed flag to HW flags */
		if (*link_speeds & ETH_LINK_SPEED_10G)
			speed |= IXGBE_LINK_SPEED_10GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_5G)
			speed |= IXGBE_LINK_SPEED_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_2_5G)
			speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_1G)
			speed |= IXGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & ETH_LINK_SPEED_100M)
			speed |= IXGBE_LINK_SPEED_100_FULL;
		if (*link_speeds & ETH_LINK_SPEED_10M)
			speed |= IXGBE_LINK_SPEED_10_FULL;
	}

	err = ixgbe_setup_link(hw, speed, link_up);
	if (err)
		goto error;

skip_link_setup:

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ixgbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ixgbe_dev_lsc_interrupt_setup(dev, FALSE);
		ixgbe_dev_macsec_interrupt_setup(dev);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ixgbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ixgbe_dev_rxq_interrupt_setup(dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since hw reset */
	ixgbe_enable_intr(dev);
	ixgbe_l2_tunnel_conf(dev);
	ixgbe_filter_restore(dev);

	if (tm_conf->root && !tm_conf->committed)
		PMD_DRV_LOG(WARNING,
			    "please call hierarchy_commit() "
			    "before starting the port");

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	ixgbe_dev_link_update(dev, 0);

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
	ixgbe_dev_clear_queues(dev);
	return -EIO;
}
2806 | ||
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 *
 * Reverse of ixgbe_dev_start(): cancels the deferred link-setup alarm,
 * masks interrupts, resets and stops the adapter, blocks further VF
 * mailbox traffic, powers down the PHY/laser, clears the queues and the
 * cached software state (scattered-rx/LRO flags, link status, TM commit
 * flag, RSS RETA dirty flag), and tears down the MSI-X vector mapping.
 */
static void
ixgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int vf;
	struct ixgbe_tm_conf *tm_conf =
		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);

	/* disable interrupts */
	ixgbe_disable_intr(hw);

	/* reset the NIC */
	ixgbe_pf_reset_hw(hw);
	hw->adapter_stopped = 0;

	/* stop adapter */
	ixgbe_stop_adapter(hw);

	/* stop answering VF mailbox requests while the port is down */
	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
		/* Turn off the copper */
		ixgbe_set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		ixgbe_disable_tx_laser(hw);
	}

	ixgbe_dev_clear_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ixgbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	/* reset hierarchy commit */
	tm_conf->committed = false;

	adapter->rss_reta_updated = 0;
}
2879 | ||
/*
 * Set device link up: enable tx.
 * Powers the copper PHY (or enables the Tx laser on fiber).
 * Returns 0 on success, -ENOTSUP for the 82599 bypass adapter when
 * bypass support is compiled in (link control is owned by bypass HW).
 */
static int
ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (hw->mac.type == ixgbe_mac_82599EB) {
#ifdef RTE_LIBRTE_IXGBE_BYPASS
		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
			/* Not supported in bypass mode */
			PMD_INIT_LOG(ERR, "Set link up is not supported "
				     "by device id 0x%x", hw->device_id);
			return -ENOTSUP;
		}
#endif
	}

	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
		/* Turn on the copper */
		ixgbe_set_phy_power(hw, true);
	} else {
		/* Turn on the laser */
		ixgbe_enable_tx_laser(hw);
	}

	return 0;
}
2909 | ||
/*
 * Set device link down: disable tx.
 * Mirror of ixgbe_dev_set_link_up(): powers down the copper PHY (or
 * disables the Tx laser on fiber). Returns 0 on success, -ENOTSUP for
 * the 82599 bypass adapter when bypass support is compiled in.
 */
static int
ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if (hw->mac.type == ixgbe_mac_82599EB) {
#ifdef RTE_LIBRTE_IXGBE_BYPASS
		if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
			/* Not supported in bypass mode */
			PMD_INIT_LOG(ERR, "Set link down is not supported "
				     "by device id 0x%x", hw->device_id);
			return -ENOTSUP;
		}
#endif
	}

	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
		/* Turn off the copper */
		ixgbe_set_phy_power(hw, false);
	} else {
		/* Turn off the laser */
		ixgbe_disable_tx_laser(hw);
	}

	return 0;
}
2939 | ||
/*
 * Reset and stop device.
 *
 * eth_dev_ops dev_close callback: resets the PF hardware, runs the full
 * stop sequence, frees all queue resources, blocks further PCIe master
 * accesses, and restores RAR[0] from hw->mac.addr in case the user had
 * overridden the default MAC address.
 */
static void
ixgbe_dev_close(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	ixgbe_pf_reset_hw(hw);

	ixgbe_dev_stop(dev);
	hw->adapter_stopped = 1;

	ixgbe_dev_free_queues(dev);

	ixgbe_disable_pcie_master(hw);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
2963 | ||
11fdf7f2 TL |
/*
 * Reset PF device.
 *
 * Implemented as a full uninit/init cycle of the ethdev. Returns 0 on
 * success, -ENOTSUP when SR-IOV is active (see note below), or the
 * error code from uninit/init.
 */
static int
ixgbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	/* When a DPDK PMD PF begin to reset PF port, it should notify all
	 * its VF to make them align with it. The detailed notification
	 * mechanism is PMD specific. As to ixgbe PF, it is rather complex.
	 * To avoid unexpected behavior in VF, currently reset of PF with
	 * SR-IOV activation is not supported. It might be supported later.
	 */
	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = eth_ixgbe_dev_uninit(dev);
	if (ret)
		return ret;

	ret = eth_ixgbe_dev_init(dev, NULL);

	return ret;
}
2989 | ||
7c673cae FG |
2990 | static void |
2991 | ixgbe_read_stats_registers(struct ixgbe_hw *hw, | |
2992 | struct ixgbe_hw_stats *hw_stats, | |
11fdf7f2 | 2993 | struct ixgbe_macsec_stats *macsec_stats, |
7c673cae FG |
2994 | uint64_t *total_missed_rx, uint64_t *total_qbrc, |
2995 | uint64_t *total_qprc, uint64_t *total_qprdc) | |
2996 | { | |
2997 | uint32_t bprc, lxon, lxoff, total; | |
2998 | uint32_t delta_gprc = 0; | |
2999 | unsigned i; | |
3000 | /* Workaround for RX byte count not including CRC bytes when CRC | |
11fdf7f2 | 3001 | * strip is enabled. CRC bytes are removed from counters when crc_strip |
7c673cae | 3002 | * is disabled. |
11fdf7f2 | 3003 | */ |
7c673cae FG |
3004 | int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & |
3005 | IXGBE_HLREG0_RXCRCSTRP); | |
3006 | ||
3007 | hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); | |
3008 | hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); | |
3009 | hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); | |
3010 | hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); | |
3011 | ||
3012 | for (i = 0; i < 8; i++) { | |
3013 | uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); | |
3014 | ||
3015 | /* global total per queue */ | |
3016 | hw_stats->mpc[i] += mp; | |
3017 | /* Running comprehensive total for stats display */ | |
3018 | *total_missed_rx += hw_stats->mpc[i]; | |
3019 | if (hw->mac.type == ixgbe_mac_82598EB) { | |
3020 | hw_stats->rnbc[i] += | |
3021 | IXGBE_READ_REG(hw, IXGBE_RNBC(i)); | |
3022 | hw_stats->pxonrxc[i] += | |
3023 | IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); | |
3024 | hw_stats->pxoffrxc[i] += | |
3025 | IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); | |
3026 | } else { | |
3027 | hw_stats->pxonrxc[i] += | |
3028 | IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); | |
3029 | hw_stats->pxoffrxc[i] += | |
3030 | IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); | |
3031 | hw_stats->pxon2offc[i] += | |
3032 | IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); | |
3033 | } | |
3034 | hw_stats->pxontxc[i] += | |
3035 | IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); | |
3036 | hw_stats->pxofftxc[i] += | |
3037 | IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); | |
3038 | } | |
3039 | for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { | |
3040 | uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i)); | |
3041 | uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i)); | |
3042 | uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); | |
3043 | ||
3044 | delta_gprc += delta_qprc; | |
3045 | ||
3046 | hw_stats->qprc[i] += delta_qprc; | |
3047 | hw_stats->qptc[i] += delta_qptc; | |
3048 | ||
3049 | hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); | |
3050 | hw_stats->qbrc[i] += | |
3051 | ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); | |
3052 | if (crc_strip == 0) | |
3053 | hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN; | |
3054 | ||
3055 | hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); | |
3056 | hw_stats->qbtc[i] += | |
3057 | ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); | |
3058 | ||
3059 | hw_stats->qprdc[i] += delta_qprdc; | |
3060 | *total_qprdc += hw_stats->qprdc[i]; | |
3061 | ||
3062 | *total_qprc += hw_stats->qprc[i]; | |
3063 | *total_qbrc += hw_stats->qbrc[i]; | |
3064 | } | |
3065 | hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); | |
3066 | hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); | |
3067 | hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); | |
3068 | ||
3069 | /* | |
3070 | * An errata states that gprc actually counts good + missed packets: | |
3071 | * Workaround to set gprc to summated queue packet receives | |
3072 | */ | |
3073 | hw_stats->gprc = *total_qprc; | |
3074 | ||
3075 | if (hw->mac.type != ixgbe_mac_82598EB) { | |
3076 | hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); | |
3077 | hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); | |
3078 | hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); | |
3079 | hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); | |
3080 | hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); | |
3081 | hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); | |
3082 | hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); | |
3083 | hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); | |
3084 | } else { | |
3085 | hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); | |
3086 | hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); | |
3087 | /* 82598 only has a counter in the high register */ | |
3088 | hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); | |
3089 | hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); | |
3090 | hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); | |
3091 | } | |
3092 | uint64_t old_tpr = hw_stats->tpr; | |
3093 | ||
3094 | hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); | |
3095 | hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); | |
3096 | ||
3097 | if (crc_strip == 0) | |
3098 | hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN; | |
3099 | ||
3100 | uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); | |
3101 | hw_stats->gptc += delta_gptc; | |
3102 | hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN; | |
3103 | hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN; | |
3104 | ||
3105 | /* | |
3106 | * Workaround: mprc hardware is incorrectly counting | |
3107 | * broadcasts, so for now we subtract those. | |
3108 | */ | |
3109 | bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); | |
3110 | hw_stats->bprc += bprc; | |
3111 | hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); | |
3112 | if (hw->mac.type == ixgbe_mac_82598EB) | |
3113 | hw_stats->mprc -= bprc; | |
3114 | ||
3115 | hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); | |
3116 | hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); | |
3117 | hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); | |
3118 | hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); | |
3119 | hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); | |
3120 | hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); | |
3121 | ||
3122 | lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); | |
3123 | hw_stats->lxontxc += lxon; | |
3124 | lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); | |
3125 | hw_stats->lxofftxc += lxoff; | |
3126 | total = lxon + lxoff; | |
3127 | ||
3128 | hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); | |
3129 | hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); | |
3130 | hw_stats->gptc -= total; | |
3131 | hw_stats->mptc -= total; | |
3132 | hw_stats->ptc64 -= total; | |
3133 | hw_stats->gotc -= total * ETHER_MIN_LEN; | |
3134 | ||
3135 | hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); | |
3136 | hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); | |
3137 | hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); | |
3138 | hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); | |
3139 | hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); | |
3140 | hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); | |
3141 | hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); | |
3142 | hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); | |
3143 | hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); | |
3144 | hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); | |
3145 | hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); | |
3146 | hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); | |
3147 | hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); | |
3148 | hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); | |
3149 | hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); | |
3150 | hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); | |
3151 | /* Only read FCOE on 82599 */ | |
3152 | if (hw->mac.type != ixgbe_mac_82598EB) { | |
3153 | hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); | |
3154 | hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); | |
3155 | hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); | |
3156 | hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); | |
3157 | hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); | |
3158 | } | |
3159 | ||
3160 | /* Flow Director Stats registers */ | |
11fdf7f2 TL |
3161 | if (hw->mac.type != ixgbe_mac_82598EB) { |
3162 | hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); | |
3163 | hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); | |
3164 | hw_stats->fdirustat_add += IXGBE_READ_REG(hw, | |
3165 | IXGBE_FDIRUSTAT) & 0xFFFF; | |
3166 | hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw, | |
3167 | IXGBE_FDIRUSTAT) >> 16) & 0xFFFF; | |
3168 | hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw, | |
3169 | IXGBE_FDIRFSTAT) & 0xFFFF; | |
3170 | hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw, | |
3171 | IXGBE_FDIRFSTAT) >> 16) & 0xFFFF; | |
3172 | } | |
3173 | /* MACsec Stats registers */ | |
3174 | macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); | |
3175 | macsec_stats->out_pkts_encrypted += | |
3176 | IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE); | |
3177 | macsec_stats->out_pkts_protected += | |
3178 | IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP); | |
3179 | macsec_stats->out_octets_encrypted += | |
3180 | IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE); | |
3181 | macsec_stats->out_octets_protected += | |
3182 | IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP); | |
3183 | macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT); | |
3184 | macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD); | |
3185 | macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI); | |
3186 | macsec_stats->in_pkts_unknownsci += | |
3187 | IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI); | |
3188 | macsec_stats->in_octets_decrypted += | |
3189 | IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD); | |
3190 | macsec_stats->in_octets_validated += | |
3191 | IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV); | |
3192 | macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH); | |
3193 | macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY); | |
3194 | macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE); | |
3195 | for (i = 0; i < 2; i++) { | |
3196 | macsec_stats->in_pkts_ok += | |
3197 | IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i)); | |
3198 | macsec_stats->in_pkts_invalid += | |
3199 | IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i)); | |
3200 | macsec_stats->in_pkts_notvalid += | |
3201 | IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i)); | |
3202 | } | |
3203 | macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA); | |
3204 | macsec_stats->in_pkts_notusingsa += | |
3205 | IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA); | |
7c673cae FG |
3206 | } |
3207 | ||
3208 | /* | |
3209 | * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c | |
3210 | */ | |
/*
 * .stats_get eth_dev op for the PF: accumulate the clear-on-read HW
 * counters into the driver's software totals, then fill the generic
 * rte_eth_stats structure from those totals.
 *
 * Note: ixgbe_read_stats_registers() is called *before* the NULL check,
 * deliberately — ixgbe_dev_stats_reset() passes stats == NULL purely to
 * read (and thereby clear) the hardware registers.
 *
 * Returns 0 on success, -EINVAL when stats is NULL.
 */
static int
ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ixgbe_hw *hw =
			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_stats *hw_stats =
			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	struct ixgbe_macsec_stats *macsec_stats =
			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
				dev->data->dev_private);
	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
	unsigned i;

	total_missed_rx = 0;
	total_qbrc = 0;
	total_qprc = 0;
	total_qprdc = 0;

	/* Reads and clears the HW registers, updating *hw_stats and the
	 * per-call aggregation totals used below.
	 */
	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
			&total_qbrc, &total_qprc, &total_qprdc);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = total_qprc;
	stats->ibytes = total_qbrc;
	stats->opackets = hw_stats->gptc;
	stats->obytes = hw_stats->gotc;

	/* Per-queue counters (limited to IXGBE_QUEUE_STAT_COUNTERS queues) */
	for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
		stats->q_ipackets[i] = hw_stats->qprc[i];
		stats->q_opackets[i] = hw_stats->qptc[i];
		stats->q_ibytes[i] = hw_stats->qbrc[i];
		stats->q_obytes[i] = hw_stats->qbtc[i];
		stats->q_errors[i] = hw_stats->qprdc[i];
	}

	/* Rx Errors: sum of the individual HW error counters */
	stats->imissed = total_missed_rx;
	stats->ierrors = hw_stats->crcerrs +
			 hw_stats->mspdc +
			 hw_stats->rlec +
			 hw_stats->ruc +
			 hw_stats->roc +
			 hw_stats->illerrc +
			 hw_stats->errbc +
			 hw_stats->rfc +
			 hw_stats->fccrc +
			 hw_stats->fclast;

	/* Tx Errors: none reported by this driver */
	stats->oerrors = 0;
	return 0;
}
3266 | ||
3267 | static void | |
3268 | ixgbe_dev_stats_reset(struct rte_eth_dev *dev) | |
3269 | { | |
3270 | struct ixgbe_hw_stats *stats = | |
3271 | IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); | |
3272 | ||
3273 | /* HW registers are cleared on read */ | |
3274 | ixgbe_dev_stats_get(dev, NULL); | |
3275 | ||
3276 | /* Reset software totals */ | |
3277 | memset(stats, 0, sizeof(*stats)); | |
3278 | } | |
3279 | ||
3280 | /* This function calculates the number of xstats based on the current config */ | |
3281 | static unsigned | |
3282 | ixgbe_xstats_calc_num(void) { | |
11fdf7f2 | 3283 | return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS + |
7c673cae FG |
3284 | (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + |
3285 | (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); | |
3286 | } | |
3287 | ||
/*
 * .xstats_get_names eth_dev op for the PF.
 *
 * Fills xstats_names (when non-NULL) in the fixed order that
 * ixgbe_dev_xstats_get() uses to assign stat ids: HW stats, MACsec
 * stats, then per-priority Rx and Tx stats.  The two orders must stay
 * in sync.  Always returns the total number of xstats.
 */
static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
{
	const unsigned cnt_stats = ixgbe_xstats_calc_num();
	unsigned stat, i, count;

	if (xstats_names != NULL) {
		count = 0;

		/* Note: limit >= cnt_stats checked upstream
		 * in rte_eth_xstats_names()
		 */

		/* Extended stats from ixgbe_hw_stats */
		for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
			strlcpy(xstats_names[count].name,
				rte_ixgbe_stats_strings[i].name,
				sizeof(xstats_names[count].name));
			count++;
		}

		/* MACsec Stats */
		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
			strlcpy(xstats_names[count].name,
				rte_ixgbe_macsec_strings[i].name,
				sizeof(xstats_names[count].name));
			count++;
		}

		/* RX Priority Stats: one name per (stat, priority) pair */
		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
				snprintf(xstats_names[count].name,
					sizeof(xstats_names[count].name),
					"rx_priority%u_%s", i,
					rte_ixgbe_rxq_strings[stat].name);
				count++;
			}
		}

		/* TX Priority Stats: one name per (stat, priority) pair */
		for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
			for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
				snprintf(xstats_names[count].name,
					sizeof(xstats_names[count].name),
					"tx_priority%u_%s", i,
					rte_ixgbe_txq_strings[stat].name);
				count++;
			}
		}
	}
	return cnt_stats;
}
3341 | ||
11fdf7f2 TL |
3342 | static int ixgbe_dev_xstats_get_names_by_id( |
3343 | struct rte_eth_dev *dev, | |
3344 | struct rte_eth_xstat_name *xstats_names, | |
3345 | const uint64_t *ids, | |
3346 | unsigned int limit) | |
3347 | { | |
3348 | if (!ids) { | |
3349 | const unsigned int cnt_stats = ixgbe_xstats_calc_num(); | |
3350 | unsigned int stat, i, count; | |
3351 | ||
3352 | if (xstats_names != NULL) { | |
3353 | count = 0; | |
3354 | ||
3355 | /* Note: limit >= cnt_stats checked upstream | |
3356 | * in rte_eth_xstats_names() | |
3357 | */ | |
3358 | ||
3359 | /* Extended stats from ixgbe_hw_stats */ | |
3360 | for (i = 0; i < IXGBE_NB_HW_STATS; i++) { | |
9f95a23c TL |
3361 | strlcpy(xstats_names[count].name, |
3362 | rte_ixgbe_stats_strings[i].name, | |
3363 | sizeof(xstats_names[count].name)); | |
11fdf7f2 TL |
3364 | count++; |
3365 | } | |
3366 | ||
3367 | /* MACsec Stats */ | |
3368 | for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { | |
9f95a23c TL |
3369 | strlcpy(xstats_names[count].name, |
3370 | rte_ixgbe_macsec_strings[i].name, | |
3371 | sizeof(xstats_names[count].name)); | |
11fdf7f2 TL |
3372 | count++; |
3373 | } | |
3374 | ||
3375 | /* RX Priority Stats */ | |
3376 | for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { | |
3377 | for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { | |
3378 | snprintf(xstats_names[count].name, | |
3379 | sizeof(xstats_names[count].name), | |
3380 | "rx_priority%u_%s", i, | |
3381 | rte_ixgbe_rxq_strings[stat].name); | |
3382 | count++; | |
3383 | } | |
3384 | } | |
3385 | ||
3386 | /* TX Priority Stats */ | |
3387 | for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { | |
3388 | for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { | |
3389 | snprintf(xstats_names[count].name, | |
3390 | sizeof(xstats_names[count].name), | |
3391 | "tx_priority%u_%s", i, | |
3392 | rte_ixgbe_txq_strings[stat].name); | |
3393 | count++; | |
3394 | } | |
3395 | } | |
3396 | } | |
3397 | return cnt_stats; | |
3398 | } | |
3399 | ||
3400 | uint16_t i; | |
3401 | uint16_t size = ixgbe_xstats_calc_num(); | |
3402 | struct rte_eth_xstat_name xstats_names_copy[size]; | |
3403 | ||
3404 | ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL, | |
3405 | size); | |
3406 | ||
3407 | for (i = 0; i < limit; i++) { | |
3408 | if (ids[i] >= size) { | |
3409 | PMD_INIT_LOG(ERR, "id value isn't valid"); | |
3410 | return -1; | |
3411 | } | |
3412 | strcpy(xstats_names[i].name, | |
3413 | xstats_names_copy[ids[i]].name); | |
3414 | } | |
3415 | return limit; | |
3416 | } | |
3417 | ||
7c673cae FG |
3418 | static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, |
3419 | struct rte_eth_xstat_name *xstats_names, unsigned limit) | |
3420 | { | |
3421 | unsigned i; | |
3422 | ||
3423 | if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) | |
3424 | return -ENOMEM; | |
3425 | ||
3426 | if (xstats_names != NULL) | |
3427 | for (i = 0; i < IXGBEVF_NB_XSTATS; i++) | |
9f95a23c TL |
3428 | strlcpy(xstats_names[i].name, |
3429 | rte_ixgbevf_stats_strings[i].name, | |
3430 | sizeof(xstats_names[i].name)); | |
7c673cae FG |
3431 | return IXGBEVF_NB_XSTATS; |
3432 | } | |
3433 | ||
/*
 * .xstats_get eth_dev op for the PF.
 *
 * Refreshes the software stat copies from the clear-on-read HW registers,
 * then fills 'xstats' using byte offsets from the rte_ixgbe_*_strings
 * tables.  The fill order (HW, MACsec, Rx priority, Tx priority) assigns
 * the stat ids and must match ixgbe_dev_xstats_get_names().
 *
 * Returns the required count when n is too small, 0 when xstats is NULL
 * (reset path: registers were still read/cleared), else the count filled.
 */
static int
ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
					 unsigned n)
{
	struct ixgbe_hw *hw =
			IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_stats *hw_stats =
			IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
	struct ixgbe_macsec_stats *macsec_stats =
			IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
				dev->data->dev_private);
	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
	unsigned i, stat, count = 0;

	count = ixgbe_xstats_calc_num();

	/* Not enough room: report how many entries are needed. */
	if (n < count)
		return count;

	total_missed_rx = 0;
	total_qbrc = 0;
	total_qprc = 0;
	total_qprdc = 0;

	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
			&total_qbrc, &total_qprc, &total_qprdc);

	/* If this is a reset xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	if (!xstats)
		return 0;

	/* Extended stats from ixgbe_hw_stats */
	count = 0;
	for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
		/* Each entry's value lives at a table-defined byte offset
		 * inside the hw_stats structure.
		 */
		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
				rte_ixgbe_stats_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	/* MACsec Stats */
	for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
		xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
				rte_ixgbe_macsec_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	/* RX Priority Stats: per-priority values are laid out as
	 * consecutive uint64_t starting at the table offset.
	 */
	for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
		for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
			xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
					rte_ixgbe_rxq_strings[stat].offset +
					(sizeof(uint64_t) * i));
			xstats[count].id = count;
			count++;
		}
	}

	/* TX Priority Stats */
	for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
		for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
			xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
					rte_ixgbe_txq_strings[stat].offset +
					(sizeof(uint64_t) * i));
			xstats[count].id = count;
			count++;
		}
	}
	return count;
}
3507 | ||
11fdf7f2 TL |
3508 | static int |
3509 | ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, | |
3510 | uint64_t *values, unsigned int n) | |
3511 | { | |
3512 | if (!ids) { | |
3513 | struct ixgbe_hw *hw = | |
3514 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
3515 | struct ixgbe_hw_stats *hw_stats = | |
3516 | IXGBE_DEV_PRIVATE_TO_STATS( | |
3517 | dev->data->dev_private); | |
3518 | struct ixgbe_macsec_stats *macsec_stats = | |
3519 | IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( | |
3520 | dev->data->dev_private); | |
3521 | uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; | |
3522 | unsigned int i, stat, count = 0; | |
3523 | ||
3524 | count = ixgbe_xstats_calc_num(); | |
3525 | ||
3526 | if (!ids && n < count) | |
3527 | return count; | |
3528 | ||
3529 | total_missed_rx = 0; | |
3530 | total_qbrc = 0; | |
3531 | total_qprc = 0; | |
3532 | total_qprdc = 0; | |
3533 | ||
3534 | ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, | |
3535 | &total_missed_rx, &total_qbrc, &total_qprc, | |
3536 | &total_qprdc); | |
3537 | ||
3538 | /* If this is a reset xstats is NULL, and we have cleared the | |
3539 | * registers by reading them. | |
3540 | */ | |
3541 | if (!ids && !values) | |
3542 | return 0; | |
3543 | ||
3544 | /* Extended stats from ixgbe_hw_stats */ | |
3545 | count = 0; | |
3546 | for (i = 0; i < IXGBE_NB_HW_STATS; i++) { | |
3547 | values[count] = *(uint64_t *)(((char *)hw_stats) + | |
3548 | rte_ixgbe_stats_strings[i].offset); | |
3549 | count++; | |
3550 | } | |
3551 | ||
3552 | /* MACsec Stats */ | |
3553 | for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { | |
3554 | values[count] = *(uint64_t *)(((char *)macsec_stats) + | |
3555 | rte_ixgbe_macsec_strings[i].offset); | |
3556 | count++; | |
3557 | } | |
3558 | ||
3559 | /* RX Priority Stats */ | |
3560 | for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { | |
3561 | for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { | |
3562 | values[count] = | |
3563 | *(uint64_t *)(((char *)hw_stats) + | |
3564 | rte_ixgbe_rxq_strings[stat].offset + | |
3565 | (sizeof(uint64_t) * i)); | |
3566 | count++; | |
3567 | } | |
3568 | } | |
3569 | ||
3570 | /* TX Priority Stats */ | |
3571 | for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { | |
3572 | for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { | |
3573 | values[count] = | |
3574 | *(uint64_t *)(((char *)hw_stats) + | |
3575 | rte_ixgbe_txq_strings[stat].offset + | |
3576 | (sizeof(uint64_t) * i)); | |
3577 | count++; | |
3578 | } | |
3579 | } | |
3580 | return count; | |
3581 | } | |
3582 | ||
3583 | uint16_t i; | |
3584 | uint16_t size = ixgbe_xstats_calc_num(); | |
3585 | uint64_t values_copy[size]; | |
3586 | ||
3587 | ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size); | |
3588 | ||
3589 | for (i = 0; i < n; i++) { | |
3590 | if (ids[i] >= size) { | |
3591 | PMD_INIT_LOG(ERR, "id value isn't valid"); | |
3592 | return -1; | |
3593 | } | |
3594 | values[i] = values_copy[ids[i]]; | |
3595 | } | |
3596 | return n; | |
3597 | } | |
3598 | ||
7c673cae FG |
3599 | static void |
3600 | ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) | |
3601 | { | |
3602 | struct ixgbe_hw_stats *stats = | |
3603 | IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); | |
11fdf7f2 TL |
3604 | struct ixgbe_macsec_stats *macsec_stats = |
3605 | IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( | |
3606 | dev->data->dev_private); | |
7c673cae FG |
3607 | |
3608 | unsigned count = ixgbe_xstats_calc_num(); | |
3609 | ||
3610 | /* HW registers are cleared on read */ | |
3611 | ixgbe_dev_xstats_get(dev, NULL, count); | |
3612 | ||
3613 | /* Reset software totals */ | |
3614 | memset(stats, 0, sizeof(*stats)); | |
11fdf7f2 | 3615 | memset(macsec_stats, 0, sizeof(*macsec_stats)); |
7c673cae FG |
3616 | } |
3617 | ||
/*
 * Refresh the VF's software statistics from its hardware counters.
 *
 * The UPDATE_VF_STAT / UPDATE_VF_STAT_36BIT macros (defined elsewhere in
 * this driver) each take the register(s), the stored last-read value and
 * the running total; presumably they accumulate the delta since the last
 * read, with the 36-bit variant combining an LSB/MSB register pair —
 * NOTE(review): confirm against the macro definitions in ixgbe_ethdev.h.
 */
static void
ixgbevf_update_stats(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
			  IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);

	/* Good Rx packet, include VF loopback */
	UPDATE_VF_STAT(IXGBE_VFGPRC,
	    hw_stats->last_vfgprc, hw_stats->vfgprc);

	/* Good Rx octets, include VF loopback */
	UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    hw_stats->last_vfgorc, hw_stats->vfgorc);

	/* Good Tx packet, include VF loopback */
	UPDATE_VF_STAT(IXGBE_VFGPTC,
	    hw_stats->last_vfgptc, hw_stats->vfgptc);

	/* Good Tx octets, include VF loopback */
	UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    hw_stats->last_vfgotc, hw_stats->vfgotc);

	/* Rx Multicst Packet */
	UPDATE_VF_STAT(IXGBE_VFMPRC,
	    hw_stats->last_vfmprc, hw_stats->vfmprc);
}
3645 | ||
3646 | static int | |
3647 | ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, | |
3648 | unsigned n) | |
3649 | { | |
3650 | struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) | |
3651 | IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); | |
3652 | unsigned i; | |
3653 | ||
3654 | if (n < IXGBEVF_NB_XSTATS) | |
3655 | return IXGBEVF_NB_XSTATS; | |
3656 | ||
3657 | ixgbevf_update_stats(dev); | |
3658 | ||
3659 | if (!xstats) | |
3660 | return 0; | |
3661 | ||
3662 | /* Extended stats */ | |
3663 | for (i = 0; i < IXGBEVF_NB_XSTATS; i++) { | |
11fdf7f2 | 3664 | xstats[i].id = i; |
7c673cae FG |
3665 | xstats[i].value = *(uint64_t *)(((char *)hw_stats) + |
3666 | rte_ixgbevf_stats_strings[i].offset); | |
3667 | } | |
3668 | ||
3669 | return IXGBEVF_NB_XSTATS; | |
3670 | } | |
3671 | ||
11fdf7f2 | 3672 | static int |
7c673cae FG |
3673 | ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) |
3674 | { | |
3675 | struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) | |
3676 | IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); | |
3677 | ||
3678 | ixgbevf_update_stats(dev); | |
3679 | ||
3680 | if (stats == NULL) | |
11fdf7f2 | 3681 | return -EINVAL; |
7c673cae FG |
3682 | |
3683 | stats->ipackets = hw_stats->vfgprc; | |
3684 | stats->ibytes = hw_stats->vfgorc; | |
3685 | stats->opackets = hw_stats->vfgptc; | |
3686 | stats->obytes = hw_stats->vfgotc; | |
11fdf7f2 | 3687 | return 0; |
7c673cae FG |
3688 | } |
3689 | ||
3690 | static void | |
3691 | ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) | |
3692 | { | |
3693 | struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) | |
3694 | IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); | |
3695 | ||
3696 | /* Sync HW register to the last stats */ | |
3697 | ixgbevf_dev_stats_get(dev, NULL); | |
3698 | ||
3699 | /* reset HW current stats*/ | |
3700 | hw_stats->vfgprc = 0; | |
3701 | hw_stats->vfgorc = 0; | |
3702 | hw_stats->vfgptc = 0; | |
3703 | hw_stats->vfgotc = 0; | |
3704 | } | |
3705 | ||
11fdf7f2 TL |
3706 | static int |
3707 | ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) | |
3708 | { | |
3709 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
3710 | u16 eeprom_verh, eeprom_verl; | |
3711 | u32 etrack_id; | |
3712 | int ret; | |
3713 | ||
3714 | ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); | |
3715 | ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); | |
3716 | ||
3717 | etrack_id = (eeprom_verh << 16) | eeprom_verl; | |
3718 | ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); | |
3719 | ||
3720 | ret += 1; /* add the size of '\0' */ | |
3721 | if (fw_size < (u32)ret) | |
3722 | return ret; | |
3723 | else | |
3724 | return 0; | |
3725 | } | |
3726 | ||
7c673cae FG |
/*
 * .dev_infos_get eth_dev op for the PF: report queue limits, offload
 * capabilities, default ring/threshold configuration and link speed
 * capabilities for this adapter.
 */
static void
ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
		/*
		 * When DCB/VT is off, maximum number of queues changes,
		 * except for 82598EB, which remains constant.
		 */
		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
				hw->mac.type != ixgbe_mac_82598EB)
			dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
	}
	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
	dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	/* 82598EB supports fewer VMDq pools than the newer MACs */
	if (hw->mac.type == ixgbe_mac_82598EB)
		dev_info->max_vmdq_pools = ETH_16_POOLS;
	else
		dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
	dev_info->min_mtu = ETHER_MIN_MTU;
	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
	/* Port-level Rx offloads are a superset of the queue-level ones */
	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);

	/* Default Rx queue configuration (thresholds from driver defaults) */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
			.hthresh = IXGBE_DEFAULT_RX_HTHRESH,
			.wthresh = IXGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	/* Default Tx queue configuration */
	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IXGBE_DEFAULT_TX_PTHRESH,
			.hthresh = IXGBE_DEFAULT_TX_HTHRESH,
			.wthresh = IXGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;

	/* All supported MACs do 1G/10G; some also do 100M and 2.5G/5G */
	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X540_vf ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550_vf) {
		dev_info->speed_capa |= ETH_LINK_SPEED_100M;
	}
	if (hw->mac.type == ixgbe_mac_X550) {
		dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
		dev_info->speed_capa |= ETH_LINK_SPEED_5G;
	}

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;
}
3812 | ||
/*
 * .dev_supported_ptypes_get eth_dev op: return the packet-type list the
 * active Rx burst function can classify, or NULL when the configured
 * burst function is not one of the recognized handlers.
 */
static const uint32_t *
ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* For non-vec functions,
		 * refers to ixgbe_rxd_pkt_info_to_pkt_type();
		 * for vec functions,
		 * refers to _recv_raw_pkts_vec().
		 */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	/* Scalar Rx paths all share the same ptype set */
	if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
	    dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
	    dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
	    dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
		return ptypes;

#if defined(RTE_ARCH_X86)
	/* Vector Rx paths exist only on x86 builds */
	if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
	    dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
		return ptypes;
#endif
	return NULL;
}
3851 | ||
/*
 * Fill in device capabilities for an ixgbe VF port: queue counts, frame
 * size limits, MAC filtering capacity, offload capabilities and the
 * default RX/TX configuration used when the application passes NULL conf.
 */
static void
ixgbevf_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
	dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
	dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	/* 82598EB exposes fewer VMDq pools than later MAC generations. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		dev_info->max_vmdq_pools = ETH_16_POOLS;
	else
		dev_info->max_vmdq_pools = ETH_64_POOLS;
	/* Port-level RX offloads include everything per-queue supports. */
	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);

	/* Defaults applied when the application gives a NULL rxconf. */
	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
			.hthresh = IXGBE_DEFAULT_RX_HTHRESH,
			.wthresh = IXGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	/* Defaults applied when the application gives a NULL txconf. */
	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IXGBE_DEFAULT_TX_PTHRESH,
			.hthresh = IXGBE_DEFAULT_TX_HTHRESH,
			.wthresh = IXGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;
}
3902 | ||
/*
 * VF-side link check: reads VFLINKS for the raw link/speed bits, then
 * verifies via the PF mailbox that the PF is still responsive before
 * declaring the link up.
 *
 * @speed   filled with the detected ixgbe_link_speed
 * @link_up set to 1 when the link is up (i.e. get_link_status cleared)
 * @wait_to_complete  non-zero to poll for link settling on 82599 VF
 *
 * @return 0 normally; -1 when the mailbox has timed out in the past and
 *         the caller should reinitialize.
 */
static int
ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
		   int *link_up, int wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	uint32_t links_reg, in_msg;
	int ret_val = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
		mac->get_link_status = true;

	/* Nothing pending: report the cached state. */
	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
		int i;

		/* 5 polls x 100us = up to 500us of settling time. */
		for (i = 0; i < 5; i++) {
			rte_delay_us(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	/* Decode the speed field; NON_STD flags 2.5G/5G on X550+. */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		if (hw->mac.type >= ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		if (hw->mac.type == ixgbe_mac_X550) {
			if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
				*speed = IXGBE_LINK_SPEED_5GB_FULL;
		}
		break;
	case IXGBE_LINKS_SPEED_10_X550EM_A:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* Since Reserved in older MAC's */
		if (hw->mac.type >= ixgbe_mac_X550)
			*speed = IXGBE_LINK_SPEED_10_FULL;
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	/* if the read failed it could just be a mailbox collision, best wait
	 * until we are called again and don't report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1, 0))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			mac->get_link_status = false;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reinit */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}
3995 | ||
9f95a23c TL |
3996 | static void |
3997 | ixgbe_dev_setup_link_alarm_handler(void *param) | |
3998 | { | |
3999 | struct rte_eth_dev *dev = (struct rte_eth_dev *)param; | |
4000 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
4001 | struct ixgbe_interrupt *intr = | |
4002 | IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
4003 | u32 speed; | |
4004 | bool autoneg = false; | |
4005 | ||
4006 | speed = hw->phy.autoneg_advertised; | |
4007 | if (!speed) | |
4008 | ixgbe_get_link_capabilities(hw, &speed, &autoneg); | |
4009 | ||
4010 | ixgbe_setup_link(hw, speed, true); | |
4011 | ||
4012 | intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; | |
4013 | } | |
4014 | ||
11fdf7f2 TL |
4015 | /* return 0 means link status changed, -1 means not changed */ |
4016 | int | |
4017 | ixgbe_dev_link_update_share(struct rte_eth_dev *dev, | |
4018 | int wait_to_complete, int vf) | |
4019 | { | |
4020 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
4021 | struct rte_eth_link link; | |
4022 | ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; | |
4023 | struct ixgbe_interrupt *intr = | |
4024 | IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
4025 | int link_up; | |
4026 | int diag; | |
11fdf7f2 | 4027 | int wait = 1; |
11fdf7f2 TL |
4028 | |
4029 | memset(&link, 0, sizeof(link)); | |
4030 | link.link_status = ETH_LINK_DOWN; | |
4031 | link.link_speed = ETH_SPEED_NUM_NONE; | |
4032 | link.link_duplex = ETH_LINK_HALF_DUPLEX; | |
4033 | link.link_autoneg = ETH_LINK_AUTONEG; | |
4034 | ||
4035 | hw->mac.get_link_status = true; | |
4036 | ||
9f95a23c TL |
4037 | if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) |
4038 | return rte_eth_linkstatus_set(dev, &link); | |
11fdf7f2 TL |
4039 | |
4040 | /* check if it needs to wait to complete, if lsc interrupt is enabled */ | |
4041 | if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) | |
4042 | wait = 0; | |
4043 | ||
4044 | if (vf) | |
4045 | diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); | |
4046 | else | |
4047 | diag = ixgbe_check_link(hw, &link_speed, &link_up, wait); | |
4048 | ||
4049 | if (diag != 0) { | |
4050 | link.link_speed = ETH_SPEED_NUM_100M; | |
4051 | link.link_duplex = ETH_LINK_FULL_DUPLEX; | |
4052 | return rte_eth_linkstatus_set(dev, &link); | |
7c673cae FG |
4053 | } |
4054 | ||
4055 | if (link_up == 0) { | |
9f95a23c TL |
4056 | if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { |
4057 | intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; | |
4058 | rte_eal_alarm_set(10, | |
4059 | ixgbe_dev_setup_link_alarm_handler, dev); | |
4060 | } | |
11fdf7f2 | 4061 | return rte_eth_linkstatus_set(dev, &link); |
7c673cae | 4062 | } |
11fdf7f2 | 4063 | |
7c673cae FG |
4064 | link.link_status = ETH_LINK_UP; |
4065 | link.link_duplex = ETH_LINK_FULL_DUPLEX; | |
4066 | ||
4067 | switch (link_speed) { | |
4068 | default: | |
4069 | case IXGBE_LINK_SPEED_UNKNOWN: | |
9f95a23c TL |
4070 | if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || |
4071 | hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) | |
4072 | link.link_speed = ETH_SPEED_NUM_10M; | |
4073 | else | |
4074 | link.link_speed = ETH_SPEED_NUM_100M; | |
7c673cae | 4075 | link.link_duplex = ETH_LINK_FULL_DUPLEX; |
7c673cae FG |
4076 | break; |
4077 | ||
4078 | case IXGBE_LINK_SPEED_100_FULL: | |
4079 | link.link_speed = ETH_SPEED_NUM_100M; | |
4080 | break; | |
4081 | ||
4082 | case IXGBE_LINK_SPEED_1GB_FULL: | |
4083 | link.link_speed = ETH_SPEED_NUM_1G; | |
4084 | break; | |
4085 | ||
11fdf7f2 TL |
4086 | case IXGBE_LINK_SPEED_2_5GB_FULL: |
4087 | link.link_speed = ETH_SPEED_NUM_2_5G; | |
4088 | break; | |
4089 | ||
4090 | case IXGBE_LINK_SPEED_5GB_FULL: | |
4091 | link.link_speed = ETH_SPEED_NUM_5G; | |
4092 | break; | |
4093 | ||
7c673cae FG |
4094 | case IXGBE_LINK_SPEED_10GB_FULL: |
4095 | link.link_speed = ETH_SPEED_NUM_10G; | |
4096 | break; | |
4097 | } | |
7c673cae | 4098 | |
11fdf7f2 TL |
4099 | return rte_eth_linkstatus_set(dev, &link); |
4100 | } | |
7c673cae | 4101 | |
11fdf7f2 TL |
/* PF flavor of link update: delegate to the shared PF/VF path. */
static int
ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	const int is_vf = 0;

	return ixgbe_dev_link_update_share(dev, wait_to_complete, is_vf);
}
4107 | ||
/* VF flavor of link update: delegate to the shared PF/VF path. */
static int
ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	const int is_vf = 1;

	return ixgbe_dev_link_update_share(dev, wait_to_complete, is_vf);
}
4114 | static void | |
4115 | ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) | |
4116 | { | |
4117 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
4118 | uint32_t fctrl; | |
4119 | ||
4120 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); | |
4121 | fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); | |
4122 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); | |
4123 | } | |
4124 | ||
4125 | static void | |
4126 | ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) | |
4127 | { | |
4128 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
4129 | uint32_t fctrl; | |
4130 | ||
4131 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); | |
4132 | fctrl &= (~IXGBE_FCTRL_UPE); | |
4133 | if (dev->data->all_multicast == 1) | |
4134 | fctrl |= IXGBE_FCTRL_MPE; | |
4135 | else | |
4136 | fctrl &= (~IXGBE_FCTRL_MPE); | |
4137 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); | |
4138 | } | |
4139 | ||
4140 | static void | |
4141 | ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) | |
4142 | { | |
4143 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
4144 | uint32_t fctrl; | |
4145 | ||
4146 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); | |
4147 | fctrl |= IXGBE_FCTRL_MPE; | |
4148 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); | |
4149 | } | |
4150 | ||
4151 | static void | |
4152 | ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) | |
4153 | { | |
4154 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
4155 | uint32_t fctrl; | |
4156 | ||
4157 | if (dev->data->promiscuous == 1) | |
4158 | return; /* must remain in all_multicast mode */ | |
4159 | ||
4160 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); | |
4161 | fctrl &= (~IXGBE_FCTRL_MPE); | |
4162 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); | |
4163 | } | |
4164 | ||
4165 | /** | |
4166 | * It clears the interrupt causes and enables the interrupt. | |
4167 | * It will be called once only during nic initialized. | |
4168 | * | |
4169 | * @param dev | |
4170 | * Pointer to struct rte_eth_dev. | |
11fdf7f2 TL |
4171 | * @param on |
4172 | * Enable or Disable. | |
7c673cae FG |
4173 | * |
4174 | * @return | |
4175 | * - On success, zero. | |
4176 | * - On failure, a negative value. | |
4177 | */ | |
4178 | static int | |
11fdf7f2 | 4179 | ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) |
7c673cae FG |
4180 | { |
4181 | struct ixgbe_interrupt *intr = | |
4182 | IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
4183 | ||
4184 | ixgbe_dev_link_status_print(dev); | |
11fdf7f2 TL |
4185 | if (on) |
4186 | intr->mask |= IXGBE_EICR_LSC; | |
4187 | else | |
4188 | intr->mask &= ~IXGBE_EICR_LSC; | |
7c673cae FG |
4189 | |
4190 | return 0; | |
4191 | } | |
4192 | ||
4193 | /** | |
4194 | * It clears the interrupt causes and enables the interrupt. | |
4195 | * It will be called once only during nic initialized. | |
4196 | * | |
4197 | * @param dev | |
4198 | * Pointer to struct rte_eth_dev. | |
4199 | * | |
4200 | * @return | |
4201 | * - On success, zero. | |
4202 | * - On failure, a negative value. | |
4203 | */ | |
4204 | static int | |
4205 | ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) | |
4206 | { | |
4207 | struct ixgbe_interrupt *intr = | |
4208 | IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
4209 | ||
4210 | intr->mask |= IXGBE_EICR_RTX_QUEUE; | |
4211 | ||
4212 | return 0; | |
4213 | } | |
4214 | ||
11fdf7f2 TL |
4215 | /** |
4216 | * It clears the interrupt causes and enables the interrupt. | |
4217 | * It will be called once only during nic initialized. | |
4218 | * | |
4219 | * @param dev | |
4220 | * Pointer to struct rte_eth_dev. | |
4221 | * | |
4222 | * @return | |
4223 | * - On success, zero. | |
4224 | * - On failure, a negative value. | |
4225 | */ | |
4226 | static int | |
4227 | ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev) | |
4228 | { | |
4229 | struct ixgbe_interrupt *intr = | |
4230 | IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
4231 | ||
4232 | intr->mask |= IXGBE_EICR_LINKSEC; | |
4233 | ||
4234 | return 0; | |
4235 | } | |
4236 | ||
7c673cae FG |
/*
 * Read the (read-on-clear) interrupt cause register and record which
 * causes fired in intr->flags for ixgbe_dev_interrupt_action().
 * All causes are masked first via ixgbe_disable_intr(); they are
 * re-enabled later by the action/delayed handlers.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	/* clear all cause mask */
	ixgbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	intr->flags = 0;

	/* set flag for async link update */
	if (eicr & IXGBE_EICR_LSC)
		intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;

	/* PF mailbox request from a VF */
	if (eicr & IXGBE_EICR_MAILBOX)
		intr->flags |= IXGBE_FLAG_MAILBOX;

	/* MACsec event */
	if (eicr & IXGBE_EICR_LINKSEC)
		intr->flags |= IXGBE_FLAG_MACSEC;

	/* External PHY interrupt (X550EM_x with external 10baseT PHY) */
	if (hw->mac.type == ixgbe_mac_X550EM_x &&
	    hw->phy.type == ixgbe_phy_x550em_ext_t &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
		intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;

	return 0;
}
4281 | ||
4282 | /** | |
4283 | * It gets and then prints the link status. | |
4284 | * | |
4285 | * @param dev | |
4286 | * Pointer to struct rte_eth_dev. | |
4287 | * | |
4288 | * @return | |
4289 | * - On success, zero. | |
4290 | * - On failure, a negative value. | |
4291 | */ | |
4292 | static void | |
4293 | ixgbe_dev_link_status_print(struct rte_eth_dev *dev) | |
4294 | { | |
11fdf7f2 | 4295 | struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); |
7c673cae FG |
4296 | struct rte_eth_link link; |
4297 | ||
11fdf7f2 TL |
4298 | rte_eth_linkstatus_get(dev, &link); |
4299 | ||
7c673cae FG |
4300 | if (link.link_status) { |
4301 | PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", | |
4302 | (int)(dev->data->port_id), | |
4303 | (unsigned)link.link_speed, | |
4304 | link.link_duplex == ETH_LINK_FULL_DUPLEX ? | |
4305 | "full-duplex" : "half-duplex"); | |
4306 | } else { | |
4307 | PMD_INIT_LOG(INFO, " Port %d: Link Down", | |
4308 | (int)(dev->data->port_id)); | |
4309 | } | |
4310 | PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, | |
11fdf7f2 TL |
4311 | pci_dev->addr.domain, |
4312 | pci_dev->addr.bus, | |
4313 | pci_dev->addr.devid, | |
4314 | pci_dev->addr.function); | |
7c673cae FG |
4315 | } |
4316 | ||
/*
 * It executes link_update after knowing an interrupt occurred.
 * Services the causes recorded by ixgbe_dev_interrupt_get_status():
 * PF mailbox, external PHY (LASI), and link-status change. For LSC,
 * the final handling is deferred via an alarm so the link can settle
 * (1s when the link is likely coming up, 4s when going down).
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	int64_t timeout;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & IXGBE_FLAG_MAILBOX) {
		ixgbe_pf_mbx_process(dev);
		intr->flags &= ~IXGBE_FLAG_MAILBOX;
	}

	if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
		ixgbe_handle_lasi(hw);
		intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/* get the link status before link update, for predicting later */
		rte_eth_linkstatus_get(dev, &link);

		ixgbe_dev_link_update(dev, 0);

		/* likely to up */
		if (!link.link_status)
			/* handle it 1 sec later, wait it being stable */
			timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to down */
		else
			/* handle it 4 sec later, wait it being stable */
			timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;

		ixgbe_dev_link_status_print(dev);
		if (rte_eal_alarm_set(timeout * 1000,
				      ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
			PMD_DRV_LOG(ERR, "Error setting alarm");
		else {
			/* remember original mask */
			intr->mask_original = intr->mask;
			/* only disable lsc interrupt */
			intr->mask &= ~IXGBE_EIMS_LSC;
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	ixgbe_enable_intr(dev);

	return 0;
}
4382 | ||
/**
 * Interrupt handler which shall be registered for alarm callback for delayed
 * handling specific interrupt to wait for the stable nic state. As the
 * NIC interrupt state is not stable for ixgbe after link is just down,
 * it needs to wait 4 seconds to get the stable status.
 *
 * Re-checks EICR for mailbox/PHY causes, completes the deferred link
 * update, fires the LSC/MACSEC application callbacks, then restores the
 * interrupt mask saved by ixgbe_dev_interrupt_action().
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ixgbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t eicr;

	/* Mask all causes while we service the delayed work. */
	ixgbe_disable_intr(hw);

	/* EICR is read-on-clear; re-check for a pending VF mailbox. */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (eicr & IXGBE_EICR_MAILBOX)
		ixgbe_pf_mbx_process(dev);

	if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
		ixgbe_handle_lasi(hw);
		intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
	}

	if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		ixgbe_dev_link_update(dev, 0);
		intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
		ixgbe_dev_link_status_print(dev);
		/* Notify the application of the link-status change. */
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}

	if (intr->flags & IXGBE_FLAG_MACSEC) {
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					      NULL);
		intr->flags &= ~IXGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask = intr->mask_original;
	intr->mask_original = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	ixgbe_enable_intr(dev);
	rte_intr_enable(intr_handle);
}
4442 | ||
/**
 * Top-level interrupt handler registered with the NIC: snapshot the
 * interrupt causes, then service them.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ixgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;

	ixgbe_dev_interrupt_get_status(dev);
	ixgbe_dev_interrupt_action(dev);
}
4463 | ||
4464 | static int | |
4465 | ixgbe_dev_led_on(struct rte_eth_dev *dev) | |
4466 | { | |
4467 | struct ixgbe_hw *hw; | |
4468 | ||
4469 | hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
4470 | return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; | |
4471 | } | |
4472 | ||
4473 | static int | |
4474 | ixgbe_dev_led_off(struct rte_eth_dev *dev) | |
4475 | { | |
4476 | struct ixgbe_hw *hw; | |
4477 | ||
4478 | hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
4479 | return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; | |
4480 | } | |
4481 | ||
4482 | static int | |
4483 | ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) | |
4484 | { | |
4485 | struct ixgbe_hw *hw; | |
4486 | uint32_t mflcn_reg; | |
4487 | uint32_t fccfg_reg; | |
4488 | int rx_pause; | |
4489 | int tx_pause; | |
4490 | ||
4491 | hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
4492 | ||
4493 | fc_conf->pause_time = hw->fc.pause_time; | |
4494 | fc_conf->high_water = hw->fc.high_water[0]; | |
4495 | fc_conf->low_water = hw->fc.low_water[0]; | |
4496 | fc_conf->send_xon = hw->fc.send_xon; | |
4497 | fc_conf->autoneg = !hw->fc.disable_fc_autoneg; | |
4498 | ||
4499 | /* | |
4500 | * Return rx_pause status according to actual setting of | |
4501 | * MFLCN register. | |
4502 | */ | |
4503 | mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); | |
4504 | if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE)) | |
4505 | rx_pause = 1; | |
4506 | else | |
4507 | rx_pause = 0; | |
4508 | ||
4509 | /* | |
4510 | * Return tx_pause status according to actual setting of | |
4511 | * FCCFG register. | |
4512 | */ | |
4513 | fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); | |
4514 | if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY)) | |
4515 | tx_pause = 1; | |
4516 | else | |
4517 | tx_pause = 0; | |
4518 | ||
4519 | if (rx_pause && tx_pause) | |
4520 | fc_conf->mode = RTE_FC_FULL; | |
4521 | else if (rx_pause) | |
4522 | fc_conf->mode = RTE_FC_RX_PAUSE; | |
4523 | else if (tx_pause) | |
4524 | fc_conf->mode = RTE_FC_TX_PAUSE; | |
4525 | else | |
4526 | fc_conf->mode = RTE_FC_NONE; | |
4527 | ||
4528 | return 0; | |
4529 | } | |
4530 | ||
4531 | static int | |
4532 | ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) | |
4533 | { | |
4534 | struct ixgbe_hw *hw; | |
4535 | int err; | |
4536 | uint32_t rx_buf_size; | |
4537 | uint32_t max_high_water; | |
4538 | uint32_t mflcn; | |
4539 | enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { | |
4540 | ixgbe_fc_none, | |
4541 | ixgbe_fc_rx_pause, | |
4542 | ixgbe_fc_tx_pause, | |
4543 | ixgbe_fc_full | |
4544 | }; | |
4545 | ||
4546 | PMD_INIT_FUNC_TRACE(); | |
4547 | ||
4548 | hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
4549 | rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); | |
4550 | PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); | |
4551 | ||
4552 | /* | |
4553 | * At least reserve one Ethernet frame for watermark | |
4554 | * high_water/low_water in kilo bytes for ixgbe | |
4555 | */ | |
4556 | max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; | |
4557 | if ((fc_conf->high_water > max_high_water) || | |
4558 | (fc_conf->high_water < fc_conf->low_water)) { | |
4559 | PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); | |
4560 | PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); | |
4561 | return -EINVAL; | |
4562 | } | |
4563 | ||
4564 | hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; | |
4565 | hw->fc.pause_time = fc_conf->pause_time; | |
4566 | hw->fc.high_water[0] = fc_conf->high_water; | |
4567 | hw->fc.low_water[0] = fc_conf->low_water; | |
4568 | hw->fc.send_xon = fc_conf->send_xon; | |
4569 | hw->fc.disable_fc_autoneg = !fc_conf->autoneg; | |
4570 | ||
4571 | err = ixgbe_fc_enable(hw); | |
4572 | ||
4573 | /* Not negotiated is not an error case */ | |
4574 | if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) { | |
4575 | ||
4576 | /* check if we want to forward MAC frames - driver doesn't have native | |
4577 | * capability to do that, so we'll write the registers ourselves */ | |
4578 | ||
4579 | mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); | |
4580 | ||
4581 | /* set or clear MFLCN.PMCF bit depending on configuration */ | |
4582 | if (fc_conf->mac_ctrl_frame_fwd != 0) | |
4583 | mflcn |= IXGBE_MFLCN_PMCF; | |
4584 | else | |
4585 | mflcn &= ~IXGBE_MFLCN_PMCF; | |
4586 | ||
4587 | IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn); | |
4588 | IXGBE_WRITE_FLUSH(hw); | |
4589 | ||
4590 | return 0; | |
4591 | } | |
4592 | ||
4593 | PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err); | |
4594 | return -EIO; | |
4595 | } | |
4596 | ||
/**
 * ixgbe_pfc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 * @tc_num: traffic class number
 * Enable flow control according to the current settings.
 *
 * Validates the per-TC watermarks, negotiates the fc mode, then programs
 * MFLCN/FCCFG, the per-TC FCRTL/FCRTH watermark registers, the pause
 * timers (FCTTV) and the refresh threshold (FCRTV).
 *
 * Returns 0 on success or an IXGBE_ERR_* code on invalid settings.
 */
static int
ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
{
	int ret_val = 0;
	uint32_t mflcn_reg, fccfg_reg;
	uint32_t reg;
	uint32_t fcrtl, fcrth;
	uint8_t i;
	uint8_t nb_rx_en;

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
		/* High/Low water can not be 0 */
		if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
			PMD_INIT_LOG(ERR, "Invalid water mark configuration");
			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
			goto out;
		}

		if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
			PMD_INIT_LOG(ERR, "Invalid water mark configuration");
			ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
			goto out;
		}
	}
	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * If the count of enabled RX Priority Flow control >1,
		 * and the TX pause can not be disabled
		 */
		nb_rx_en = 0;
		for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			if (reg & IXGBE_FCRTH_FCEN)
				nb_rx_en++;
		}
		if (nb_rx_en > 1)
			fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RPFCE;
		/*
		 * If the count of enabled RX Priority Flow control >1,
		 * and the TX pause can not be disabled
		 */
		nb_rx_en = 0;
		for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
			reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			if (reg & IXGBE_FCRTH_FCEN)
				nb_rx_en++;
		}
		if (nb_rx_en > 1)
			fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RPFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
		break;
	default:
		PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
	    hw->fc.high_water[tc_num]) {
		/* Watermarks are in KB; shift by 10 converts to bytes. */
		fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
		fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
		/*
		 * In order to prevent Tx hangs when the internal Tx
		 * switch is enabled we must set the high water mark
		 * to the maximum FCRTH value. This allows the Tx
		 * switch to function even under heavy Rx workloads.
		 */
		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
4734 | ||
4735 | static int | |
4736 | ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) | |
4737 | { | |
4738 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
4739 | int32_t ret_val = IXGBE_NOT_IMPLEMENTED; | |
4740 | ||
4741 | if (hw->mac.type != ixgbe_mac_82598EB) { | |
4742 | ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); | |
4743 | } | |
4744 | return ret_val; | |
4745 | } | |
4746 | ||
/**
 * Configure priority flow control (PFC) for one 802.1p priority.
 *
 * Resolves the priority to its traffic class via the CEE Rx user-priority
 * map, validates the requested high/low watermarks against that TC's Rx
 * packet buffer size, stores the parameters in hw->fc and programs the
 * hardware through ixgbe_dcb_pfc_enable().
 *
 * @return 0 on success or when flow control was simply not negotiated,
 *         -EINVAL for invalid watermarks, -EIO on other hardware failure.
 */
static int
ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
{
	int err;
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint8_t tc_num;
	uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_dcb_config *dcb_config =
		IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);

	/* Positional translation: rte_eth_fc_mode value indexes this table. */
	enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
		ixgbe_fc_none,
		ixgbe_fc_rx_pause,
		ixgbe_fc_tx_pause,
		ixgbe_fc_full
	};

	PMD_INIT_FUNC_TRACE();

	/* Map the user priority to its traffic class, then read that TC's
	 * Rx packet buffer size to bound the watermarks.
	 */
	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
	tc_num = map[pfc_conf->priority];
	rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
	PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
	/*
	 * At least reserve one Ethernet frame for watermark
	 * high_water/low_water in kilo bytes for ixgbe
	 */
	/* NOTE(review): assumes rx_buf_size >= ETHER_MAX_LEN; otherwise the
	 * unsigned subtraction would wrap — confirm RXPBSIZE cannot be that
	 * small on supported hardware.
	 */
	max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
	if ((pfc_conf->fc.high_water > max_high_water) ||
	    (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
		PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
		PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
		return -EINVAL;
	}

	/* Record the requested configuration; consumed by the enable path. */
	hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
	hw->fc.pause_time = pfc_conf->fc.pause_time;
	hw->fc.send_xon = pfc_conf->fc.send_xon;
	hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
	hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;

	err = ixgbe_dcb_pfc_enable(dev, tc_num);

	/* Not negotiated is not an error case */
	if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
		return 0;

	PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
	return -EIO;
}
4800 | ||
/**
 * Update the RSS redirection table (RETA) from reta_conf.
 *
 * The RETA is written four 8-bit entries at a time (one 32-bit register).
 * For partially-masked groups the register is read first so unmasked
 * entries are preserved; fully-masked groups skip the read.
 *
 * @return 0 on success, -ENOTSUP if the MAC has no writable RETA,
 *         -EINVAL if reta_size does not match the hardware table size.
 */
static int
ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint16_t i, sp_reta_size;
	uint8_t j, mask;
	uint32_t reta, r;
	uint16_t idx, shift;
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t reta_reg;

	PMD_INIT_FUNC_TRACE();

	if (!ixgbe_rss_update_sp(hw->mac.type)) {
		PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
			"NIC.");
		return -ENOTSUP;
	}

	sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
	if (reta_size != sp_reta_size) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can supported "
			"(%d)", reta_size, sp_reta_size);
		return -EINVAL;
	}

	/* Process four entries (one register) per iteration. */
	for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		/* 4-bit mask selects which of the four bytes to update. */
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IXGBE_4_BIT_MASK);
		if (!mask)
			continue;
		reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
		if (mask == IXGBE_4_BIT_MASK)
			r = 0;	/* all four bytes replaced: skip the read */
		else
			r = IXGBE_READ_REG(hw, reta_reg);
		/* Merge new entries with the preserved bytes of r. */
		for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
			if (mask & (0x1 << j))
				reta |= reta_conf[idx].reta[shift + j] <<
					(CHAR_BIT * j);
			else
				reta |= r & (IXGBE_8_BIT_MASK <<
					(CHAR_BIT * j));
		}
		IXGBE_WRITE_REG(hw, reta_reg, reta);
	}
	/* Remember a user-supplied RETA exists so later paths keep it. */
	adapter->rss_reta_updated = 1;

	return 0;
}
4857 | ||
4858 | static int | |
4859 | ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, | |
4860 | struct rte_eth_rss_reta_entry64 *reta_conf, | |
4861 | uint16_t reta_size) | |
4862 | { | |
4863 | uint16_t i, sp_reta_size; | |
4864 | uint8_t j, mask; | |
4865 | uint32_t reta; | |
4866 | uint16_t idx, shift; | |
4867 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
4868 | uint32_t reta_reg; | |
4869 | ||
4870 | PMD_INIT_FUNC_TRACE(); | |
4871 | sp_reta_size = ixgbe_reta_size_get(hw->mac.type); | |
4872 | if (reta_size != sp_reta_size) { | |
4873 | PMD_DRV_LOG(ERR, "The size of hash lookup table configured " | |
4874 | "(%d) doesn't match the number hardware can supported " | |
11fdf7f2 | 4875 | "(%d)", reta_size, sp_reta_size); |
7c673cae FG |
4876 | return -EINVAL; |
4877 | } | |
4878 | ||
4879 | for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { | |
4880 | idx = i / RTE_RETA_GROUP_SIZE; | |
4881 | shift = i % RTE_RETA_GROUP_SIZE; | |
4882 | mask = (uint8_t)((reta_conf[idx].mask >> shift) & | |
4883 | IXGBE_4_BIT_MASK); | |
4884 | if (!mask) | |
4885 | continue; | |
4886 | ||
4887 | reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); | |
4888 | reta = IXGBE_READ_REG(hw, reta_reg); | |
4889 | for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { | |
4890 | if (mask & (0x1 << j)) | |
4891 | reta_conf[idx].reta[shift + j] = | |
4892 | ((reta >> (CHAR_BIT * j)) & | |
4893 | IXGBE_8_BIT_MASK); | |
4894 | } | |
4895 | } | |
4896 | ||
4897 | return 0; | |
4898 | } | |
4899 | ||
11fdf7f2 | 4900 | static int |
7c673cae FG |
4901 | ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, |
4902 | uint32_t index, uint32_t pool) | |
4903 | { | |
4904 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
4905 | uint32_t enable_addr = 1; | |
4906 | ||
11fdf7f2 TL |
4907 | return ixgbe_set_rar(hw, index, mac_addr->addr_bytes, |
4908 | pool, enable_addr); | |
7c673cae FG |
4909 | } |
4910 | ||
4911 | static void | |
4912 | ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) | |
4913 | { | |
4914 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
4915 | ||
4916 | ixgbe_clear_rar(hw, index); | |
4917 | } | |
4918 | ||
11fdf7f2 | 4919 | static int |
7c673cae FG |
4920 | ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) |
4921 | { | |
11fdf7f2 TL |
4922 | struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); |
4923 | ||
7c673cae | 4924 | ixgbe_remove_rar(dev, 0); |
11fdf7f2 | 4925 | ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs); |
7c673cae | 4926 | |
11fdf7f2 | 4927 | return 0; |
7c673cae FG |
4928 | } |
4929 | ||
11fdf7f2 TL |
4930 | static bool |
4931 | is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) | |
7c673cae | 4932 | { |
11fdf7f2 TL |
4933 | if (strcmp(dev->device->driver->name, drv->driver.name)) |
4934 | return false; | |
7c673cae | 4935 | |
11fdf7f2 TL |
4936 | return true; |
4937 | } | |
7c673cae | 4938 | |
11fdf7f2 TL |
4939 | bool |
4940 | is_ixgbe_supported(struct rte_eth_dev *dev) | |
4941 | { | |
4942 | return is_device_supported(dev, &rte_ixgbe_pmd); | |
7c673cae FG |
4943 | } |
4944 | ||
/**
 * Set the port MTU.
 *
 * Validates the resulting frame size against the device maximum, refuses
 * an MTU that would require scattered Rx on a running port that did not
 * enable it, then reprograms HLREG0 (jumbo enable) and MAXFRS (max frame
 * size) accordingly.
 *
 * @return 0 on success, -EINVAL for out-of-range MTU or when the port must
 *         be stopped first.
 */
static int
ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	uint32_t hlreg0;
	uint32_t maxfrs;
	struct ixgbe_hw *hw;
	struct rte_eth_dev_info dev_info;
	uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD;
	struct rte_eth_dev_data *dev_data = dev->data;

	ixgbe_dev_info_get(dev, &dev_info);

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* If device is started, refuse mtu that requires the support of
	 * scattered packets when this feature has not been enabled before.
	 */
	if (dev_data->dev_started && !dev_data->scattered_rx &&
	    (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
		PMD_INIT_LOG(ERR, "Stop port first.");
		return -EINVAL;
	}

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);

	/* switch to jumbo mode if needed */
	if (frame_size > ETHER_MAX_LEN) {
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
	} else {
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
	}
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* MAXFRS holds the max frame size in its upper 16 bits. */
	maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
	maxfrs &= 0x0000FFFF;
	maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
	IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);

	return 0;
}
4996 | ||
4997 | /* | |
4998 | * Virtual Function operations | |
4999 | */ | |
5000 | static void | |
11fdf7f2 | 5001 | ixgbevf_intr_disable(struct rte_eth_dev *dev) |
7c673cae | 5002 | { |
11fdf7f2 TL |
5003 | struct ixgbe_interrupt *intr = |
5004 | IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
5005 | struct ixgbe_hw *hw = | |
5006 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
5007 | ||
7c673cae FG |
5008 | PMD_INIT_FUNC_TRACE(); |
5009 | ||
5010 | /* Clear interrupt mask to stop from interrupts being generated */ | |
5011 | IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); | |
5012 | ||
5013 | IXGBE_WRITE_FLUSH(hw); | |
11fdf7f2 TL |
5014 | |
5015 | /* Clear mask value. */ | |
5016 | intr->mask = 0; | |
7c673cae FG |
5017 | } |
5018 | ||
5019 | static void | |
11fdf7f2 | 5020 | ixgbevf_intr_enable(struct rte_eth_dev *dev) |
7c673cae | 5021 | { |
11fdf7f2 TL |
5022 | struct ixgbe_interrupt *intr = |
5023 | IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
5024 | struct ixgbe_hw *hw = | |
5025 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
5026 | ||
7c673cae FG |
5027 | PMD_INIT_FUNC_TRACE(); |
5028 | ||
5029 | /* VF enable interrupt autoclean */ | |
5030 | IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK); | |
5031 | IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK); | |
5032 | IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); | |
5033 | ||
5034 | IXGBE_WRITE_FLUSH(hw); | |
11fdf7f2 TL |
5035 | |
5036 | /* Save IXGBE_VTEIMS value to mask. */ | |
5037 | intr->mask = IXGBE_VF_IRQ_ENABLE_MASK; | |
7c673cae FG |
5038 | } |
5039 | ||
5040 | static int | |
5041 | ixgbevf_dev_configure(struct rte_eth_dev *dev) | |
5042 | { | |
5043 | struct rte_eth_conf *conf = &dev->data->dev_conf; | |
5044 | struct ixgbe_adapter *adapter = | |
5045 | (struct ixgbe_adapter *)dev->data->dev_private; | |
5046 | ||
5047 | PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", | |
5048 | dev->data->port_id); | |
5049 | ||
5050 | /* | |
5051 | * VF has no ability to enable/disable HW CRC | |
5052 | * Keep the persistent behavior the same as Host PF | |
5053 | */ | |
5054 | #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC | |
9f95a23c | 5055 | if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { |
7c673cae | 5056 | PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); |
9f95a23c | 5057 | conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; |
7c673cae FG |
5058 | } |
5059 | #else | |
9f95a23c | 5060 | if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) { |
7c673cae | 5061 | PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); |
9f95a23c | 5062 | conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; |
7c673cae FG |
5063 | } |
5064 | #endif | |
5065 | ||
5066 | /* | |
5067 | * Initialize to TRUE. If any of Rx queues doesn't meet the bulk | |
5068 | * allocation or vector Rx preconditions we will reset it. | |
5069 | */ | |
5070 | adapter->rx_bulk_alloc_allowed = true; | |
5071 | adapter->rx_vec_allowed = true; | |
5072 | ||
5073 | return 0; | |
5074 | } | |
5075 | ||
/**
 * Start the VF port.
 *
 * Sequence: cancel any pending link-setup alarm, reset the VF MAC,
 * negotiate the mailbox API with the PF, initialize Tx then Rx rings,
 * restore VLAN filters and strip settings, start the datapath, wire up
 * queue interrupt vectors, enable interrupts, and finally refresh link
 * status. The ordering is dictated by the hardware bring-up requirements.
 *
 * @return 0 on success, negative errno / ixgbe error code on failure.
 */
static int
ixgbevf_dev_start(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t intr_vector = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	int err, mask = 0;

	PMD_INIT_FUNC_TRACE();

	/* Stop the link setup handler before resetting the HW. */
	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
		return err;
	}
	hw->mac.get_link_status = true;

	/* negotiate mailbox API version to use with the PF. */
	ixgbevf_negotiate_api(hw);

	ixgbevf_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ixgbevf_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
		ixgbe_dev_clear_queues(dev);
		return err;
	}

	/* Restore the shadow VLAN filter table into hardware. */
	ixgbevf_set_vfta_all(dev, 1);

	/* Apply per-queue HW VLAN strip configuration. */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK;
	err = ixgbevf_vlan_offload_config(dev, mask);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
		ixgbe_dev_clear_queues(dev);
		return err;
	}

	ixgbevf_dev_rxtx_start(dev);

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		/* According to datasheet, only vector 0/1/2 can be used,
		 * now only one vector is used for Rx queue
		 */
		intr_vector = 1;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}
	ixgbevf_configure_msix(dev);

	/* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
	 * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ).
	 * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( )
	 * is not cleared, it will fail when following rte_intr_enable( ) tries
	 * to map Rx queue interrupt to other VFIO vectors.
	 * So clear uio/vfio intr/eventfd first to avoid failure.
	 */
	rte_intr_disable(intr_handle);

	rte_intr_enable(intr_handle);

	/* Re-enable interrupt for VF */
	ixgbevf_intr_enable(dev);

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	ixgbevf_dev_link_update(dev, 0);

	return 0;
}
5172 | ||
/**
 * Stop the VF port.
 *
 * Cancels the link-setup alarm, masks interrupts, stops the adapter,
 * clears the hardware VLAN filters (the shadow copy is retained so a
 * later start can restore them), releases queues and interrupt vector
 * mappings, and drops the user-RETA flag.
 */
static void
ixgbevf_dev_stop(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);

	ixgbevf_intr_disable(dev);

	hw->adapter_stopped = 1;
	ixgbe_stop_adapter(hw);

	/*
	 * Clear what we set, but we still keep shadow_vfta to
	 * restore after device starts
	 */
	ixgbevf_set_vfta_all(dev, 0);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;

	ixgbe_dev_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	adapter->rss_reta_updated = 0;
}
5211 | ||
/**
 * Close the VF port: reset the MAC, stop the datapath, free the queues
 * and remove the VF MAC address. The reset is issued before stop on
 * purpose so the device is quiesced before teardown.
 */
static void
ixgbevf_dev_close(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	ixgbe_reset_hw(hw);

	ixgbevf_dev_stop(dev);

	ixgbe_dev_free_queues(dev);

	/**
	 * Remove the VF MAC address to ensure
	 * that the VF traffic goes to the PF
	 * after stop, close and detach of the VF
	 **/
	ixgbevf_remove_mac_addr(dev, 0);
}
5232 | ||
/**
 * Reset the VF device by uninitializing and re-initializing it.
 *
 * @return 0 on success; the uninit error if teardown fails, otherwise the
 *         result of re-initialization.
 */
static int
ixgbevf_dev_reset(struct rte_eth_dev *dev)
{
	int ret = eth_ixgbevf_dev_uninit(dev);

	if (ret != 0)
		return ret;

	return eth_ixgbevf_dev_init(dev);
}
7c673cae FG |
5250 | static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) |
5251 | { | |
5252 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
5253 | struct ixgbe_vfta *shadow_vfta = | |
5254 | IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); | |
5255 | int i = 0, j = 0, vfta = 0, mask = 1; | |
5256 | ||
5257 | for (i = 0; i < IXGBE_VFTA_SIZE; i++) { | |
5258 | vfta = shadow_vfta->vfta[i]; | |
5259 | if (vfta) { | |
5260 | mask = 1; | |
5261 | for (j = 0; j < 32; j++) { | |
5262 | if (vfta & mask) | |
5263 | ixgbe_set_vfta(hw, (i<<5)+j, 0, | |
5264 | on, false); | |
5265 | mask <<= 1; | |
5266 | } | |
5267 | } | |
5268 | } | |
5269 | ||
5270 | } | |
5271 | ||
5272 | static int | |
5273 | ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) | |
5274 | { | |
5275 | struct ixgbe_hw *hw = | |
5276 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
5277 | struct ixgbe_vfta *shadow_vfta = | |
5278 | IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); | |
5279 | uint32_t vid_idx = 0; | |
5280 | uint32_t vid_bit = 0; | |
5281 | int ret = 0; | |
5282 | ||
5283 | PMD_INIT_FUNC_TRACE(); | |
5284 | ||
5285 | /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ | |
5286 | ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); | |
5287 | if (ret) { | |
5288 | PMD_INIT_LOG(ERR, "Unable to set VF vlan"); | |
5289 | return ret; | |
5290 | } | |
5291 | vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); | |
5292 | vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); | |
5293 | ||
5294 | /* Save what we set and retore it after device reset */ | |
5295 | if (on) | |
5296 | shadow_vfta->vfta[vid_idx] |= vid_bit; | |
5297 | else | |
5298 | shadow_vfta->vfta[vid_idx] &= ~vid_bit; | |
5299 | ||
5300 | return 0; | |
5301 | } | |
5302 | ||
5303 | static void | |
5304 | ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) | |
5305 | { | |
5306 | struct ixgbe_hw *hw = | |
5307 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
5308 | uint32_t ctrl; | |
5309 | ||
5310 | PMD_INIT_FUNC_TRACE(); | |
5311 | ||
5312 | if (queue >= hw->mac.max_rx_queues) | |
5313 | return; | |
5314 | ||
5315 | ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); | |
5316 | if (on) | |
5317 | ctrl |= IXGBE_RXDCTL_VME; | |
5318 | else | |
5319 | ctrl &= ~IXGBE_RXDCTL_VME; | |
5320 | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); | |
5321 | ||
5322 | ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); | |
5323 | } | |
5324 | ||
11fdf7f2 TL |
5325 | static int |
5326 | ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask) | |
7c673cae | 5327 | { |
11fdf7f2 | 5328 | struct ixgbe_rx_queue *rxq; |
7c673cae FG |
5329 | uint16_t i; |
5330 | int on = 0; | |
5331 | ||
5332 | /* VF function only support hw strip feature, others are not support */ | |
5333 | if (mask & ETH_VLAN_STRIP_MASK) { | |
11fdf7f2 TL |
5334 | for (i = 0; i < dev->data->nb_rx_queues; i++) { |
5335 | rxq = dev->data->rx_queues[i]; | |
5336 | on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); | |
7c673cae | 5337 | ixgbevf_vlan_strip_queue_set(dev, i, on); |
11fdf7f2 | 5338 | } |
7c673cae | 5339 | } |
11fdf7f2 TL |
5340 | |
5341 | return 0; | |
7c673cae FG |
5342 | } |
5343 | ||
/**
 * ethdev vlan_offload_set callback for the VF.
 *
 * Records the strip request in the per-queue offload flags, then pushes
 * the configuration to hardware.
 *
 * @return 0 always
 */
static int
ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	ixgbe_config_vlan_strip_on_all_queues(dev, mask);
	ixgbevf_vlan_offload_config(dev, mask);

	return 0;
}
5353 | ||
5354 | int | |
5355 | ixgbe_vt_check(struct ixgbe_hw *hw) | |
7c673cae FG |
5356 | { |
5357 | uint32_t reg_val; | |
5358 | ||
11fdf7f2 | 5359 | /* if Virtualization Technology is enabled */ |
7c673cae FG |
5360 | reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); |
5361 | if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { | |
11fdf7f2 | 5362 | PMD_INIT_LOG(ERR, "VT must be enabled for this setting"); |
7c673cae FG |
5363 | return -1; |
5364 | } | |
5365 | ||
5366 | return 0; | |
5367 | } | |
5368 | ||
5369 | static uint32_t | |
5370 | ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr) | |
5371 | { | |
5372 | uint32_t vector = 0; | |
5373 | ||
5374 | switch (hw->mac.mc_filter_type) { | |
5375 | case 0: /* use bits [47:36] of the address */ | |
5376 | vector = ((uc_addr->addr_bytes[4] >> 4) | | |
5377 | (((uint16_t)uc_addr->addr_bytes[5]) << 4)); | |
5378 | break; | |
5379 | case 1: /* use bits [46:35] of the address */ | |
5380 | vector = ((uc_addr->addr_bytes[4] >> 3) | | |
5381 | (((uint16_t)uc_addr->addr_bytes[5]) << 5)); | |
5382 | break; | |
5383 | case 2: /* use bits [45:34] of the address */ | |
5384 | vector = ((uc_addr->addr_bytes[4] >> 2) | | |
5385 | (((uint16_t)uc_addr->addr_bytes[5]) << 6)); | |
5386 | break; | |
5387 | case 3: /* use bits [43:32] of the address */ | |
5388 | vector = ((uc_addr->addr_bytes[4]) | | |
5389 | (((uint16_t)uc_addr->addr_bytes[5]) << 8)); | |
5390 | break; | |
5391 | default: /* Invalid mc_filter_type */ | |
5392 | break; | |
5393 | } | |
5394 | ||
5395 | /* vector can only be 12-bits or boundary will be exceeded */ | |
5396 | vector &= 0xFFF; | |
5397 | return vector; | |
5398 | } | |
5399 | ||
/**
 * Add or remove one unicast address from the Unicast Table Array (UTA)
 * hash filter.
 *
 * The address is hashed to a 12-bit vector, split into a register index
 * and bit position; both the hardware register and the driver's shadow
 * copy are updated, and MCSTCTRL's MFE bit is kept in sync with whether
 * any UTA entries remain in use.
 *
 * @return 0 on success or no-op (bit already in the requested state),
 *         -ENOTSUP on pre-82599 hardware which has no UTA.
 */
static int
ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
			uint8_t on)
{
	uint32_t vector;
	uint32_t uta_idx;
	uint32_t reg_val;
	uint32_t uta_shift;
	uint32_t rc;
	const uint32_t ixgbe_uta_idx_mask = 0x7F;
	const uint32_t ixgbe_uta_bit_shift = 5;
	const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
	const uint32_t bit1 = 0x1;

	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_uta_info *uta_info =
		IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);

	/* The UTA table only exists on 82599 hardware and newer */
	if (hw->mac.type < ixgbe_mac_82599EB)
		return -ENOTSUP;

	/* Split the 12-bit hash into register index (upper 7 bits) and
	 * bit position within the register (lower 5 bits).
	 */
	vector = ixgbe_uta_vector(hw, mac_addr);
	uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
	uta_shift = vector & ixgbe_uta_bit_mask;

	/* Already in the requested state: nothing to do. */
	rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
	if (rc == on)
		return 0;

	/* Read-modify-write the register and keep the shadow in sync. */
	reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
	if (on) {
		uta_info->uta_in_use++;
		reg_val |= (bit1 << uta_shift);
		uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
	} else {
		uta_info->uta_in_use--;
		reg_val &= ~(bit1 << uta_shift);
		uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
	}

	IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);

	/* Enable the multicast filter (MFE) only while entries are in use. */
	if (uta_info->uta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
	else
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	return 0;
}
5452 | ||
5453 | static int | |
5454 | ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) | |
5455 | { | |
5456 | int i; | |
5457 | struct ixgbe_hw *hw = | |
5458 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
5459 | struct ixgbe_uta_info *uta_info = | |
5460 | IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); | |
5461 | ||
5462 | /* The UTA table only exists on 82599 hardware and newer */ | |
5463 | if (hw->mac.type < ixgbe_mac_82599EB) | |
5464 | return -ENOTSUP; | |
5465 | ||
5466 | if (on) { | |
5467 | for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { | |
5468 | uta_info->uta_shadow[i] = ~0; | |
5469 | IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); | |
5470 | } | |
5471 | } else { | |
5472 | for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { | |
5473 | uta_info->uta_shadow[i] = 0; | |
5474 | IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); | |
5475 | } | |
5476 | } | |
5477 | return 0; | |
5478 | ||
5479 | } | |
5480 | ||
5481 | uint32_t | |
5482 | ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) | |
5483 | { | |
5484 | uint32_t new_val = orig_val; | |
5485 | ||
5486 | if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) | |
5487 | new_val |= IXGBE_VMOLR_AUPE; | |
5488 | if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC) | |
5489 | new_val |= IXGBE_VMOLR_ROMPE; | |
5490 | if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) | |
5491 | new_val |= IXGBE_VMOLR_ROPE; | |
5492 | if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) | |
5493 | new_val |= IXGBE_VMOLR_BAM; | |
5494 | if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) | |
5495 | new_val |= IXGBE_VMOLR_MPE; | |
5496 | ||
5497 | return new_val; | |
5498 | } | |
5499 | ||
11fdf7f2 TL |
5500 | #define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ |
5501 | #define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */ | |
5502 | #define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */ | |
5503 | #define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */ | |
5504 | #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \ | |
5505 | ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \ | |
5506 | ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN)) | |
7c673cae FG |
5507 | |
5508 | static int | |
11fdf7f2 TL |
5509 | ixgbe_mirror_rule_set(struct rte_eth_dev *dev, |
5510 | struct rte_eth_mirror_conf *mirror_conf, | |
5511 | uint8_t rule_id, uint8_t on) | |
7c673cae | 5512 | { |
11fdf7f2 TL |
5513 | uint32_t mr_ctl, vlvf; |
5514 | uint32_t mp_lsb = 0; | |
5515 | uint32_t mv_msb = 0; | |
5516 | uint32_t mv_lsb = 0; | |
5517 | uint32_t mp_msb = 0; | |
5518 | uint8_t i = 0; | |
5519 | int reg_index = 0; | |
5520 | uint64_t vlan_mask = 0; | |
5521 | ||
5522 | const uint8_t pool_mask_offset = 32; | |
5523 | const uint8_t vlan_mask_offset = 32; | |
5524 | const uint8_t dst_pool_offset = 8; | |
5525 | const uint8_t rule_mr_offset = 4; | |
5526 | const uint8_t mirror_rule_mask = 0x0F; | |
7c673cae | 5527 | |
11fdf7f2 TL |
5528 | struct ixgbe_mirror_info *mr_info = |
5529 | (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); | |
7c673cae FG |
5530 | struct ixgbe_hw *hw = |
5531 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
11fdf7f2 | 5532 | uint8_t mirror_type = 0; |
7c673cae | 5533 | |
11fdf7f2 | 5534 | if (ixgbe_vt_check(hw) < 0) |
7c673cae FG |
5535 | return -ENOTSUP; |
5536 | ||
11fdf7f2 | 5537 | if (rule_id >= IXGBE_MAX_MIRROR_RULES) |
7c673cae FG |
5538 | return -EINVAL; |
5539 | ||
11fdf7f2 TL |
5540 | if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) { |
5541 | PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.", | |
5542 | mirror_conf->rule_type); | |
5543 | return -EINVAL; | |
7c673cae FG |
5544 | } |
5545 | ||
5546 | if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { | |
5547 | mirror_type |= IXGBE_MRCTL_VLME; | |
11fdf7f2 TL |
5548 | /* Check if vlan id is valid and find conresponding VLAN ID |
5549 | * index in VLVF | |
5550 | */ | |
7c673cae FG |
5551 | for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) { |
5552 | if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { | |
11fdf7f2 TL |
5553 | /* search vlan id related pool vlan filter |
5554 | * index | |
5555 | */ | |
5556 | reg_index = ixgbe_find_vlvf_slot( | |
5557 | hw, | |
5558 | mirror_conf->vlan.vlan_id[i], | |
5559 | false); | |
7c673cae FG |
5560 | if (reg_index < 0) |
5561 | return -EINVAL; | |
11fdf7f2 TL |
5562 | vlvf = IXGBE_READ_REG(hw, |
5563 | IXGBE_VLVF(reg_index)); | |
7c673cae FG |
5564 | if ((vlvf & IXGBE_VLVF_VIEN) && |
5565 | ((vlvf & IXGBE_VLVF_VLANID_MASK) == | |
5566 | mirror_conf->vlan.vlan_id[i])) | |
5567 | vlan_mask |= (1ULL << reg_index); | |
5568 | else | |
5569 | return -EINVAL; | |
5570 | } | |
5571 | } | |
5572 | ||
5573 | if (on) { | |
5574 | mv_lsb = vlan_mask & 0xFFFFFFFF; | |
5575 | mv_msb = vlan_mask >> vlan_mask_offset; | |
5576 | ||
5577 | mr_info->mr_conf[rule_id].vlan.vlan_mask = | |
5578 | mirror_conf->vlan.vlan_mask; | |
5579 | for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) { | |
5580 | if (mirror_conf->vlan.vlan_mask & (1ULL << i)) | |
5581 | mr_info->mr_conf[rule_id].vlan.vlan_id[i] = | |
5582 | mirror_conf->vlan.vlan_id[i]; | |
5583 | } | |
5584 | } else { | |
5585 | mv_lsb = 0; | |
5586 | mv_msb = 0; | |
5587 | mr_info->mr_conf[rule_id].vlan.vlan_mask = 0; | |
5588 | for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) | |
5589 | mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0; | |
5590 | } | |
5591 | } | |
5592 | ||
11fdf7f2 | 5593 | /** |
7c673cae FG |
5594 | * if enable pool mirror, write related pool mask register,if disable |
5595 | * pool mirror, clear PFMRVM register | |
5596 | */ | |
5597 | if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { | |
5598 | mirror_type |= IXGBE_MRCTL_VPME; | |
5599 | if (on) { | |
5600 | mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF; | |
5601 | mp_msb = mirror_conf->pool_mask >> pool_mask_offset; | |
5602 | mr_info->mr_conf[rule_id].pool_mask = | |
5603 | mirror_conf->pool_mask; | |
5604 | ||
5605 | } else { | |
5606 | mp_lsb = 0; | |
5607 | mp_msb = 0; | |
5608 | mr_info->mr_conf[rule_id].pool_mask = 0; | |
5609 | } | |
5610 | } | |
5611 | if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) | |
5612 | mirror_type |= IXGBE_MRCTL_UPME; | |
5613 | if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) | |
5614 | mirror_type |= IXGBE_MRCTL_DPME; | |
5615 | ||
5616 | /* read mirror control register and recalculate it */ | |
5617 | mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id)); | |
5618 | ||
5619 | if (on) { | |
5620 | mr_ctl |= mirror_type; | |
5621 | mr_ctl &= mirror_rule_mask; | |
5622 | mr_ctl |= mirror_conf->dst_pool << dst_pool_offset; | |
11fdf7f2 | 5623 | } else { |
7c673cae | 5624 | mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask); |
11fdf7f2 | 5625 | } |
7c673cae FG |
5626 | |
5627 | mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type; | |
5628 | mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool; | |
5629 | ||
5630 | /* write mirrror control register */ | |
5631 | IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); | |
5632 | ||
5633 | /* write pool mirrror control register */ | |
11fdf7f2 | 5634 | if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { |
7c673cae FG |
5635 | IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); |
5636 | IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), | |
5637 | mp_msb); | |
5638 | } | |
5639 | /* write VLAN mirrror control register */ | |
11fdf7f2 | 5640 | if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { |
7c673cae FG |
5641 | IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); |
5642 | IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), | |
5643 | mv_msb); | |
5644 | } | |
5645 | ||
5646 | return 0; | |
5647 | } | |
5648 | ||
5649 | static int | |
5650 | ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) | |
5651 | { | |
5652 | int mr_ctl = 0; | |
5653 | uint32_t lsb_val = 0; | |
5654 | uint32_t msb_val = 0; | |
5655 | const uint8_t rule_mr_offset = 4; | |
5656 | ||
5657 | struct ixgbe_hw *hw = | |
5658 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
5659 | struct ixgbe_mirror_info *mr_info = | |
5660 | (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); | |
5661 | ||
11fdf7f2 | 5662 | if (ixgbe_vt_check(hw) < 0) |
7c673cae FG |
5663 | return -ENOTSUP; |
5664 | ||
11fdf7f2 TL |
5665 | if (rule_id >= IXGBE_MAX_MIRROR_RULES) |
5666 | return -EINVAL; | |
5667 | ||
7c673cae | 5668 | memset(&mr_info->mr_conf[rule_id], 0, |
11fdf7f2 | 5669 | sizeof(struct rte_eth_mirror_conf)); |
7c673cae FG |
5670 | |
5671 | /* clear PFVMCTL register */ | |
5672 | IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); | |
5673 | ||
5674 | /* clear pool mask register */ | |
5675 | IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val); | |
5676 | IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val); | |
5677 | ||
5678 | /* clear vlan mask register */ | |
5679 | IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val); | |
5680 | IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val); | |
5681 | ||
5682 | return 0; | |
5683 | } | |
5684 | ||
5685 | static int | |
5686 | ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) | |
5687 | { | |
11fdf7f2 TL |
5688 | struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); |
5689 | struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; | |
5690 | struct ixgbe_interrupt *intr = | |
5691 | IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
7c673cae FG |
5692 | struct ixgbe_hw *hw = |
5693 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
11fdf7f2 | 5694 | uint32_t vec = IXGBE_MISC_VEC_ID; |
7c673cae | 5695 | |
11fdf7f2 TL |
5696 | if (rte_intr_allow_others(intr_handle)) |
5697 | vec = IXGBE_RX_VEC_START; | |
5698 | intr->mask |= (1 << vec); | |
7c673cae | 5699 | RTE_SET_USED(queue_id); |
11fdf7f2 | 5700 | IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); |
7c673cae | 5701 | |
11fdf7f2 | 5702 | rte_intr_enable(intr_handle); |
7c673cae FG |
5703 | |
5704 | return 0; | |
5705 | } | |
5706 | ||
5707 | static int | |
5708 | ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) | |
5709 | { | |
11fdf7f2 TL |
5710 | struct ixgbe_interrupt *intr = |
5711 | IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
7c673cae FG |
5712 | struct ixgbe_hw *hw = |
5713 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
11fdf7f2 TL |
5714 | struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); |
5715 | struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; | |
5716 | uint32_t vec = IXGBE_MISC_VEC_ID; | |
7c673cae | 5717 | |
11fdf7f2 TL |
5718 | if (rte_intr_allow_others(intr_handle)) |
5719 | vec = IXGBE_RX_VEC_START; | |
5720 | intr->mask &= ~(1 << vec); | |
7c673cae | 5721 | RTE_SET_USED(queue_id); |
11fdf7f2 | 5722 | IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); |
7c673cae FG |
5723 | |
5724 | return 0; | |
5725 | } | |
5726 | ||
5727 | static int | |
5728 | ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) | |
5729 | { | |
11fdf7f2 TL |
5730 | struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); |
5731 | struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; | |
7c673cae FG |
5732 | uint32_t mask; |
5733 | struct ixgbe_hw *hw = | |
5734 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
5735 | struct ixgbe_interrupt *intr = | |
5736 | IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
5737 | ||
5738 | if (queue_id < 16) { | |
5739 | ixgbe_disable_intr(hw); | |
5740 | intr->mask |= (1 << queue_id); | |
5741 | ixgbe_enable_intr(dev); | |
5742 | } else if (queue_id < 32) { | |
5743 | mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); | |
5744 | mask &= (1 << queue_id); | |
5745 | IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); | |
5746 | } else if (queue_id < 64) { | |
5747 | mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); | |
5748 | mask &= (1 << (queue_id - 32)); | |
5749 | IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); | |
5750 | } | |
11fdf7f2 | 5751 | rte_intr_enable(intr_handle); |
7c673cae FG |
5752 | |
5753 | return 0; | |
5754 | } | |
5755 | ||
5756 | static int | |
5757 | ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) | |
5758 | { | |
5759 | uint32_t mask; | |
5760 | struct ixgbe_hw *hw = | |
5761 | IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
5762 | struct ixgbe_interrupt *intr = | |
5763 | IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
5764 | ||
5765 | if (queue_id < 16) { | |
5766 | ixgbe_disable_intr(hw); | |
5767 | intr->mask &= ~(1 << queue_id); | |
5768 | ixgbe_enable_intr(dev); | |
5769 | } else if (queue_id < 32) { | |
5770 | mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); | |
5771 | mask &= ~(1 << queue_id); | |
5772 | IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); | |
5773 | } else if (queue_id < 64) { | |
5774 | mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); | |
5775 | mask &= ~(1 << (queue_id - 32)); | |
5776 | IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); | |
5777 | } | |
5778 | ||
5779 | return 0; | |
5780 | } | |
5781 | ||
5782 | static void | |
5783 | ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, | |
5784 | uint8_t queue, uint8_t msix_vector) | |
5785 | { | |
5786 | uint32_t tmp, idx; | |
5787 | ||
5788 | if (direction == -1) { | |
5789 | /* other causes */ | |
5790 | msix_vector |= IXGBE_IVAR_ALLOC_VAL; | |
5791 | tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); | |
5792 | tmp &= ~0xFF; | |
5793 | tmp |= msix_vector; | |
5794 | IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); | |
5795 | } else { | |
5796 | /* rx or tx cause */ | |
5797 | msix_vector |= IXGBE_IVAR_ALLOC_VAL; | |
5798 | idx = ((16 * (queue & 1)) + (8 * direction)); | |
5799 | tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); | |
5800 | tmp &= ~(0xFF << idx); | |
5801 | tmp |= (msix_vector << idx); | |
5802 | IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); | |
5803 | } | |
5804 | } | |
5805 | ||
5806 | /** | |
5807 | * set the IVAR registers, mapping interrupt causes to vectors | |
5808 | * @param hw | |
5809 | * pointer to ixgbe_hw struct | |
5810 | * @direction | |
5811 | * 0 for Rx, 1 for Tx, -1 for other causes | |
5812 | * @queue | |
5813 | * queue to map the corresponding interrupt to | |
5814 | * @msix_vector | |
5815 | * the vector to map to the corresponding queue | |
5816 | */ | |
5817 | static void | |
5818 | ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, | |
5819 | uint8_t queue, uint8_t msix_vector) | |
5820 | { | |
5821 | uint32_t tmp, idx; | |
5822 | ||
5823 | msix_vector |= IXGBE_IVAR_ALLOC_VAL; | |
5824 | if (hw->mac.type == ixgbe_mac_82598EB) { | |
5825 | if (direction == -1) | |
5826 | direction = 0; | |
5827 | idx = (((direction * 64) + queue) >> 2) & 0x1F; | |
5828 | tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); | |
5829 | tmp &= ~(0xFF << (8 * (queue & 0x3))); | |
5830 | tmp |= (msix_vector << (8 * (queue & 0x3))); | |
5831 | IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); | |
5832 | } else if ((hw->mac.type == ixgbe_mac_82599EB) || | |
11fdf7f2 TL |
5833 | (hw->mac.type == ixgbe_mac_X540) || |
5834 | (hw->mac.type == ixgbe_mac_X550)) { | |
7c673cae FG |
5835 | if (direction == -1) { |
5836 | /* other causes */ | |
5837 | idx = ((queue & 1) * 8); | |
5838 | tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); | |
5839 | tmp &= ~(0xFF << idx); | |
5840 | tmp |= (msix_vector << idx); | |
5841 | IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); | |
5842 | } else { | |
5843 | /* rx or tx causes */ | |
5844 | idx = ((16 * (queue & 1)) + (8 * direction)); | |
5845 | tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); | |
5846 | tmp &= ~(0xFF << idx); | |
5847 | tmp |= (msix_vector << idx); | |
5848 | IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); | |
5849 | } | |
5850 | } | |
5851 | } | |
5852 | ||
/* Configure MSI-X for a VF port: map the misc cause and every Rx queue
 * to their vectors and program the default ITR throttling value.
 * Called during VF device start.
 */
static void
ixgbevf_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t q_idx;
	uint32_t vector_idx = IXGBE_MISC_VEC_ID;
	uint32_t base = IXGBE_MISC_VEC_ID;

	/* Configure VF other cause ivar */
	ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd.
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	/* when "others" interrupts are allowed, Rx vectors start after
	 * the misc vector instead of sharing it
	 */
	if (rte_intr_allow_others(intr_handle)) {
		base = IXGBE_RX_VEC_START;
		vector_idx = IXGBE_RX_VEC_START;
	}

	/* Configure all RX queues of VF */
	for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
		/* Force all queue use vector 0,
		 * as IXGBE_VF_MAXMSIVECOTR = 1
		 */
		ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
		intr_handle->intr_vec[q_idx] = vector_idx;
		/* advance only while event fds remain; extra queues pile
		 * onto the last available vector
		 */
		if (vector_idx < base + intr_handle->nb_efd - 1)
			vector_idx++;
	}

	/* As RX queue setting above show, all queues use the vector 0.
	 * Set only the ITR value of IXGBE_MISC_VEC_ID.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID),
			IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
			| IXGBE_EITR_CNT_WDIS);
}
5896 | ||
/**
 * Sets up the hardware to properly generate MSI-X interrupts
 * @hw
 *  board private structure
 *
 * Programs GPIE for MSI-X mode, EIAM auto-mask bits, the IVAR
 * queue-to-vector table, the misc-cause ITR, and the EIAC auto-clear
 * mask.  The register-write order here is deliberate; do not reorder.
 */
static void
ixgbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
	uint32_t vec = IXGBE_MISC_VEC_ID;
	uint32_t mask;
	uint32_t gpie;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 * but if misx has been enabled already, need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
		return;

	if (rte_intr_allow_others(intr_handle))
		vec = base = IXGBE_RX_VEC_START;

	/* setup GPIE for MSI-x mode */
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
		IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
	/* auto clearing and auto setting corresponding bits in EIMS
	 * when MSI-X interrupt is triggered
	 */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
	}
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping */
			ixgbe_set_ivar_map(hw, 0, queue_id, vec);
			intr_handle->intr_vec[queue_id] = vec;
			/* once event fds are exhausted, remaining queues
			 * all share the last vector
			 */
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}

		/* map the "other causes" interrupt; the IVAR slot used
		 * for it differs per MAC generation
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			ixgbe_set_ivar_map(hw, -1,
					   IXGBE_IVAR_OTHER_CAUSES_INDEX,
					   IXGBE_MISC_VEC_ID);
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
			ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
			break;
		default:
			break;
		}
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
			IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
			| IXGBE_EITR_CNT_WDIS);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER |
		  IXGBE_EIMS_MAILBOX |
		  IXGBE_EIMS_LSC);

	IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
}
5982 | ||
11fdf7f2 TL |
5983 | int |
5984 | ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, | |
5985 | uint16_t queue_idx, uint16_t tx_rate) | |
7c673cae FG |
5986 | { |
5987 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
11fdf7f2 | 5988 | struct rte_eth_rxmode *rxmode; |
7c673cae FG |
5989 | uint32_t rf_dec, rf_int; |
5990 | uint32_t bcnrc_val; | |
5991 | uint16_t link_speed = dev->data->dev_link.link_speed; | |
5992 | ||
5993 | if (queue_idx >= hw->mac.max_tx_queues) | |
5994 | return -EINVAL; | |
5995 | ||
5996 | if (tx_rate != 0) { | |
5997 | /* Calculate the rate factor values to set */ | |
5998 | rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; | |
5999 | rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; | |
6000 | rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; | |
6001 | ||
6002 | bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; | |
6003 | bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & | |
6004 | IXGBE_RTTBCNRC_RF_INT_MASK_M); | |
6005 | bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); | |
6006 | } else { | |
6007 | bcnrc_val = 0; | |
6008 | } | |
6009 | ||
11fdf7f2 | 6010 | rxmode = &dev->data->dev_conf.rxmode; |
7c673cae FG |
6011 | /* |
6012 | * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM | |
6013 | * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise | |
6014 | * set as 0x4. | |
6015 | */ | |
11fdf7f2 TL |
6016 | if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) && |
6017 | (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE)) | |
7c673cae FG |
6018 | IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, |
6019 | IXGBE_MMW_SIZE_JUMBO_FRAME); | |
6020 | else | |
6021 | IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, | |
6022 | IXGBE_MMW_SIZE_DEFAULT); | |
6023 | ||
6024 | /* Set RTTBCNRC of queue X */ | |
6025 | IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); | |
6026 | IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); | |
6027 | IXGBE_WRITE_FLUSH(hw); | |
6028 | ||
6029 | return 0; | |
6030 | } | |
6031 | ||
11fdf7f2 | 6032 | static int |
7c673cae FG |
6033 | ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, |
6034 | __attribute__((unused)) uint32_t index, | |
6035 | __attribute__((unused)) uint32_t pool) | |
6036 | { | |
6037 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
6038 | int diag; | |
6039 | ||
6040 | /* | |
6041 | * On a 82599 VF, adding again the same MAC addr is not an idempotent | |
6042 | * operation. Trap this case to avoid exhausting the [very limited] | |
6043 | * set of PF resources used to store VF MAC addresses. | |
6044 | */ | |
6045 | if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) | |
11fdf7f2 | 6046 | return -1; |
7c673cae | 6047 | diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); |
11fdf7f2 TL |
6048 | if (diag != 0) |
6049 | PMD_DRV_LOG(ERR, "Unable to add MAC address " | |
6050 | "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d", | |
6051 | mac_addr->addr_bytes[0], | |
6052 | mac_addr->addr_bytes[1], | |
6053 | mac_addr->addr_bytes[2], | |
6054 | mac_addr->addr_bytes[3], | |
6055 | mac_addr->addr_bytes[4], | |
6056 | mac_addr->addr_bytes[5], | |
6057 | diag); | |
6058 | return diag; | |
7c673cae FG |
6059 | } |
6060 | ||
6061 | static void | |
6062 | ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) | |
6063 | { | |
6064 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
6065 | struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; | |
6066 | struct ether_addr *mac_addr; | |
6067 | uint32_t i; | |
6068 | int diag; | |
6069 | ||
6070 | /* | |
6071 | * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does | |
6072 | * not support the deletion of a given MAC address. | |
6073 | * Instead, it imposes to delete all MAC addresses, then to add again | |
6074 | * all MAC addresses with the exception of the one to be deleted. | |
6075 | */ | |
6076 | (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); | |
6077 | ||
6078 | /* | |
6079 | * Add again all MAC addresses, with the exception of the deleted one | |
6080 | * and of the permanent MAC address. | |
6081 | */ | |
6082 | for (i = 0, mac_addr = dev->data->mac_addrs; | |
6083 | i < hw->mac.num_rar_entries; i++, mac_addr++) { | |
6084 | /* Skip the deleted MAC address */ | |
6085 | if (i == index) | |
6086 | continue; | |
6087 | /* Skip NULL MAC addresses */ | |
6088 | if (is_zero_ether_addr(mac_addr)) | |
6089 | continue; | |
6090 | /* Skip the permanent MAC address */ | |
6091 | if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) | |
6092 | continue; | |
6093 | diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); | |
6094 | if (diag != 0) | |
6095 | PMD_DRV_LOG(ERR, | |
6096 | "Adding again MAC address " | |
6097 | "%02x:%02x:%02x:%02x:%02x:%02x failed " | |
6098 | "diag=%d", | |
6099 | mac_addr->addr_bytes[0], | |
6100 | mac_addr->addr_bytes[1], | |
6101 | mac_addr->addr_bytes[2], | |
6102 | mac_addr->addr_bytes[3], | |
6103 | mac_addr->addr_bytes[4], | |
6104 | mac_addr->addr_bytes[5], | |
6105 | diag); | |
6106 | } | |
6107 | } | |
6108 | ||
11fdf7f2 | 6109 | static int |
7c673cae FG |
6110 | ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) |
6111 | { | |
6112 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
6113 | ||
6114 | hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); | |
7c673cae | 6115 | |
11fdf7f2 TL |
6116 | return 0; |
6117 | } | |
7c673cae | 6118 | |
11fdf7f2 | 6119 | int |
7c673cae FG |
6120 | ixgbe_syn_filter_set(struct rte_eth_dev *dev, |
6121 | struct rte_eth_syn_filter *filter, | |
6122 | bool add) | |
6123 | { | |
6124 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
11fdf7f2 TL |
6125 | struct ixgbe_filter_info *filter_info = |
6126 | IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); | |
6127 | uint32_t syn_info; | |
7c673cae FG |
6128 | uint32_t synqf; |
6129 | ||
6130 | if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) | |
6131 | return -EINVAL; | |
6132 | ||
11fdf7f2 | 6133 | syn_info = filter_info->syn_info; |
7c673cae FG |
6134 | |
6135 | if (add) { | |
11fdf7f2 | 6136 | if (syn_info & IXGBE_SYN_FILTER_ENABLE) |
7c673cae FG |
6137 | return -EINVAL; |
6138 | synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & | |
6139 | IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); | |
6140 | ||
6141 | if (filter->hig_pri) | |
6142 | synqf |= IXGBE_SYN_FILTER_SYNQFP; | |
6143 | else | |
6144 | synqf &= ~IXGBE_SYN_FILTER_SYNQFP; | |
6145 | } else { | |
11fdf7f2 TL |
6146 | synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); |
6147 | if (!(syn_info & IXGBE_SYN_FILTER_ENABLE)) | |
7c673cae FG |
6148 | return -ENOENT; |
6149 | synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); | |
6150 | } | |
11fdf7f2 TL |
6151 | |
6152 | filter_info->syn_info = synqf; | |
7c673cae FG |
6153 | IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); |
6154 | IXGBE_WRITE_FLUSH(hw); | |
6155 | return 0; | |
6156 | } | |
6157 | ||
6158 | static int | |
6159 | ixgbe_syn_filter_get(struct rte_eth_dev *dev, | |
6160 | struct rte_eth_syn_filter *filter) | |
6161 | { | |
6162 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
6163 | uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); | |
6164 | ||
6165 | if (synqf & IXGBE_SYN_FILTER_ENABLE) { | |
6166 | filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0; | |
6167 | filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1); | |
6168 | return 0; | |
6169 | } | |
6170 | return -ENOENT; | |
6171 | } | |
6172 | ||
6173 | static int | |
6174 | ixgbe_syn_filter_handle(struct rte_eth_dev *dev, | |
6175 | enum rte_filter_op filter_op, | |
6176 | void *arg) | |
6177 | { | |
6178 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
6179 | int ret; | |
6180 | ||
6181 | MAC_TYPE_FILTER_SUP(hw->mac.type); | |
6182 | ||
6183 | if (filter_op == RTE_ETH_FILTER_NOP) | |
6184 | return 0; | |
6185 | ||
6186 | if (arg == NULL) { | |
6187 | PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", | |
6188 | filter_op); | |
6189 | return -EINVAL; | |
6190 | } | |
6191 | ||
6192 | switch (filter_op) { | |
6193 | case RTE_ETH_FILTER_ADD: | |
6194 | ret = ixgbe_syn_filter_set(dev, | |
6195 | (struct rte_eth_syn_filter *)arg, | |
6196 | TRUE); | |
6197 | break; | |
6198 | case RTE_ETH_FILTER_DELETE: | |
6199 | ret = ixgbe_syn_filter_set(dev, | |
6200 | (struct rte_eth_syn_filter *)arg, | |
6201 | FALSE); | |
6202 | break; | |
6203 | case RTE_ETH_FILTER_GET: | |
6204 | ret = ixgbe_syn_filter_get(dev, | |
6205 | (struct rte_eth_syn_filter *)arg); | |
6206 | break; | |
6207 | default: | |
11fdf7f2 | 6208 | PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); |
7c673cae FG |
6209 | ret = -EINVAL; |
6210 | break; | |
6211 | } | |
6212 | ||
6213 | return ret; | |
6214 | } | |
6215 | ||
6216 | ||
6217 | static inline enum ixgbe_5tuple_protocol | |
6218 | convert_protocol_type(uint8_t protocol_value) | |
6219 | { | |
6220 | if (protocol_value == IPPROTO_TCP) | |
6221 | return IXGBE_FILTER_PROTOCOL_TCP; | |
6222 | else if (protocol_value == IPPROTO_UDP) | |
6223 | return IXGBE_FILTER_PROTOCOL_UDP; | |
6224 | else if (protocol_value == IPPROTO_SCTP) | |
6225 | return IXGBE_FILTER_PROTOCOL_SCTP; | |
6226 | else | |
6227 | return IXGBE_FILTER_PROTOCOL_NONE; | |
6228 | } | |
6229 | ||
11fdf7f2 TL |
6230 | /* inject a 5-tuple filter to HW */ |
6231 | static inline void | |
6232 | ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, | |
6233 | struct ixgbe_5tuple_filter *filter) | |
6234 | { | |
6235 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
6236 | int i; | |
6237 | uint32_t ftqf, sdpqf; | |
6238 | uint32_t l34timir = 0; | |
6239 | uint8_t mask = 0xff; | |
6240 | ||
6241 | i = filter->index; | |
6242 | ||
6243 | sdpqf = (uint32_t)(filter->filter_info.dst_port << | |
6244 | IXGBE_SDPQF_DSTPORT_SHIFT); | |
6245 | sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); | |
6246 | ||
6247 | ftqf = (uint32_t)(filter->filter_info.proto & | |
6248 | IXGBE_FTQF_PROTOCOL_MASK); | |
6249 | ftqf |= (uint32_t)((filter->filter_info.priority & | |
6250 | IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); | |
6251 | if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */ | |
6252 | mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; | |
6253 | if (filter->filter_info.dst_ip_mask == 0) | |
6254 | mask &= IXGBE_FTQF_DEST_ADDR_MASK; | |
6255 | if (filter->filter_info.src_port_mask == 0) | |
6256 | mask &= IXGBE_FTQF_SOURCE_PORT_MASK; | |
6257 | if (filter->filter_info.dst_port_mask == 0) | |
6258 | mask &= IXGBE_FTQF_DEST_PORT_MASK; | |
6259 | if (filter->filter_info.proto_mask == 0) | |
6260 | mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; | |
6261 | ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; | |
6262 | ftqf |= IXGBE_FTQF_POOL_MASK_EN; | |
6263 | ftqf |= IXGBE_FTQF_QUEUE_ENABLE; | |
6264 | ||
6265 | IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); | |
6266 | IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); | |
6267 | IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); | |
6268 | IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); | |
6269 | ||
6270 | l34timir |= IXGBE_L34T_IMIR_RESERVE; | |
6271 | l34timir |= (uint32_t)(filter->queue << | |
6272 | IXGBE_L34T_IMIR_QUEUE_SHIFT); | |
6273 | IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); | |
6274 | } | |
6275 | ||
7c673cae FG |
6276 | /* |
6277 | * add a 5tuple filter | |
6278 | * | |
6279 | * @param | |
6280 | * dev: Pointer to struct rte_eth_dev. | |
6281 | * index: the index the filter allocates. | |
6282 | * filter: ponter to the filter that will be added. | |
6283 | * rx_queue: the queue id the filter assigned to. | |
6284 | * | |
6285 | * @return | |
6286 | * - On success, zero. | |
6287 | * - On failure, a negative value. | |
6288 | */ | |
6289 | static int | |
6290 | ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, | |
6291 | struct ixgbe_5tuple_filter *filter) | |
6292 | { | |
7c673cae FG |
6293 | struct ixgbe_filter_info *filter_info = |
6294 | IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); | |
6295 | int i, idx, shift; | |
7c673cae FG |
6296 | |
6297 | /* | |
6298 | * look for an unused 5tuple filter index, | |
6299 | * and insert the filter to list. | |
6300 | */ | |
6301 | for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { | |
6302 | idx = i / (sizeof(uint32_t) * NBBY); | |
6303 | shift = i % (sizeof(uint32_t) * NBBY); | |
6304 | if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { | |
6305 | filter_info->fivetuple_mask[idx] |= 1 << shift; | |
6306 | filter->index = i; | |
6307 | TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, | |
6308 | filter, | |
6309 | entries); | |
6310 | break; | |
6311 | } | |
6312 | } | |
6313 | if (i >= IXGBE_MAX_FTQF_FILTERS) { | |
6314 | PMD_DRV_LOG(ERR, "5tuple filters are full."); | |
6315 | return -ENOSYS; | |
6316 | } | |
6317 | ||
11fdf7f2 | 6318 | ixgbe_inject_5tuple_filter(dev, filter); |
7c673cae | 6319 | |
7c673cae FG |
6320 | return 0; |
6321 | } | |
6322 | ||
6323 | /* | |
6324 | * remove a 5tuple filter | |
6325 | * | |
6326 | * @param | |
6327 | * dev: Pointer to struct rte_eth_dev. | |
6328 | * filter: the pointer of the filter will be removed. | |
6329 | */ | |
6330 | static void | |
6331 | ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, | |
6332 | struct ixgbe_5tuple_filter *filter) | |
6333 | { | |
6334 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
6335 | struct ixgbe_filter_info *filter_info = | |
6336 | IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); | |
6337 | uint16_t index = filter->index; | |
6338 | ||
6339 | filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= | |
6340 | ~(1 << (index % (sizeof(uint32_t) * NBBY))); | |
6341 | TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); | |
6342 | rte_free(filter); | |
6343 | ||
6344 | IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); | |
6345 | IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); | |
6346 | IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); | |
6347 | IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); | |
6348 | IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); | |
6349 | } | |
6350 | ||
/* Set the MTU of a VF port by asking the PF (via mailbox) to raise the
 * large-packet enable (LPE) limit, then record the new max frame size.
 * Returns 0 or -EINVAL (out-of-range MTU, or scattered Rx required
 * while the port is running without it).
 */
static int
ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ixgbe_hw *hw;
	/* frame size = MTU plus L2 header/CRC overhead */
	uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD;
	struct rte_eth_dev_data *dev_data = dev->data;

	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
		return -EINVAL;

	/* If device is started, refuse mtu that requires the support of
	 * scattered packets when this feature has not been enabled before.
	 */
	if (dev_data->dev_started && !dev_data->scattered_rx &&
	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
		PMD_INIT_LOG(ERR, "Stop port first.");
		return -EINVAL;
	}

	/*
	 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
	 * request of the version 2.0 of the mailbox API.
	 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
	 * of the mailbox API.
	 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
	 * prior to 3.11.33 which contains the following change:
	 * "ixgbe: Enable jumbo frames support w/ SR-IOV"
	 *
	 * NOTE(review): the result of this mailbox request is ignored, so a
	 * PF rejection leaves max_rx_pkt_len updated while the HW limit is
	 * not — consider checking the return once the base function reports
	 * status (upstream DPDK later added such a check).
	 */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
	return 0;
}
6388 | ||
7c673cae FG |
6389 | static inline struct ixgbe_5tuple_filter * |
6390 | ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, | |
6391 | struct ixgbe_5tuple_filter_info *key) | |
6392 | { | |
6393 | struct ixgbe_5tuple_filter *it; | |
6394 | ||
6395 | TAILQ_FOREACH(it, filter_list, entries) { | |
6396 | if (memcmp(key, &it->filter_info, | |
6397 | sizeof(struct ixgbe_5tuple_filter_info)) == 0) { | |
6398 | return it; | |
6399 | } | |
6400 | } | |
6401 | return NULL; | |
6402 | } | |
6403 | ||
/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/
/*
 * Each API mask may only be all-ones (compare the field) or all-zeros
 * (ignore the field); any partial mask is rejected with -EINVAL.
 * Note the internal mask flag is inverted relative to the API:
 * full API mask -> info mask 0 (field is compared, value copied),
 * zero API mask -> info mask 1 (field is ignored).
 * Also validates queue index and priority range.
 */
static inline int
ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
			struct ixgbe_5tuple_filter_info *filter_info)
{
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
		filter->priority > IXGBE_5TUPLE_MAX_PRI ||
		filter->priority < IXGBE_5TUPLE_MIN_PRI)
		return -EINVAL;

	/* Destination IP: compare, ignore, or reject partial mask. */
	switch (filter->dst_ip_mask) {
	case UINT32_MAX:
		filter_info->dst_ip_mask = 0;
		filter_info->dst_ip = filter->dst_ip;
		break;
	case 0:
		filter_info->dst_ip_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
		return -EINVAL;
	}

	/* Source IP. */
	switch (filter->src_ip_mask) {
	case UINT32_MAX:
		filter_info->src_ip_mask = 0;
		filter_info->src_ip = filter->src_ip;
		break;
	case 0:
		filter_info->src_ip_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
		return -EINVAL;
	}

	/* Destination L4 port. */
	switch (filter->dst_port_mask) {
	case UINT16_MAX:
		filter_info->dst_port_mask = 0;
		filter_info->dst_port = filter->dst_port;
		break;
	case 0:
		filter_info->dst_port_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
		return -EINVAL;
	}

	/* Source L4 port. */
	switch (filter->src_port_mask) {
	case UINT16_MAX:
		filter_info->src_port_mask = 0;
		filter_info->src_port = filter->src_port;
		break;
	case 0:
		filter_info->src_port_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid src_port mask.");
		return -EINVAL;
	}

	/* IP protocol; the value is translated to the hardware encoding. */
	switch (filter->proto_mask) {
	case UINT8_MAX:
		filter_info->proto_mask = 0;
		filter_info->proto =
			convert_protocol_type(filter->proto);
		break;
	case 0:
		filter_info->proto_mask = 1;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid protocol mask.");
		return -EINVAL;
	}

	filter_info->priority = (uint8_t)filter->priority;
	return 0;
}
6483 | ||
/*
 * add or delete a ntuple filter
 *
 * @param
 * dev: Pointer to struct rte_eth_dev.
 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
 * add: if true, add filter, if false, remove filter
 *
 * @return
 *    - On success, zero.
 *    - On failure, a negative value (-EINVAL bad flags/masks, -EEXIST on
 *      duplicate add, -ENOENT on delete of a missing filter, -ENOMEM on
 *      allocation failure, or the error from ixgbe_add_5tuple_filter).
 */
int
ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter,
			bool add)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_5tuple_filter_info filter_5tuple;
	struct ixgbe_5tuple_filter *filter;
	int ret;

	/* Only the plain 5tuple flag combination is supported. */
	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
		return -EINVAL;
	}

	memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
	if (ret < 0)
		return ret;

	/* Reject duplicate adds and deletes of nonexistent filters. */
	filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
					 &filter_5tuple);
	if (filter != NULL && add) {
		PMD_DRV_LOG(ERR, "filter exists.");
		return -EEXIST;
	}
	if (filter == NULL && !add) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");
		return -ENOENT;
	}

	if (add) {
		filter = rte_zmalloc("ixgbe_5tuple_filter",
				sizeof(struct ixgbe_5tuple_filter), 0);
		if (filter == NULL)
			return -ENOMEM;
		rte_memcpy(&filter->filter_info,
				 &filter_5tuple,
				 sizeof(struct ixgbe_5tuple_filter_info));
		filter->queue = ntuple_filter->queue;
		/* On hardware-programming failure, the allocation is
		 * released so no stale list entry is left behind.
		 */
		ret = ixgbe_add_5tuple_filter(dev, filter);
		if (ret < 0) {
			rte_free(filter);
			return ret;
		}
	} else
		ixgbe_remove_5tuple_filter(dev, filter);

	return 0;
}
6547 | ||
6548 | /* | |
6549 | * get a ntuple filter | |
6550 | * | |
6551 | * @param | |
6552 | * dev: Pointer to struct rte_eth_dev. | |
6553 | * ntuple_filter: Pointer to struct rte_eth_ntuple_filter | |
6554 | * | |
6555 | * @return | |
6556 | * - On success, zero. | |
6557 | * - On failure, a negative value. | |
6558 | */ | |
6559 | static int | |
6560 | ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, | |
6561 | struct rte_eth_ntuple_filter *ntuple_filter) | |
6562 | { | |
6563 | struct ixgbe_filter_info *filter_info = | |
6564 | IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); | |
6565 | struct ixgbe_5tuple_filter_info filter_5tuple; | |
6566 | struct ixgbe_5tuple_filter *filter; | |
6567 | int ret; | |
6568 | ||
6569 | if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { | |
6570 | PMD_DRV_LOG(ERR, "only 5tuple is supported."); | |
6571 | return -EINVAL; | |
6572 | } | |
6573 | ||
6574 | memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); | |
6575 | ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); | |
6576 | if (ret < 0) | |
6577 | return ret; | |
6578 | ||
6579 | filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, | |
6580 | &filter_5tuple); | |
6581 | if (filter == NULL) { | |
6582 | PMD_DRV_LOG(ERR, "filter doesn't exist."); | |
6583 | return -ENOENT; | |
6584 | } | |
6585 | ntuple_filter->queue = filter->queue; | |
6586 | return 0; | |
6587 | } | |
6588 | ||
6589 | /* | |
6590 | * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter. | |
6591 | * @dev: pointer to rte_eth_dev structure | |
6592 | * @filter_op:operation will be taken. | |
6593 | * @arg: a pointer to specific structure corresponding to the filter_op | |
6594 | * | |
6595 | * @return | |
6596 | * - On success, zero. | |
6597 | * - On failure, a negative value. | |
6598 | */ | |
6599 | static int | |
6600 | ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, | |
6601 | enum rte_filter_op filter_op, | |
6602 | void *arg) | |
6603 | { | |
6604 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
6605 | int ret; | |
6606 | ||
6607 | MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); | |
6608 | ||
6609 | if (filter_op == RTE_ETH_FILTER_NOP) | |
6610 | return 0; | |
6611 | ||
6612 | if (arg == NULL) { | |
6613 | PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", | |
6614 | filter_op); | |
6615 | return -EINVAL; | |
6616 | } | |
6617 | ||
6618 | switch (filter_op) { | |
6619 | case RTE_ETH_FILTER_ADD: | |
6620 | ret = ixgbe_add_del_ntuple_filter(dev, | |
6621 | (struct rte_eth_ntuple_filter *)arg, | |
6622 | TRUE); | |
6623 | break; | |
6624 | case RTE_ETH_FILTER_DELETE: | |
6625 | ret = ixgbe_add_del_ntuple_filter(dev, | |
6626 | (struct rte_eth_ntuple_filter *)arg, | |
6627 | FALSE); | |
6628 | break; | |
6629 | case RTE_ETH_FILTER_GET: | |
6630 | ret = ixgbe_get_ntuple_filter(dev, | |
6631 | (struct rte_eth_ntuple_filter *)arg); | |
6632 | break; | |
6633 | default: | |
6634 | PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); | |
6635 | ret = -EINVAL; | |
11fdf7f2 | 6636 | break; |
7c673cae | 6637 | } |
11fdf7f2 | 6638 | return ret; |
7c673cae FG |
6639 | } |
6640 | ||
/*
 * ixgbe_add_del_ethertype_filter - add or remove an ethertype filter.
 *
 * On add: builds the ETQF/ETQS register values, records the filter in the
 * software table, then programs the registers at the slot returned by the
 * insert.  On delete: frees the software slot and writes zeros (etqf/etqs
 * stay 0) to clear the hardware registers at that slot.
 *
 * NOTE: `ret` is reused — first as the lookup/insert/remove slot index,
 * then as the ETQF/ETQS register index being written.
 *
 * @return 0 on success; -EINVAL on bad queue/type/flags, -EEXIST on
 * duplicate add, -ENOENT on delete of a missing filter, -ENOSPC when the
 * table is full, -ENOSYS if the remove fails.
 */
int
ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t etqf = 0;
	uint32_t etqs = 0;
	int ret;
	struct ixgbe_ethertype_filter ethertype_filter;

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
		return -EINVAL;

	/* IPv4/IPv6 ethertypes are handled by dedicated hardware paths. */
	if (filter->ether_type == ETHER_TYPE_IPv4 ||
		filter->ether_type == ETHER_TYPE_IPv6) {
		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
			" ethertype filter.", filter->ether_type);
		return -EINVAL;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
		return -EINVAL;
	}
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		PMD_DRV_LOG(ERR, "drop option is unsupported.");
		return -EINVAL;
	}

	/* ret >= 0 means an existing entry was found at that slot. */
	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
	if (ret >= 0 && add) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
			    filter->ether_type);
		return -EEXIST;
	}
	if (ret < 0 && !add) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
			    filter->ether_type);
		return -ENOENT;
	}

	if (add) {
		etqf = IXGBE_ETQF_FILTER_EN;
		etqf |= (uint32_t)filter->ether_type;
		etqs |= (uint32_t)((filter->queue <<
				    IXGBE_ETQS_RX_QUEUE_SHIFT) &
				    IXGBE_ETQS_RX_QUEUE);
		etqs |= IXGBE_ETQS_QUEUE_EN;

		ethertype_filter.ethertype = filter->ether_type;
		ethertype_filter.etqf = etqf;
		ethertype_filter.etqs = etqs;
		ethertype_filter.conf = FALSE;
		ret = ixgbe_ethertype_filter_insert(filter_info,
						    &ethertype_filter);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "ethertype filters are full.");
			return -ENOSPC;
		}
	} else {
		ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
		if (ret < 0)
			return -ENOSYS;
	}
	/* Program (add) or clear (delete, etqf/etqs == 0) the slot. */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
6714 | ||
/*
 * ixgbe_get_ethertype_filter - read back an ethertype filter.
 *
 * Looks the ethertype up in the software table, then reads the hardware
 * ETQF/ETQS registers at that slot and fills @filter with the programmed
 * ethertype and queue.  Returns -ENOENT when the filter is absent from
 * the table or not enabled in hardware.
 */
static int
ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t etqf, etqs;
	int ret;

	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
			    filter->ether_type);
		return -ENOENT;
	}

	etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
	if (etqf & IXGBE_ETQF_FILTER_EN) {
		etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
		filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
		filter->flags = 0;
		filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
			       IXGBE_ETQS_RX_QUEUE_SHIFT;
		return 0;
	}
	return -ENOENT;
}
6743 | ||
6744 | /* | |
6745 | * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter. | |
6746 | * @dev: pointer to rte_eth_dev structure | |
6747 | * @filter_op:operation will be taken. | |
6748 | * @arg: a pointer to specific structure corresponding to the filter_op | |
6749 | */ | |
6750 | static int | |
6751 | ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, | |
6752 | enum rte_filter_op filter_op, | |
6753 | void *arg) | |
6754 | { | |
6755 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
6756 | int ret; | |
6757 | ||
6758 | MAC_TYPE_FILTER_SUP(hw->mac.type); | |
6759 | ||
6760 | if (filter_op == RTE_ETH_FILTER_NOP) | |
6761 | return 0; | |
6762 | ||
6763 | if (arg == NULL) { | |
6764 | PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", | |
6765 | filter_op); | |
6766 | return -EINVAL; | |
6767 | } | |
6768 | ||
6769 | switch (filter_op) { | |
6770 | case RTE_ETH_FILTER_ADD: | |
6771 | ret = ixgbe_add_del_ethertype_filter(dev, | |
6772 | (struct rte_eth_ethertype_filter *)arg, | |
6773 | TRUE); | |
6774 | break; | |
6775 | case RTE_ETH_FILTER_DELETE: | |
6776 | ret = ixgbe_add_del_ethertype_filter(dev, | |
6777 | (struct rte_eth_ethertype_filter *)arg, | |
6778 | FALSE); | |
6779 | break; | |
6780 | case RTE_ETH_FILTER_GET: | |
6781 | ret = ixgbe_get_ethertype_filter(dev, | |
6782 | (struct rte_eth_ethertype_filter *)arg); | |
6783 | break; | |
6784 | default: | |
6785 | PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); | |
6786 | ret = -EINVAL; | |
6787 | break; | |
6788 | } | |
6789 | return ret; | |
6790 | } | |
6791 | ||
/*
 * ixgbe_dev_filter_ctrl - top-level dispatcher for the legacy filter API.
 *
 * Routes the request to the per-type handler (ntuple, ethertype, syn,
 * fdir, l2 tunnel).  RTE_ETH_FILTER_GENERIC only supports GET and
 * returns a pointer to the rte_flow ops table through @arg.
 *
 * @return 0 on success, a negative value on failure.
 */
static int
ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = 0;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_SYN:
		ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
		break;
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		/* Hand back the rte_flow ops table. */
		*(const void **)arg = &ixgbe_flow_ops;
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
							filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
6830 | ||
6831 | static u8 * | |
6832 | ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw, | |
6833 | u8 **mc_addr_ptr, u32 *vmdq) | |
6834 | { | |
6835 | u8 *mc_addr; | |
6836 | ||
6837 | *vmdq = 0; | |
6838 | mc_addr = *mc_addr_ptr; | |
6839 | *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr)); | |
6840 | return mc_addr; | |
6841 | } | |
6842 | ||
6843 | static int | |
6844 | ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, | |
6845 | struct ether_addr *mc_addr_set, | |
6846 | uint32_t nb_mc_addr) | |
6847 | { | |
6848 | struct ixgbe_hw *hw; | |
6849 | u8 *mc_addr_list; | |
6850 | ||
6851 | hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
6852 | mc_addr_list = (u8 *)mc_addr_set; | |
6853 | return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, | |
6854 | ixgbe_dev_addr_list_itr, TRUE); | |
6855 | } | |
6856 | ||
/*
 * Read the raw SYSTIME cycle counter.  On X550-family MACs the register
 * pair is (ns, seconds) and is combined into nanoseconds; on older MACs
 * SYSTIMH:SYSTIML form one 64-bit cycle count.
 */
static uint64_t
ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t systime_cycles;

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
		systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
		systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
				* NSEC_PER_SEC;
		break;
	default:
		systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
		systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
				<< 32;
	}

	return systime_cycles;
}
6880 | ||
/*
 * Read the raw RX timestamp cycle counter.  On X550-family MACs the
 * (ns, seconds) register pair is combined into nanoseconds; otherwise
 * RXSTMPH:RXSTMPL form one 64-bit cycle count.
 */
static uint64_t
ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t rx_tstamp_cycles;

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* RXSTMPL stores ns and RXSTMPH stores seconds. */
		rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
		rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
				* NSEC_PER_SEC;
		break;
	default:
		/* RXSTMPL stores ns and RXSTMPH stores seconds. */
		rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
		rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
				<< 32;
	}

	return rx_tstamp_cycles;
}
6905 | ||
/*
 * Read the raw TX timestamp cycle counter.  On X550-family MACs the
 * (ns, seconds) register pair is combined into nanoseconds; otherwise
 * TXSTMPH:TXSTMPL form one 64-bit cycle count.
 */
static uint64_t
ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t tx_tstamp_cycles;

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* TXSTMPL stores ns and TXSTMPH stores seconds. */
		tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
		tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
				* NSEC_PER_SEC;
		break;
	default:
		/* TXSTMPL stores ns and TXSTMPH stores seconds. */
		tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
		tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
				<< 32;
	}

	return tx_tstamp_cycles;
}
6930 | ||
/*
 * ixgbe_start_timecounters - program TIMINCA and reset the software
 * timecounters used for IEEE 1588 timestamping.
 *
 * The increment value and shift depend on the current link speed for
 * 82599/X540; X550-family counts nanoseconds directly (incval = 1,
 * shift = 0) — note the intentional fall-through from the X550 cases
 * into the X540 register write.  Unsupported MACs return without
 * touching the timecounters.
 */
static void
ixgbe_start_timecounters(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_adapter *adapter =
			(struct ixgbe_adapter *)dev->data->dev_private;
	struct rte_eth_link link;
	uint32_t incval = 0;
	uint32_t shift = 0;

	/* Get current link speed. */
	ixgbe_dev_link_update(dev, 1);
	rte_eth_linkstatus_get(dev, &link);

	switch (link.link_speed) {
	case ETH_SPEED_NUM_100M:
		incval = IXGBE_INCVAL_100;
		shift = IXGBE_INCVAL_SHIFT_100;
		break;
	case ETH_SPEED_NUM_1G:
		incval = IXGBE_INCVAL_1GB;
		shift = IXGBE_INCVAL_SHIFT_1GB;
		break;
	case ETH_SPEED_NUM_10G:
	default:
		incval = IXGBE_INCVAL_10GB;
		shift = IXGBE_INCVAL_SHIFT_10GB;
		break;
	}

	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
		/* Independent of link speed. */
		incval = 1;
		/* Cycles read will be interpreted as ns. */
		shift = 0;
		/* Fall-through */
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
		break;
	case ixgbe_mac_82599EB:
		incval >>= IXGBE_INCVAL_SHIFT_82599;
		shift -= IXGBE_INCVAL_SHIFT_82599;
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
				(1 << IXGBE_INCPER_SHIFT_82599) | incval);
		break;
	default:
		/* Not supported. */
		return;
	}

	/* Reset all three timecounters and apply the computed shift. */
	memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
	memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));

	adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
	adapter->systime_tc.cc_shift = shift;
	adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
	adapter->rx_tstamp_tc.cc_shift = shift;
	adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;

	adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
	adapter->tx_tstamp_tc.cc_shift = shift;
	adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
}
7000 | ||
7001 | static int | |
7002 | ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) | |
7003 | { | |
7004 | struct ixgbe_adapter *adapter = | |
7005 | (struct ixgbe_adapter *)dev->data->dev_private; | |
7006 | ||
7007 | adapter->systime_tc.nsec += delta; | |
7008 | adapter->rx_tstamp_tc.nsec += delta; | |
7009 | adapter->tx_tstamp_tc.nsec += delta; | |
7010 | ||
7011 | return 0; | |
7012 | } | |
7013 | ||
7014 | static int | |
7015 | ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) | |
7016 | { | |
7017 | uint64_t ns; | |
7018 | struct ixgbe_adapter *adapter = | |
7019 | (struct ixgbe_adapter *)dev->data->dev_private; | |
7020 | ||
7021 | ns = rte_timespec_to_ns(ts); | |
7022 | /* Set the timecounters to a new value. */ | |
7023 | adapter->systime_tc.nsec = ns; | |
7024 | adapter->rx_tstamp_tc.nsec = ns; | |
7025 | adapter->tx_tstamp_tc.nsec = ns; | |
7026 | ||
7027 | return 0; | |
7028 | } | |
7029 | ||
7030 | static int | |
7031 | ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) | |
7032 | { | |
7033 | uint64_t ns, systime_cycles; | |
7034 | struct ixgbe_adapter *adapter = | |
7035 | (struct ixgbe_adapter *)dev->data->dev_private; | |
7036 | ||
7037 | systime_cycles = ixgbe_read_systime_cyclecounter(dev); | |
7038 | ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); | |
7039 | *ts = rte_ns_to_timespec(ns); | |
7040 | ||
7041 | return 0; | |
7042 | } | |
7043 | ||
/*
 * ixgbe_timesync_enable - turn on IEEE 1588/PTP timestamping.
 *
 * Stops and zeroes the SYSTIME counter, re-enables it where it is off by
 * default, restarts the software timecounters, installs the L2 filter
 * for 1588 frames, and enables RX/TX timestamp capture.
 *
 * @return 0 always.
 */
static int
ixgbe_timesync_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;
	uint32_t tsauxc;

	/* Stop the timesync system time. */
	IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
	/* Reset the timesync system time value. */
	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);

	/* Enable system time for platforms where it isn't on by default. */
	tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
	tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
	IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);

	ixgbe_start_timecounters(dev);

	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
			(ETHER_TYPE_1588 |
			 IXGBE_ETQF_FILTER_EN |
			 IXGBE_ETQF_1588));

	/* Enable timestamping of received PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);

	/* Enable timestamping of transmitted PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);

	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
7084 | ||
/*
 * ixgbe_timesync_disable - turn off IEEE 1588/PTP timestamping.
 *
 * Disables TX and RX timestamp capture, clears the 1588 L2 filter, and
 * stops the SYSTIME counter (TIMINCA = 0).
 *
 * @return 0 always.
 */
static int
ixgbe_timesync_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;

	/* Disable timestamping of transmitted PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);

	/* Disable timestamping of received PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);

	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);

	/* Stop incrementating the System Time registers. */
	IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);

	return 0;
}
7109 | ||
/*
 * ixgbe_timesync_read_rx_timestamp - fetch the latched RX PTP timestamp.
 *
 * Returns -EINVAL when no valid timestamp is latched (TSYNCRXCTL_VALID
 * clear); otherwise converts the raw cycle count through the RX
 * timecounter and stores the result in @timestamp.
 */
static int
ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
				 struct timespec *timestamp,
				 uint32_t flags __rte_unused)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	uint32_t tsync_rxctl;
	uint64_t rx_tstamp_cycles;
	uint64_t ns;

	tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
		return -EINVAL;

	rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}
7132 | ||
/*
 * ixgbe_timesync_read_tx_timestamp - fetch the latched TX PTP timestamp.
 *
 * Returns -EINVAL when no valid timestamp is latched (TSYNCTXCTL_VALID
 * clear); otherwise converts the raw cycle count through the TX
 * timecounter and stores the result in @timestamp.
 */
static int
ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
				 struct timespec *timestamp)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_adapter *adapter =
		(struct ixgbe_adapter *)dev->data->dev_private;
	uint32_t tsync_txctl;
	uint64_t tx_tstamp_cycles;
	uint64_t ns;

	tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
		return -EINVAL;

	tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}
7154 | ||
7155 | static int | |
7156 | ixgbe_get_reg_length(struct rte_eth_dev *dev) | |
7157 | { | |
7158 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
7159 | int count = 0; | |
7160 | int g_ind = 0; | |
7161 | const struct reg_info *reg_group; | |
7162 | const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? | |
7163 | ixgbe_regs_mac_82598EB : ixgbe_regs_others; | |
7164 | ||
7165 | while ((reg_group = reg_set[g_ind++])) | |
7166 | count += ixgbe_regs_group_count(reg_group); | |
7167 | ||
7168 | return count; | |
7169 | } | |
7170 | ||
7171 | static int | |
7172 | ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) | |
7173 | { | |
7174 | int count = 0; | |
7175 | int g_ind = 0; | |
7176 | const struct reg_info *reg_group; | |
7177 | ||
7178 | while ((reg_group = ixgbevf_regs[g_ind++])) | |
7179 | count += ixgbe_regs_group_count(reg_group); | |
7180 | ||
7181 | return count; | |
7182 | } | |
7183 | ||
/*
 * ixgbe_get_regs - dump PF registers for ethtool-style diagnostics.
 *
 * With regs->data == NULL, only reports the required length and word
 * width.  Otherwise only a full dump is supported (length 0 or exactly
 * the full count); partial dumps return -ENOTSUP.  The version field
 * encodes mac type, revision and device id.
 */
static int
ixgbe_get_regs(struct rte_eth_dev *dev,
	      struct rte_dev_reg_info *regs)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *data = regs->data;
	int g_ind = 0;
	int count = 0;
	const struct reg_info *reg_group;
	const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
		    ixgbe_regs_mac_82598EB : ixgbe_regs_others;

	if (data == NULL) {
		regs->length = ixgbe_get_reg_length(dev);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;
		while ((reg_group = reg_set[g_ind++]))
			count += ixgbe_read_regs_group(dev, &data[count],
				reg_group);
		return 0;
	}

	return -ENOTSUP;
}
7215 | ||
/*
 * ixgbevf_get_regs - dump VF registers for ethtool-style diagnostics.
 *
 * Same contract as ixgbe_get_regs but walks the VF register-group table:
 * NULL data reports length/width only; only a full dump is supported,
 * otherwise -ENOTSUP.
 */
static int
ixgbevf_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *data = regs->data;
	int g_ind = 0;
	int count = 0;
	const struct reg_info *reg_group;

	if (data == NULL) {
		regs->length = ixgbevf_get_reg_length(dev);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Support only full register dump */
	if ((regs->length == 0) ||
	    (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
		regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;
		while ((reg_group = ixgbevf_regs[g_ind++]))
			count += ixgbe_read_regs_group(dev, &data[count],
				reg_group);
		return 0;
	}

	return -ENOTSUP;
}
7245 | ||
7246 | static int | |
7247 | ixgbe_get_eeprom_length(struct rte_eth_dev *dev) | |
7248 | { | |
7249 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
7250 | ||
7251 | /* Return unit is byte count */ | |
7252 | return hw->eeprom.word_size * 2; | |
7253 | } | |
7254 | ||
/*
 * ixgbe_get_eeprom - read a range of the device EEPROM.
 *
 * Byte offset/length from the request are converted to 16-bit words,
 * bounds-checked against the EEPROM size, and passed to the hardware
 * read_buffer op.  The magic field is filled with vendor/device id.
 *
 * @return 0 on success, -EINVAL on out-of-range request, or the
 * read_buffer status.
 */
static int
ixgbe_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *in_eeprom)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	uint16_t *data = in_eeprom->data;
	int first, length;

	/* Byte offsets/lengths -> 16-bit word units. */
	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if ((first > hw->eeprom.word_size) ||
	    ((first + length) > hw->eeprom.word_size))
		return -EINVAL;

	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	return eeprom->ops.read_buffer(hw, first, length, data);
}
7274 | ||
/*
 * ixgbe_set_eeprom - write a range of the device EEPROM.
 *
 * Mirror of ixgbe_get_eeprom: byte offset/length converted to 16-bit
 * words, bounds-checked, then handed to the hardware write_buffer op.
 *
 * @return 0 on success, -EINVAL on out-of-range request, or the
 * write_buffer status.
 */
static int
ixgbe_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *in_eeprom)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	uint16_t *data = in_eeprom->data;
	int first, length;

	/* Byte offsets/lengths -> 16-bit word units. */
	first = in_eeprom->offset >> 1;
	length = in_eeprom->length >> 1;
	if ((first > hw->eeprom.word_size) ||
	    ((first + length) > hw->eeprom.word_size))
		return -EINVAL;

	in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	return eeprom->ops.write_buffer(hw, first, length, data);
}
7294 | ||
11fdf7f2 TL |
/* Identify the plugged SFP module's EEPROM layout (SFF-8079 vs
 * SFF-8472) so callers can size their dump buffer.  Returns -EIO when
 * the module's I2C EEPROM cannot be read.
 */
static int
ixgbe_get_module_info(struct rte_eth_dev *dev,
		      struct rte_eth_dev_module_info *modinfo)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t status;
	uint8_t sff8472_rev, addr_mode;
	bool page_swap = false;

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_COMP,
					     &sff8472_rev);
	if (status != 0)
		return -EIO;

	/* addressing mode is not supported */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_SWAP,
					     &addr_mode);
	if (status != 0)
		return -EIO;

	/* Modules that require an address change to reach page 0xA2 are
	 * treated as not supporting SFF-8472 (fall back to SFF-8079).
	 */
	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
		PMD_DRV_LOG(ERR,
			    "Address change required to access page 0xA2, "
			    "but not supported. Please report the module "
			    "type to the driver maintainers.");
		page_swap = true;
	}

	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
		/* We have a SFP, but it does not support SFF-8472 */
		modinfo->type = RTE_ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have a SFP which supports a revision of SFF-8472. */
		modinfo->type = RTE_ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}
7338 | ||
/* Dump bytes [offset, offset+length) of the SFP module EEPROM into
 * info->data.  Bytes below RTE_ETH_MODULE_SFF_8079_LEN are fetched via
 * read_i2c_eeprom, the rest via the SFF-8472 read op.  Returns -EINVAL
 * for a zero-length request and -EIO on any failed byte read.
 */
static int
ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
			struct rte_dev_eeprom_info *info)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* Preset to an error in case the loop body never runs. */
	uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID;
	uint8_t databyte = 0xFF;
	uint8_t *data = info->data;
	uint32_t i = 0;

	if (info->length == 0)
		return -EINVAL;

	/* One I2C transaction per byte; abort on the first failure. */
	for (i = info->offset; i < info->offset + info->length; i++) {
		if (i < RTE_ETH_MODULE_SFF_8079_LEN)
			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);

		if (status != 0)
			return -EIO;

		data[i - info->offset] = databyte;
	}

	return 0;
}
7366 | ||
7c673cae FG |
7367 | uint16_t |
7368 | ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { | |
7369 | switch (mac_type) { | |
7370 | case ixgbe_mac_X550: | |
7371 | case ixgbe_mac_X550EM_x: | |
7372 | case ixgbe_mac_X550EM_a: | |
7373 | return ETH_RSS_RETA_SIZE_512; | |
7374 | case ixgbe_mac_X550_vf: | |
7375 | case ixgbe_mac_X550EM_x_vf: | |
7376 | case ixgbe_mac_X550EM_a_vf: | |
7377 | return ETH_RSS_RETA_SIZE_64; | |
7378 | default: | |
7379 | return ETH_RSS_RETA_SIZE_128; | |
7380 | } | |
7381 | } | |
7382 | ||
7383 | uint32_t | |
7384 | ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { | |
7385 | switch (mac_type) { | |
7386 | case ixgbe_mac_X550: | |
7387 | case ixgbe_mac_X550EM_x: | |
7388 | case ixgbe_mac_X550EM_a: | |
7389 | if (reta_idx < ETH_RSS_RETA_SIZE_128) | |
7390 | return IXGBE_RETA(reta_idx >> 2); | |
7391 | else | |
7392 | return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2); | |
7393 | case ixgbe_mac_X550_vf: | |
7394 | case ixgbe_mac_X550EM_x_vf: | |
7395 | case ixgbe_mac_X550EM_a_vf: | |
7396 | return IXGBE_VFRETA(reta_idx >> 2); | |
7397 | default: | |
7398 | return IXGBE_RETA(reta_idx >> 2); | |
7399 | } | |
7400 | } | |
7401 | ||
7402 | uint32_t | |
7403 | ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { | |
7404 | switch (mac_type) { | |
7405 | case ixgbe_mac_X550_vf: | |
7406 | case ixgbe_mac_X550EM_x_vf: | |
7407 | case ixgbe_mac_X550EM_a_vf: | |
7408 | return IXGBE_VFMRQC; | |
7409 | default: | |
7410 | return IXGBE_MRQC; | |
7411 | } | |
7412 | } | |
7413 | ||
7414 | uint32_t | |
7415 | ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { | |
7416 | switch (mac_type) { | |
7417 | case ixgbe_mac_X550_vf: | |
7418 | case ixgbe_mac_X550EM_x_vf: | |
7419 | case ixgbe_mac_X550EM_a_vf: | |
7420 | return IXGBE_VFRSSRK(i); | |
7421 | default: | |
7422 | return IXGBE_RSSRK(i); | |
7423 | } | |
7424 | } | |
7425 | ||
7426 | bool | |
7427 | ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { | |
7428 | switch (mac_type) { | |
7429 | case ixgbe_mac_82599_vf: | |
7430 | case ixgbe_mac_X540_vf: | |
7431 | return 0; | |
7432 | default: | |
7433 | return 1; | |
7434 | } | |
7435 | } | |
7436 | ||
/* Fill dcb_info with the current DCB traffic-class configuration:
 * priority->TC map, per-TC RX/TX queue ranges, and TC bandwidth shares.
 * The queue layouts are the fixed mappings the PMD programs for each
 * mode (VMDq+DCB, SR-IOV, or plain 4/8-TC DCB).  Always returns 0.
 */
static int
ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
			struct rte_eth_dcb_info *dcb_info)
{
	struct ixgbe_dcb_config *dcb_config =
		IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
	struct ixgbe_dcb_tc_config *tc;
	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
	uint8_t nb_tcs;
	uint8_t i, j;

	/* The configured TC count only applies when DCB RX is enabled. */
	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
		dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
	else
		dcb_info->nb_tcs = 1;

	tc_queue = &dcb_info->tc_queue;
	nb_tcs = dcb_info->nb_tcs;

	if (dcb_config->vt_mode) {	/* vt is enabled*/
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
			&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
		/* Priority -> TC mapping comes from the VMDq+DCB config. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
			/* SR-IOV active: report only pool 0, one queue
			 * per TC.
			 */
			for (j = 0; j < nb_tcs; j++) {
				tc_queue->tc_rxq[0][j].base = j;
				tc_queue->tc_rxq[0][j].nb_queue = 1;
				tc_queue->tc_txq[0][j].base = j;
				tc_queue->tc_txq[0][j].nb_queue = 1;
			}
		} else {
			/* No SR-IOV: queues are laid out pool-major with
			 * one queue per (pool, TC) pair.
			 */
			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
				for (j = 0; j < nb_tcs; j++) {
					tc_queue->tc_rxq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_rxq[i][j].nb_queue = 1;
					tc_queue->tc_txq[i][j].base =
						i * nb_tcs + j;
					tc_queue->tc_txq[i][j].nb_queue = 1;
				}
			}
		}
	} else { /* vt is disabled*/
		struct rte_eth_dcb_rx_conf *rx_conf =
			&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
			dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
		if (dcb_info->nb_tcs == ETH_4_TCS) {
			/* Fixed 4-TC layout: RX gives each TC 16 queues
			 * at 32-queue strides; TX uses the unequal
			 * 64/32/16/16 split below.
			 */
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 64;
			dcb_info->tc_queue.tc_txq[0][2].base = 96;
			dcb_info->tc_queue.tc_txq[0][3].base = 112;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
		} else if (dcb_info->nb_tcs == ETH_8_TCS) {
			/* Fixed 8-TC layout: RX 16 queues per TC; TX uses
			 * the 32/32/16/16/8/8/8/8 split below.
			 */
			for (i = 0; i < dcb_info->nb_tcs; i++) {
				dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
				dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
			}
			dcb_info->tc_queue.tc_txq[0][0].base = 0;
			dcb_info->tc_queue.tc_txq[0][1].base = 32;
			dcb_info->tc_queue.tc_txq[0][2].base = 64;
			dcb_info->tc_queue.tc_txq[0][3].base = 80;
			dcb_info->tc_queue.tc_txq[0][4].base = 96;
			dcb_info->tc_queue.tc_txq[0][5].base = 104;
			dcb_info->tc_queue.tc_txq[0][6].base = 112;
			dcb_info->tc_queue.tc_txq[0][7].base = 120;
			dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
			dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
			dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
			dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
		}
	}
	/* Per-TC bandwidth percentages from the TX path configuration. */
	for (i = 0; i < dcb_info->nb_tcs; i++) {
		tc = &dcb_config->tc_config[i];
		dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
	}
	return 0;
}
7527 | ||
/* Update e-tag ether type.
 * Only the X550 family implements E-tag; others get -ENOTSUP.
 */
static int
ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
			    uint16_t ether_type)
{
	uint32_t etag_etype;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	/* Read-modify-write only the ether-type field of ETAG_ETYPE. */
	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
	etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
	etag_etype |= ether_type;
	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
7549 | ||
7550 | /* Config l2 tunnel ether type */ | |
7551 | static int | |
7552 | ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev, | |
7553 | struct rte_eth_l2_tunnel_conf *l2_tunnel) | |
7554 | { | |
7555 | int ret = 0; | |
7556 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
11fdf7f2 TL |
7557 | struct ixgbe_l2_tn_info *l2_tn_info = |
7558 | IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); | |
7c673cae FG |
7559 | |
7560 | if (l2_tunnel == NULL) | |
7561 | return -EINVAL; | |
7562 | ||
7563 | switch (l2_tunnel->l2_tunnel_type) { | |
7564 | case RTE_L2_TUNNEL_TYPE_E_TAG: | |
11fdf7f2 | 7565 | l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type; |
7c673cae FG |
7566 | ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type); |
7567 | break; | |
7568 | default: | |
7569 | PMD_DRV_LOG(ERR, "Invalid tunnel type"); | |
7570 | ret = -EINVAL; | |
7571 | break; | |
7572 | } | |
7573 | ||
7574 | return ret; | |
7575 | } | |
7576 | ||
/* Enable e-tag tunnel.
 * Sets the "valid" bit in ETAG_ETYPE; X550 family only.
 */
static int
ixgbe_e_tag_enable(struct ixgbe_hw *hw)
{
	uint32_t etag_etype;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
	etag_etype |= IXGBE_ETAG_ETYPE_VALID;
	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
7596 | ||
7597 | /* Enable l2 tunnel */ | |
7598 | static int | |
7599 | ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev, | |
7600 | enum rte_eth_tunnel_type l2_tunnel_type) | |
7601 | { | |
7602 | int ret = 0; | |
7603 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
11fdf7f2 TL |
7604 | struct ixgbe_l2_tn_info *l2_tn_info = |
7605 | IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); | |
7c673cae FG |
7606 | |
7607 | switch (l2_tunnel_type) { | |
7608 | case RTE_L2_TUNNEL_TYPE_E_TAG: | |
11fdf7f2 | 7609 | l2_tn_info->e_tag_en = TRUE; |
7c673cae FG |
7610 | ret = ixgbe_e_tag_enable(hw); |
7611 | break; | |
7612 | default: | |
7613 | PMD_DRV_LOG(ERR, "Invalid tunnel type"); | |
7614 | ret = -EINVAL; | |
7615 | break; | |
7616 | } | |
7617 | ||
7618 | return ret; | |
7619 | } | |
7620 | ||
/* Disable e-tag tunnel.
 * Clears the "valid" bit in ETAG_ETYPE; X550 family only.
 */
static int
ixgbe_e_tag_disable(struct ixgbe_hw *hw)
{
	uint32_t etag_etype;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
	etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
	IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
7640 | ||
7641 | /* Disable l2 tunnel */ | |
7642 | static int | |
7643 | ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev, | |
7644 | enum rte_eth_tunnel_type l2_tunnel_type) | |
7645 | { | |
7646 | int ret = 0; | |
7647 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
11fdf7f2 TL |
7648 | struct ixgbe_l2_tn_info *l2_tn_info = |
7649 | IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); | |
7c673cae FG |
7650 | |
7651 | switch (l2_tunnel_type) { | |
7652 | case RTE_L2_TUNNEL_TYPE_E_TAG: | |
11fdf7f2 | 7653 | l2_tn_info->e_tag_en = FALSE; |
7c673cae FG |
7654 | ret = ixgbe_e_tag_disable(hw); |
7655 | break; | |
7656 | default: | |
7657 | PMD_DRV_LOG(ERR, "Invalid tunnel type"); | |
7658 | ret = -EINVAL; | |
7659 | break; | |
7660 | } | |
7661 | ||
7662 | return ret; | |
7663 | } | |
7664 | ||
/* Remove the RAR-based hardware filter matching this tunnel id.
 * Returns 0 whether or not a matching entry was found (best effort);
 * -ENOTSUP on non-X550 MACs.
 */
static int
ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
		       struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t i, rar_entries;
	uint32_t rar_low, rar_high;

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	rar_entries = ixgbe_get_num_rx_addrs(hw);

	/* Scan the RAR table (entry 0 is skipped) for a valid entry with
	 * the alternate address-type bit set whose low bits carry this
	 * tunnel id; clear both halves and drop its pool association.
	 */
	for (i = 1; i < rar_entries; i++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
		rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i));
		if ((rar_high & IXGBE_RAH_AV) &&
		    (rar_high & IXGBE_RAH_ADTYPE) &&
		    ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
		     l2_tunnel->tunnel_id)) {
			IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);

			ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);

			return ret;
		}
	}

	return ret;
}
7700 | ||
7701 | static int | |
7702 | ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, | |
7703 | struct rte_eth_l2_tunnel_conf *l2_tunnel) | |
7704 | { | |
7705 | int ret = 0; | |
7706 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
7707 | uint32_t i, rar_entries; | |
7708 | uint32_t rar_low, rar_high; | |
7709 | ||
7710 | if (hw->mac.type != ixgbe_mac_X550 && | |
7711 | hw->mac.type != ixgbe_mac_X550EM_x && | |
7712 | hw->mac.type != ixgbe_mac_X550EM_a) { | |
7713 | return -ENOTSUP; | |
7714 | } | |
7715 | ||
7716 | /* One entry for one tunnel. Try to remove potential existing entry. */ | |
7717 | ixgbe_e_tag_filter_del(dev, l2_tunnel); | |
7718 | ||
7719 | rar_entries = ixgbe_get_num_rx_addrs(hw); | |
7720 | ||
7721 | for (i = 1; i < rar_entries; i++) { | |
7722 | rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); | |
7723 | if (rar_high & IXGBE_RAH_AV) { | |
7724 | continue; | |
7725 | } else { | |
7726 | ixgbe_set_vmdq(hw, i, l2_tunnel->pool); | |
7727 | rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; | |
7728 | rar_low = l2_tunnel->tunnel_id; | |
7729 | ||
7730 | IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); | |
7731 | IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); | |
7732 | ||
7733 | return ret; | |
7734 | } | |
7735 | } | |
7736 | ||
7737 | PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." | |
7738 | " Please remove a rule before adding a new one."); | |
7739 | return -EINVAL; | |
7740 | } | |
7741 | ||
11fdf7f2 TL |
7742 | static inline struct ixgbe_l2_tn_filter * |
7743 | ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info, | |
7744 | struct ixgbe_l2_tn_key *key) | |
7745 | { | |
7746 | int ret; | |
7747 | ||
7748 | ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); | |
7749 | if (ret < 0) | |
7750 | return NULL; | |
7751 | ||
7752 | return l2_tn_info->hash_map[ret]; | |
7753 | } | |
7754 | ||
7755 | static inline int | |
7756 | ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, | |
7757 | struct ixgbe_l2_tn_filter *l2_tn_filter) | |
7758 | { | |
7759 | int ret; | |
7760 | ||
7761 | ret = rte_hash_add_key(l2_tn_info->hash_handle, | |
7762 | &l2_tn_filter->key); | |
7763 | ||
7764 | if (ret < 0) { | |
7765 | PMD_DRV_LOG(ERR, | |
7766 | "Failed to insert L2 tunnel filter" | |
7767 | " to hash table %d!", | |
7768 | ret); | |
7769 | return ret; | |
7770 | } | |
7771 | ||
7772 | l2_tn_info->hash_map[ret] = l2_tn_filter; | |
7773 | ||
7774 | TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); | |
7775 | ||
7776 | return 0; | |
7777 | } | |
7778 | ||
7779 | static inline int | |
7780 | ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, | |
7781 | struct ixgbe_l2_tn_key *key) | |
7782 | { | |
7783 | int ret; | |
7784 | struct ixgbe_l2_tn_filter *l2_tn_filter; | |
7785 | ||
7786 | ret = rte_hash_del_key(l2_tn_info->hash_handle, key); | |
7787 | ||
7788 | if (ret < 0) { | |
7789 | PMD_DRV_LOG(ERR, | |
7790 | "No such L2 tunnel filter to delete %d!", | |
7791 | ret); | |
7792 | return ret; | |
7793 | } | |
7794 | ||
7795 | l2_tn_filter = l2_tn_info->hash_map[ret]; | |
7796 | l2_tn_info->hash_map[ret] = NULL; | |
7797 | ||
7798 | TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); | |
7799 | rte_free(l2_tn_filter); | |
7800 | ||
7801 | return 0; | |
7802 | } | |
7803 | ||
7c673cae | 7804 | /* Add l2 tunnel filter */ |
11fdf7f2 | 7805 | int |
7c673cae | 7806 | ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, |
11fdf7f2 TL |
7807 | struct rte_eth_l2_tunnel_conf *l2_tunnel, |
7808 | bool restore) | |
7c673cae | 7809 | { |
11fdf7f2 TL |
7810 | int ret; |
7811 | struct ixgbe_l2_tn_info *l2_tn_info = | |
7812 | IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); | |
7813 | struct ixgbe_l2_tn_key key; | |
7814 | struct ixgbe_l2_tn_filter *node; | |
7815 | ||
7816 | if (!restore) { | |
7817 | key.l2_tn_type = l2_tunnel->l2_tunnel_type; | |
7818 | key.tn_id = l2_tunnel->tunnel_id; | |
7819 | ||
7820 | node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key); | |
7821 | ||
7822 | if (node) { | |
7823 | PMD_DRV_LOG(ERR, | |
7824 | "The L2 tunnel filter already exists!"); | |
7825 | return -EINVAL; | |
7826 | } | |
7827 | ||
7828 | node = rte_zmalloc("ixgbe_l2_tn", | |
7829 | sizeof(struct ixgbe_l2_tn_filter), | |
7830 | 0); | |
7831 | if (!node) | |
7832 | return -ENOMEM; | |
7833 | ||
7834 | rte_memcpy(&node->key, | |
7835 | &key, | |
7836 | sizeof(struct ixgbe_l2_tn_key)); | |
7837 | node->pool = l2_tunnel->pool; | |
7838 | ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node); | |
7839 | if (ret < 0) { | |
7840 | rte_free(node); | |
7841 | return ret; | |
7842 | } | |
7843 | } | |
7c673cae FG |
7844 | |
7845 | switch (l2_tunnel->l2_tunnel_type) { | |
7846 | case RTE_L2_TUNNEL_TYPE_E_TAG: | |
7847 | ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); | |
7848 | break; | |
7849 | default: | |
7850 | PMD_DRV_LOG(ERR, "Invalid tunnel type"); | |
7851 | ret = -EINVAL; | |
7852 | break; | |
7853 | } | |
7854 | ||
11fdf7f2 TL |
7855 | if ((!restore) && (ret < 0)) |
7856 | (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key); | |
7857 | ||
7c673cae FG |
7858 | return ret; |
7859 | } | |
7860 | ||
7861 | /* Delete l2 tunnel filter */ | |
11fdf7f2 | 7862 | int |
7c673cae FG |
7863 | ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, |
7864 | struct rte_eth_l2_tunnel_conf *l2_tunnel) | |
7865 | { | |
11fdf7f2 TL |
7866 | int ret; |
7867 | struct ixgbe_l2_tn_info *l2_tn_info = | |
7868 | IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); | |
7869 | struct ixgbe_l2_tn_key key; | |
7870 | ||
7871 | key.l2_tn_type = l2_tunnel->l2_tunnel_type; | |
7872 | key.tn_id = l2_tunnel->tunnel_id; | |
7873 | ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key); | |
7874 | if (ret < 0) | |
7875 | return ret; | |
7c673cae FG |
7876 | |
7877 | switch (l2_tunnel->l2_tunnel_type) { | |
7878 | case RTE_L2_TUNNEL_TYPE_E_TAG: | |
7879 | ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); | |
7880 | break; | |
7881 | default: | |
7882 | PMD_DRV_LOG(ERR, "Invalid tunnel type"); | |
7883 | ret = -EINVAL; | |
7884 | break; | |
7885 | } | |
7886 | ||
7887 | return ret; | |
7888 | } | |
7889 | ||
7890 | /** | |
7891 | * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter. | |
7892 | * @dev: pointer to rte_eth_dev structure | |
7893 | * @filter_op:operation will be taken. | |
7894 | * @arg: a pointer to specific structure corresponding to the filter_op | |
7895 | */ | |
7896 | static int | |
7897 | ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, | |
7898 | enum rte_filter_op filter_op, | |
7899 | void *arg) | |
7900 | { | |
11fdf7f2 | 7901 | int ret; |
7c673cae FG |
7902 | |
7903 | if (filter_op == RTE_ETH_FILTER_NOP) | |
7904 | return 0; | |
7905 | ||
7906 | if (arg == NULL) { | |
7907 | PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", | |
7908 | filter_op); | |
7909 | return -EINVAL; | |
7910 | } | |
7911 | ||
7912 | switch (filter_op) { | |
7913 | case RTE_ETH_FILTER_ADD: | |
7914 | ret = ixgbe_dev_l2_tunnel_filter_add | |
7915 | (dev, | |
11fdf7f2 TL |
7916 | (struct rte_eth_l2_tunnel_conf *)arg, |
7917 | FALSE); | |
7c673cae FG |
7918 | break; |
7919 | case RTE_ETH_FILTER_DELETE: | |
7920 | ret = ixgbe_dev_l2_tunnel_filter_del | |
7921 | (dev, | |
7922 | (struct rte_eth_l2_tunnel_conf *)arg); | |
7923 | break; | |
7924 | default: | |
7925 | PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); | |
7926 | ret = -EINVAL; | |
7927 | break; | |
7928 | } | |
7929 | return ret; | |
7930 | } | |
7931 | ||
/* Switch VT_CTL pooling between e-tag mode (en) and cleared (dis).
 * X550 family only.
 */
static int
ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
{
	int ret = 0;
	uint32_t ctrl;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	/* Clear the pooling-mode field; when enabling, select the e-tag
	 * mode, otherwise leave the field cleared.
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
	if (en)
		ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);

	return ret;
}
7953 | ||
7954 | /* Enable l2 tunnel forwarding */ | |
7955 | static int | |
7956 | ixgbe_dev_l2_tunnel_forwarding_enable | |
7957 | (struct rte_eth_dev *dev, | |
7958 | enum rte_eth_tunnel_type l2_tunnel_type) | |
7959 | { | |
11fdf7f2 TL |
7960 | struct ixgbe_l2_tn_info *l2_tn_info = |
7961 | IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); | |
7c673cae FG |
7962 | int ret = 0; |
7963 | ||
7964 | switch (l2_tunnel_type) { | |
7965 | case RTE_L2_TUNNEL_TYPE_E_TAG: | |
11fdf7f2 | 7966 | l2_tn_info->e_tag_fwd_en = TRUE; |
7c673cae FG |
7967 | ret = ixgbe_e_tag_forwarding_en_dis(dev, 1); |
7968 | break; | |
7969 | default: | |
7970 | PMD_DRV_LOG(ERR, "Invalid tunnel type"); | |
7971 | ret = -EINVAL; | |
7972 | break; | |
7973 | } | |
7974 | ||
7975 | return ret; | |
7976 | } | |
7977 | ||
7978 | /* Disable l2 tunnel forwarding */ | |
7979 | static int | |
7980 | ixgbe_dev_l2_tunnel_forwarding_disable | |
7981 | (struct rte_eth_dev *dev, | |
7982 | enum rte_eth_tunnel_type l2_tunnel_type) | |
7983 | { | |
11fdf7f2 TL |
7984 | struct ixgbe_l2_tn_info *l2_tn_info = |
7985 | IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); | |
7c673cae FG |
7986 | int ret = 0; |
7987 | ||
7988 | switch (l2_tunnel_type) { | |
7989 | case RTE_L2_TUNNEL_TYPE_E_TAG: | |
11fdf7f2 | 7990 | l2_tn_info->e_tag_fwd_en = FALSE; |
7c673cae FG |
7991 | ret = ixgbe_e_tag_forwarding_en_dis(dev, 0); |
7992 | break; | |
7993 | default: | |
7994 | PMD_DRV_LOG(ERR, "Invalid tunnel type"); | |
7995 | ret = -EINVAL; | |
7996 | break; | |
7997 | } | |
7998 | ||
7999 | return ret; | |
8000 | } | |
8001 | ||
/* Program per-VF e-tag insertion: VMTIR holds the tag to insert
 * (zero when disabling) and VMVIR's tag-action field selects e-tag
 * insertion.  X550 family only; the VF id must be within max_vfs.
 */
static int
ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
			     struct rte_eth_l2_tunnel_conf *l2_tunnel,
			     bool en)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	int ret = 0;
	uint32_t vmtir, vmvir;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Reject VF ids beyond what the PCI device exposes. */
	if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
		PMD_DRV_LOG(ERR,
			    "VF id %u should be less than %u",
			    l2_tunnel->vf_id,
			    pci_dev->max_vfs);
		return -EINVAL;
	}

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	/* Tag to insert; cleared when disabling. */
	if (en)
		vmtir = l2_tunnel->tunnel_id;
	else
		vmtir = 0;

	IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);

	/* Read-modify-write the tag-action field for this VF. */
	vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
	vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
	if (en)
		vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);

	return ret;
}
8041 | ||
8042 | /* Enable l2 tunnel tag insertion */ | |
8043 | static int | |
8044 | ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev, | |
8045 | struct rte_eth_l2_tunnel_conf *l2_tunnel) | |
8046 | { | |
8047 | int ret = 0; | |
8048 | ||
8049 | switch (l2_tunnel->l2_tunnel_type) { | |
8050 | case RTE_L2_TUNNEL_TYPE_E_TAG: | |
8051 | ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1); | |
8052 | break; | |
8053 | default: | |
8054 | PMD_DRV_LOG(ERR, "Invalid tunnel type"); | |
8055 | ret = -EINVAL; | |
8056 | break; | |
8057 | } | |
8058 | ||
8059 | return ret; | |
8060 | } | |
8061 | ||
8062 | /* Disable l2 tunnel tag insertion */ | |
8063 | static int | |
8064 | ixgbe_dev_l2_tunnel_insertion_disable | |
8065 | (struct rte_eth_dev *dev, | |
8066 | struct rte_eth_l2_tunnel_conf *l2_tunnel) | |
8067 | { | |
8068 | int ret = 0; | |
8069 | ||
8070 | switch (l2_tunnel->l2_tunnel_type) { | |
8071 | case RTE_L2_TUNNEL_TYPE_E_TAG: | |
8072 | ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0); | |
8073 | break; | |
8074 | default: | |
8075 | PMD_DRV_LOG(ERR, "Invalid tunnel type"); | |
8076 | ret = -EINVAL; | |
8077 | break; | |
8078 | } | |
8079 | ||
8080 | return ret; | |
8081 | } | |
8082 | ||
/* Toggle the strip-tag bit in the QDE register (X550 family only).
 * The READ bit is cleared and the WRITE bit set so the register
 * access performs a write.
 */
static int
ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
			     bool en)
{
	int ret = 0;
	uint32_t qde;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		return -ENOTSUP;
	}

	qde = IXGBE_READ_REG(hw, IXGBE_QDE);
	if (en)
		qde |= IXGBE_QDE_STRIP_TAG;
	else
		qde &= ~IXGBE_QDE_STRIP_TAG;
	qde &= ~IXGBE_QDE_READ;
	qde |= IXGBE_QDE_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);

	return ret;
}
8108 | ||
8109 | /* Enable l2 tunnel tag stripping */ | |
8110 | static int | |
8111 | ixgbe_dev_l2_tunnel_stripping_enable | |
8112 | (struct rte_eth_dev *dev, | |
8113 | enum rte_eth_tunnel_type l2_tunnel_type) | |
8114 | { | |
8115 | int ret = 0; | |
8116 | ||
8117 | switch (l2_tunnel_type) { | |
8118 | case RTE_L2_TUNNEL_TYPE_E_TAG: | |
8119 | ret = ixgbe_e_tag_stripping_en_dis(dev, 1); | |
8120 | break; | |
8121 | default: | |
8122 | PMD_DRV_LOG(ERR, "Invalid tunnel type"); | |
8123 | ret = -EINVAL; | |
8124 | break; | |
8125 | } | |
8126 | ||
8127 | return ret; | |
8128 | } | |
8129 | ||
8130 | /* Disable l2 tunnel tag stripping */ | |
8131 | static int | |
8132 | ixgbe_dev_l2_tunnel_stripping_disable | |
8133 | (struct rte_eth_dev *dev, | |
8134 | enum rte_eth_tunnel_type l2_tunnel_type) | |
8135 | { | |
8136 | int ret = 0; | |
8137 | ||
8138 | switch (l2_tunnel_type) { | |
8139 | case RTE_L2_TUNNEL_TYPE_E_TAG: | |
8140 | ret = ixgbe_e_tag_stripping_en_dis(dev, 0); | |
8141 | break; | |
8142 | default: | |
8143 | PMD_DRV_LOG(ERR, "Invalid tunnel type"); | |
8144 | ret = -EINVAL; | |
8145 | break; | |
8146 | } | |
8147 | ||
8148 | return ret; | |
8149 | } | |
8150 | ||
/* Enable/disable l2 tunnel offload functions.
 * Each bit in mask selects one offload (enable, insertion, stripping,
 * forwarding) to switch to state `en`.
 */
static int
ixgbe_dev_l2_tunnel_offload_set
	(struct rte_eth_dev *dev,
	 struct rte_eth_l2_tunnel_conf *l2_tunnel,
	 uint32_t mask,
	 uint8_t en)
{
	int ret = 0;

	if (l2_tunnel == NULL)
		return -EINVAL;

	/* NOTE(review): each processed mask bit overwrites ret, so the
	 * return value reflects only the last offload handled; earlier
	 * failures are lost — confirm this is intended.
	 */
	ret = -EINVAL;
	if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_enable(
				dev,
				l2_tunnel->l2_tunnel_type);
		else
			ret = ixgbe_dev_l2_tunnel_disable(
				dev,
				l2_tunnel->l2_tunnel_type);
	}

	if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_insertion_enable(
				dev,
				l2_tunnel);
		else
			ret = ixgbe_dev_l2_tunnel_insertion_disable(
				dev,
				l2_tunnel);
	}

	if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_stripping_enable(
				dev,
				l2_tunnel->l2_tunnel_type);
		else
			ret = ixgbe_dev_l2_tunnel_stripping_disable(
				dev,
				l2_tunnel->l2_tunnel_type);
	}

	if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_forwarding_enable(
				dev,
				l2_tunnel->l2_tunnel_type);
		else
			ret = ixgbe_dev_l2_tunnel_forwarding_disable(
				dev,
				l2_tunnel->l2_tunnel_type);
	}

	return ret;
}
8211 | ||
/* Write the VxLAN UDP port into its dedicated register and flush. */
static int
ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
			uint16_t port)
{
	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
8221 | ||
8222 | /* There's only one register for VxLAN UDP port. | |
8223 | * So, we cannot add several ports. Will update it. | |
8224 | */ | |
8225 | static int | |
8226 | ixgbe_add_vxlan_port(struct ixgbe_hw *hw, | |
8227 | uint16_t port) | |
8228 | { | |
8229 | if (port == 0) { | |
8230 | PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); | |
8231 | return -EINVAL; | |
8232 | } | |
8233 | ||
8234 | return ixgbe_update_vxlan_port(hw, port); | |
8235 | } | |
8236 | ||
8237 | /* We cannot delete the VxLAN port. For there's a register for VxLAN | |
8238 | * UDP port, it must have a value. | |
8239 | * So, will reset it to the original value 0. | |
8240 | */ | |
8241 | static int | |
8242 | ixgbe_del_vxlan_port(struct ixgbe_hw *hw, | |
8243 | uint16_t port) | |
8244 | { | |
8245 | uint16_t cur_port; | |
8246 | ||
8247 | cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL); | |
8248 | ||
8249 | if (cur_port != port) { | |
8250 | PMD_DRV_LOG(ERR, "Port %u does not exist.", port); | |
8251 | return -EINVAL; | |
8252 | } | |
8253 | ||
8254 | return ixgbe_update_vxlan_port(hw, 0); | |
8255 | } | |
8256 | ||
8257 | /* Add UDP tunneling port */ | |
8258 | static int | |
8259 | ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, | |
8260 | struct rte_eth_udp_tunnel *udp_tunnel) | |
8261 | { | |
8262 | int ret = 0; | |
8263 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
8264 | ||
8265 | if (hw->mac.type != ixgbe_mac_X550 && | |
8266 | hw->mac.type != ixgbe_mac_X550EM_x && | |
8267 | hw->mac.type != ixgbe_mac_X550EM_a) { | |
8268 | return -ENOTSUP; | |
8269 | } | |
8270 | ||
8271 | if (udp_tunnel == NULL) | |
8272 | return -EINVAL; | |
8273 | ||
8274 | switch (udp_tunnel->prot_type) { | |
8275 | case RTE_TUNNEL_TYPE_VXLAN: | |
8276 | ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port); | |
8277 | break; | |
8278 | ||
8279 | case RTE_TUNNEL_TYPE_GENEVE: | |
8280 | case RTE_TUNNEL_TYPE_TEREDO: | |
8281 | PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); | |
8282 | ret = -EINVAL; | |
8283 | break; | |
8284 | ||
8285 | default: | |
8286 | PMD_DRV_LOG(ERR, "Invalid tunnel type"); | |
8287 | ret = -EINVAL; | |
8288 | break; | |
8289 | } | |
8290 | ||
8291 | return ret; | |
8292 | } | |
8293 | ||
8294 | /* Remove UDP tunneling port */ | |
8295 | static int | |
8296 | ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, | |
8297 | struct rte_eth_udp_tunnel *udp_tunnel) | |
8298 | { | |
8299 | int ret = 0; | |
8300 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
8301 | ||
8302 | if (hw->mac.type != ixgbe_mac_X550 && | |
8303 | hw->mac.type != ixgbe_mac_X550EM_x && | |
8304 | hw->mac.type != ixgbe_mac_X550EM_a) { | |
8305 | return -ENOTSUP; | |
8306 | } | |
8307 | ||
8308 | if (udp_tunnel == NULL) | |
8309 | return -EINVAL; | |
8310 | ||
8311 | switch (udp_tunnel->prot_type) { | |
8312 | case RTE_TUNNEL_TYPE_VXLAN: | |
8313 | ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port); | |
8314 | break; | |
8315 | case RTE_TUNNEL_TYPE_GENEVE: | |
8316 | case RTE_TUNNEL_TYPE_TEREDO: | |
8317 | PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); | |
8318 | ret = -EINVAL; | |
8319 | break; | |
8320 | default: | |
8321 | PMD_DRV_LOG(ERR, "Invalid tunnel type"); | |
8322 | ret = -EINVAL; | |
8323 | break; | |
8324 | } | |
8325 | ||
8326 | return ret; | |
8327 | } | |
8328 | ||
9f95a23c TL |
8329 | static void |
8330 | ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev) | |
8331 | { | |
8332 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
8333 | ||
8334 | hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC); | |
8335 | } | |
8336 | ||
8337 | static void | |
8338 | ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) | |
8339 | { | |
8340 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
8341 | ||
8342 | hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE); | |
8343 | } | |
8344 | ||
7c673cae FG |
8345 | static void |
8346 | ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) | |
8347 | { | |
8348 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
8349 | ||
8350 | hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI); | |
8351 | } | |
8352 | ||
8353 | static void | |
8354 | ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) | |
8355 | { | |
8356 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
8357 | ||
11fdf7f2 | 8358 | hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI); |
7c673cae FG |
8359 | } |
8360 | ||
/* Check the VF mailbox for a pending control message from the PF and,
 * on a PF-reset notification, propagate RTE_ETH_EVENT_INTR_RESET to the
 * application's registered callbacks.
 */
static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 in_msg = 0;

	/* peek the message first: read the mailbox memory directly so the
	 * message is inspected without consuming/acking it yet
	 */
	in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);

	/* PF reset VF event */
	if (in_msg == IXGBE_PF_CONTROL_MSG) {
		/* dummy mbx read to ack pf; if the read fails, skip the
		 * callback rather than report a reset we could not ack
		 */
		if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
			return;
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
					      NULL);
	}
}
8378 | ||
8379 | static int | |
8380 | ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) | |
8381 | { | |
8382 | uint32_t eicr; | |
8383 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
8384 | struct ixgbe_interrupt *intr = | |
8385 | IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
11fdf7f2 | 8386 | ixgbevf_intr_disable(dev); |
7c673cae FG |
8387 | |
8388 | /* read-on-clear nic registers here */ | |
8389 | eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); | |
8390 | intr->flags = 0; | |
8391 | ||
8392 | /* only one misc vector supported - mailbox */ | |
8393 | eicr &= IXGBE_VTEICR_MASK; | |
8394 | if (eicr == IXGBE_MISC_VEC_ID) | |
8395 | intr->flags |= IXGBE_FLAG_MAILBOX; | |
8396 | ||
8397 | return 0; | |
8398 | } | |
8399 | ||
8400 | static int | |
8401 | ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) | |
8402 | { | |
7c673cae FG |
8403 | struct ixgbe_interrupt *intr = |
8404 | IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); | |
8405 | ||
8406 | if (intr->flags & IXGBE_FLAG_MAILBOX) { | |
8407 | ixgbevf_mbx_process(dev); | |
8408 | intr->flags &= ~IXGBE_FLAG_MAILBOX; | |
8409 | } | |
8410 | ||
11fdf7f2 | 8411 | ixgbevf_intr_enable(dev); |
7c673cae FG |
8412 | |
8413 | return 0; | |
8414 | } | |
8415 | ||
/* Top-level VF interrupt handler: latch the cause, then act on it. */
static void
ixgbevf_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;

	ixgbevf_dev_interrupt_get_status(dev);
	ixgbevf_dev_interrupt_action(dev);
}
8424 | ||
11fdf7f2 TL |
8425 | /** |
8426 | * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path | |
8427 | * @hw: pointer to hardware structure | |
8428 | * | |
8429 | * Stops the transmit data path and waits for the HW to internally empty | |
8430 | * the Tx security block | |
8431 | **/ | |
8432 | int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw) | |
8433 | { | |
8434 | #define IXGBE_MAX_SECTX_POLL 40 | |
8435 | ||
8436 | int i; | |
8437 | int sectxreg; | |
8438 | ||
8439 | sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); | |
8440 | sectxreg |= IXGBE_SECTXCTRL_TX_DIS; | |
8441 | IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); | |
8442 | for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) { | |
8443 | sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); | |
8444 | if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY) | |
8445 | break; | |
8446 | /* Use interrupt-safe sleep just in case */ | |
8447 | usec_delay(1000); | |
8448 | } | |
8449 | ||
8450 | /* For informational purposes only */ | |
8451 | if (i >= IXGBE_MAX_SECTX_POLL) | |
8452 | PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security " | |
8453 | "path fully disabled. Continuing with init."); | |
8454 | ||
8455 | return IXGBE_SUCCESS; | |
8456 | } | |
8457 | ||
8458 | /** | |
8459 | * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path | |
8460 | * @hw: pointer to hardware structure | |
8461 | * | |
8462 | * Enables the transmit data path. | |
8463 | **/ | |
8464 | int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw) | |
8465 | { | |
8466 | uint32_t sectxreg; | |
8467 | ||
8468 | sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); | |
8469 | sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS; | |
8470 | IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); | |
8471 | IXGBE_WRITE_FLUSH(hw); | |
8472 | ||
8473 | return IXGBE_SUCCESS; | |
8474 | } | |
8475 | ||
8476 | /* restore n-tuple filter */ | |
8477 | static inline void | |
8478 | ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev) | |
8479 | { | |
8480 | struct ixgbe_filter_info *filter_info = | |
8481 | IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); | |
8482 | struct ixgbe_5tuple_filter *node; | |
8483 | ||
8484 | TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) { | |
8485 | ixgbe_inject_5tuple_filter(dev, node); | |
8486 | } | |
8487 | } | |
8488 | ||
8489 | /* restore ethernet type filter */ | |
8490 | static inline void | |
8491 | ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev) | |
8492 | { | |
8493 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
8494 | struct ixgbe_filter_info *filter_info = | |
8495 | IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); | |
8496 | int i; | |
8497 | ||
8498 | for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { | |
8499 | if (filter_info->ethertype_mask & (1 << i)) { | |
8500 | IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), | |
8501 | filter_info->ethertype_filters[i].etqf); | |
8502 | IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), | |
8503 | filter_info->ethertype_filters[i].etqs); | |
8504 | IXGBE_WRITE_FLUSH(hw); | |
8505 | } | |
8506 | } | |
8507 | } | |
8508 | ||
8509 | /* restore SYN filter */ | |
8510 | static inline void | |
8511 | ixgbe_syn_filter_restore(struct rte_eth_dev *dev) | |
8512 | { | |
8513 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
8514 | struct ixgbe_filter_info *filter_info = | |
8515 | IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); | |
8516 | uint32_t synqf; | |
8517 | ||
8518 | synqf = filter_info->syn_info; | |
8519 | ||
8520 | if (synqf & IXGBE_SYN_FILTER_ENABLE) { | |
8521 | IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); | |
8522 | IXGBE_WRITE_FLUSH(hw); | |
8523 | } | |
8524 | } | |
8525 | ||
8526 | /* restore L2 tunnel filter */ | |
8527 | static inline void | |
8528 | ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) | |
8529 | { | |
8530 | struct ixgbe_l2_tn_info *l2_tn_info = | |
8531 | IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); | |
8532 | struct ixgbe_l2_tn_filter *node; | |
8533 | struct rte_eth_l2_tunnel_conf l2_tn_conf; | |
8534 | ||
8535 | TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) { | |
8536 | l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type; | |
8537 | l2_tn_conf.tunnel_id = node->key.tn_id; | |
8538 | l2_tn_conf.pool = node->pool; | |
8539 | (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE); | |
8540 | } | |
8541 | } | |
8542 | ||
8543 | /* restore rss filter */ | |
8544 | static inline void | |
8545 | ixgbe_rss_filter_restore(struct rte_eth_dev *dev) | |
8546 | { | |
8547 | struct ixgbe_filter_info *filter_info = | |
8548 | IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); | |
8549 | ||
8550 | if (filter_info->rss_info.conf.queue_num) | |
8551 | ixgbe_config_rss_filter(dev, | |
8552 | &filter_info->rss_info, TRUE); | |
8553 | } | |
8554 | ||
/* Reprogram all cached flow filters (n-tuple, ethertype, SYN, flow
 * director, L2 tunnel, RSS) into the hardware, e.g. after device start.
 * Always returns 0; the individual restore helpers do not report errors.
 */
static int
ixgbe_filter_restore(struct rte_eth_dev *dev)
{
	ixgbe_ntuple_filter_restore(dev);
	ixgbe_ethertype_filter_restore(dev);
	ixgbe_syn_filter_restore(dev);
	ixgbe_fdir_filter_restore(dev);
	ixgbe_l2_tn_filter_restore(dev);
	ixgbe_rss_filter_restore(dev);

	return 0;
}
8567 | ||
8568 | static void | |
8569 | ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev) | |
8570 | { | |
8571 | struct ixgbe_l2_tn_info *l2_tn_info = | |
8572 | IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); | |
8573 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
8574 | ||
8575 | if (l2_tn_info->e_tag_en) | |
8576 | (void)ixgbe_e_tag_enable(hw); | |
8577 | ||
8578 | if (l2_tn_info->e_tag_fwd_en) | |
8579 | (void)ixgbe_e_tag_forwarding_en_dis(dev, 1); | |
8580 | ||
8581 | (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type); | |
8582 | } | |
8583 | ||
8584 | /* remove all the n-tuple filters */ | |
8585 | void | |
8586 | ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) | |
8587 | { | |
8588 | struct ixgbe_filter_info *filter_info = | |
8589 | IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); | |
8590 | struct ixgbe_5tuple_filter *p_5tuple; | |
8591 | ||
8592 | while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) | |
8593 | ixgbe_remove_5tuple_filter(dev, p_5tuple); | |
8594 | } | |
8595 | ||
8596 | /* remove all the ether type filters */ | |
8597 | void | |
8598 | ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev) | |
8599 | { | |
8600 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
8601 | struct ixgbe_filter_info *filter_info = | |
8602 | IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); | |
8603 | int i; | |
8604 | ||
8605 | for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { | |
8606 | if (filter_info->ethertype_mask & (1 << i) && | |
8607 | !filter_info->ethertype_filters[i].conf) { | |
8608 | (void)ixgbe_ethertype_filter_remove(filter_info, | |
8609 | (uint8_t)i); | |
8610 | IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0); | |
8611 | IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0); | |
8612 | IXGBE_WRITE_FLUSH(hw); | |
8613 | } | |
8614 | } | |
8615 | } | |
8616 | ||
8617 | /* remove the SYN filter */ | |
8618 | void | |
8619 | ixgbe_clear_syn_filter(struct rte_eth_dev *dev) | |
8620 | { | |
8621 | struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); | |
8622 | struct ixgbe_filter_info *filter_info = | |
8623 | IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); | |
8624 | ||
8625 | if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) { | |
8626 | filter_info->syn_info = 0; | |
8627 | ||
8628 | IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0); | |
8629 | IXGBE_WRITE_FLUSH(hw); | |
8630 | } | |
8631 | } | |
8632 | ||
8633 | /* remove all the L2 tunnel filters */ | |
8634 | int | |
8635 | ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) | |
8636 | { | |
8637 | struct ixgbe_l2_tn_info *l2_tn_info = | |
8638 | IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); | |
8639 | struct ixgbe_l2_tn_filter *l2_tn_filter; | |
8640 | struct rte_eth_l2_tunnel_conf l2_tn_conf; | |
8641 | int ret = 0; | |
8642 | ||
8643 | while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { | |
8644 | l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type; | |
8645 | l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id; | |
8646 | l2_tn_conf.pool = l2_tn_filter->pool; | |
8647 | ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf); | |
8648 | if (ret < 0) | |
8649 | return ret; | |
8650 | } | |
8651 | ||
8652 | return 0; | |
8653 | } | |
8654 | ||
/* Register the PF and VF drivers with the PCI bus, export their device
 * ID tables, and declare the kernel modules each can bind to.
 */
RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
8661 | ||
/* Constructor: register the driver's two log types and set their default
 * level to NOTICE. A negative return from rte_log_register means the
 * type could not be registered, in which case the level is left alone.
 */
RTE_INIT(ixgbe_init_log)
{
	ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
	if (ixgbe_logtype_init >= 0)
		rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE);
	ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver");
	if (ixgbe_logtype_driver >= 0)
		rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE);
}