ceph/src/spdk/dpdk/drivers/net/i40e/i40e_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
3 */
4
5 #include <stdio.h>
6 #include <errno.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10 #include <stdarg.h>
11 #include <inttypes.h>
12 #include <assert.h>
13
14 #include <rte_common.h>
15 #include <rte_eal.h>
16 #include <rte_string_fns.h>
17 #include <rte_pci.h>
18 #include <rte_bus_pci.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
21 #include <rte_ethdev_pci.h>
22 #include <rte_memzone.h>
23 #include <rte_malloc.h>
24 #include <rte_memcpy.h>
25 #include <rte_alarm.h>
26 #include <rte_dev.h>
27 #include <rte_eth_ctrl.h>
28 #include <rte_tailq.h>
29 #include <rte_hash_crc.h>
30
31 #include "i40e_logs.h"
32 #include "base/i40e_prototype.h"
33 #include "base/i40e_adminq_cmd.h"
34 #include "base/i40e_type.h"
35 #include "base/i40e_register.h"
36 #include "base/i40e_dcb.h"
37 #include "i40e_ethdev.h"
38 #include "i40e_rxtx.h"
39 #include "i40e_pf.h"
40 #include "i40e_regs.h"
41 #include "rte_pmd_i40e.h"
42
43 #define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb"
44 #define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
45 #define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
46 #define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
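/*
 * Illustrative usage only (not part of the driver source): these keys are
 * consumed as PCI device arguments. A hypothetical EAL invocation combining
 * them could look like
 *
 *   testpmd -w 0000:02:00.0,enable_floating_veb=1,floating_veb_list=0;3-5,support-multi-driver=1,queue-num-per-vf=4 -- -i
 *
 * where the BDF and values are placeholders; see floating_veb_list_handler()
 * and i40e_parse_multi_drv_handler() below for the accepted formats.
 */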
47
48 #define I40E_CLEAR_PXE_WAIT_MS 200
49
50 /* Maximum number of capability elements */
51 #define I40E_MAX_CAP_ELE_NUM 128
52
53 /* Wait count and interval */
54 #define I40E_CHK_Q_ENA_COUNT 1000
55 #define I40E_CHK_Q_ENA_INTERVAL_US 1000
56
57 /* Maximum number of VSIs */
58 #define I40E_MAX_NUM_VSIS (384UL)
59
60 #define I40E_PRE_TX_Q_CFG_WAIT_US 10 /* 10 us */
61
62 /* Flow control default timer */
63 #define I40E_DEFAULT_PAUSE_TIME 0xFFFFU
64
65 /* Flow control enable fwd bit */
66 #define I40E_PRTMAC_FWD_CTRL 0x00000001
67
68 /* Receive Packet Buffer size */
69 #define I40E_RXPBSIZE (968 * 1024)
70
71 /* Kilobytes shift */
72 #define I40E_KILOSHIFT 10
73
74 /* Flow control default high water */
75 #define I40E_DEFAULT_HIGH_WATER (0xF2000 >> I40E_KILOSHIFT)
76
77 /* Flow control default low water */
78 #define I40E_DEFAULT_LOW_WATER (0xF2000 >> I40E_KILOSHIFT)
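/* For reference: 0xF2000 bytes >> I40E_KILOSHIFT = 968 KB, i.e. both
 * watermarks default to the full RX packet buffer (I40E_RXPBSIZE)
 * expressed in kilobytes.
 */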
79
81 /* Receive average packet size in bytes */
81 #define I40E_PACKET_AVERAGE_SIZE 128
82
83 /* Mask of PF interrupt causes */
84 #define I40E_PFINT_ICR0_ENA_MASK ( \
85 I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | \
86 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | \
87 I40E_PFINT_ICR0_ENA_GRST_MASK | \
88 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
89 I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
90 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
91 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
92 I40E_PFINT_ICR0_ENA_VFLR_MASK | \
93 I40E_PFINT_ICR0_ENA_ADMINQ_MASK)
94
95 #define I40E_FLOW_TYPES ( \
96 (1UL << RTE_ETH_FLOW_FRAG_IPV4) | \
97 (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
98 (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
99 (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
100 (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
101 (1UL << RTE_ETH_FLOW_FRAG_IPV6) | \
102 (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
103 (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
104 (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
105 (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
106 (1UL << RTE_ETH_FLOW_L2_PAYLOAD))
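/*
 * Illustrative check (not from the source): support for a given flow type
 * can be tested against this mask, e.g.
 *
 *   if (I40E_FLOW_TYPES & (1UL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP))
 *           ... flow type handled by this driver ...
 */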
107
108 /* Additional timesync values. */
109 #define I40E_PTP_40GB_INCVAL 0x0199999999ULL
110 #define I40E_PTP_10GB_INCVAL 0x0333333333ULL
111 #define I40E_PTP_1GB_INCVAL 0x2000000000ULL
112 #define I40E_PRTTSYN_TSYNENA 0x80000000
113 #define I40E_PRTTSYN_TSYNTYPE 0x0e000000
114 #define I40E_CYCLECOUNTER_MASK 0xffffffffffffffffULL
115
116 /**
117 * Below are values for writing to non-exposed registers, as
118 * suggested by silicon experts.
119 */
120 /* Destination MAC address */
121 #define I40E_REG_INSET_L2_DMAC 0xE000000000000000ULL
122 /* Source MAC address */
123 #define I40E_REG_INSET_L2_SMAC 0x1C00000000000000ULL
124 /* Outer (S-Tag) VLAN tag in the outer L2 header */
125 #define I40E_REG_INSET_L2_OUTER_VLAN 0x0000000004000000ULL
126 /* Inner (C-Tag) or single VLAN tag in the outer L2 header */
127 #define I40E_REG_INSET_L2_INNER_VLAN 0x0080000000000000ULL
128 /* Single VLAN tag in the inner L2 header */
129 #define I40E_REG_INSET_TUNNEL_VLAN 0x0100000000000000ULL
130 /* Source IPv4 address */
131 #define I40E_REG_INSET_L3_SRC_IP4 0x0001800000000000ULL
132 /* Destination IPv4 address */
133 #define I40E_REG_INSET_L3_DST_IP4 0x0000001800000000ULL
134 /* Source IPv4 address for X722 */
135 #define I40E_X722_REG_INSET_L3_SRC_IP4 0x0006000000000000ULL
136 /* Destination IPv4 address for X722 */
137 #define I40E_X722_REG_INSET_L3_DST_IP4 0x0000060000000000ULL
138 /* IPv4 Protocol for X722 */
139 #define I40E_X722_REG_INSET_L3_IP4_PROTO 0x0010000000000000ULL
140 /* IPv4 Time to Live for X722 */
141 #define I40E_X722_REG_INSET_L3_IP4_TTL 0x0010000000000000ULL
142 /* IPv4 Type of Service (TOS) */
143 #define I40E_REG_INSET_L3_IP4_TOS 0x0040000000000000ULL
144 /* IPv4 Protocol */
145 #define I40E_REG_INSET_L3_IP4_PROTO 0x0004000000000000ULL
146 /* IPv4 Time to Live */
147 #define I40E_REG_INSET_L3_IP4_TTL 0x0004000000000000ULL
148 /* Source IPv6 address */
149 #define I40E_REG_INSET_L3_SRC_IP6 0x0007F80000000000ULL
150 /* Destination IPv6 address */
151 #define I40E_REG_INSET_L3_DST_IP6 0x000007F800000000ULL
152 /* IPv6 Traffic Class (TC) */
153 #define I40E_REG_INSET_L3_IP6_TC 0x0040000000000000ULL
154 /* IPv6 Next Header */
155 #define I40E_REG_INSET_L3_IP6_NEXT_HDR 0x0008000000000000ULL
156 /* IPv6 Hop Limit */
157 #define I40E_REG_INSET_L3_IP6_HOP_LIMIT 0x0008000000000000ULL
158 /* Source L4 port */
159 #define I40E_REG_INSET_L4_SRC_PORT 0x0000000400000000ULL
160 /* Destination L4 port */
161 #define I40E_REG_INSET_L4_DST_PORT 0x0000000200000000ULL
162 /* SCTP verification tag */
163 #define I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG 0x0000000180000000ULL
164 /* Inner destination MAC address (MAC-in-UDP/MAC-in-GRE) */
165 #define I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC 0x0000000001C00000ULL
166 /* Source port of tunneling UDP */
167 #define I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT 0x0000000000200000ULL
168 /* Destination port of tunneling UDP */
169 #define I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT 0x0000000000100000ULL
170 /* UDP Tunneling ID, NVGRE/GRE key */
171 #define I40E_REG_INSET_TUNNEL_ID 0x00000000000C0000ULL
172 /* Last ether type */
173 #define I40E_REG_INSET_LAST_ETHER_TYPE 0x0000000000004000ULL
174 /* Tunneling outer destination IPv4 address */
175 #define I40E_REG_INSET_TUNNEL_L3_DST_IP4 0x00000000000000C0ULL
176 /* Tunneling outer destination IPv6 address */
177 #define I40E_REG_INSET_TUNNEL_L3_DST_IP6 0x0000000000003FC0ULL
178 /* 1st word of flex payload */
179 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD1 0x0000000000002000ULL
180 /* 2nd word of flex payload */
181 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD2 0x0000000000001000ULL
182 /* 3rd word of flex payload */
183 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD3 0x0000000000000800ULL
184 /* 4th word of flex payload */
185 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD4 0x0000000000000400ULL
186 /* 5th word of flex payload */
187 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD5 0x0000000000000200ULL
188 /* 6th word of flex payload */
189 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD6 0x0000000000000100ULL
190 /* 7th word of flex payload */
191 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD7 0x0000000000000080ULL
192 /* 8th word of flex payload */
193 #define I40E_REG_INSET_FLEX_PAYLOAD_WORD8 0x0000000000000040ULL
194 /* all 8 words flex payload */
195 #define I40E_REG_INSET_FLEX_PAYLOAD_WORDS 0x0000000000003FC0ULL
196 #define I40E_REG_INSET_MASK_DEFAULT 0x0000000000000000ULL
197
198 #define I40E_TRANSLATE_INSET 0
199 #define I40E_TRANSLATE_REG 1
200
201 #define I40E_INSET_IPV4_TOS_MASK 0x0009FF00UL
202 #define I40E_INSET_IPv4_TTL_MASK 0x000D00FFUL
203 #define I40E_INSET_IPV4_PROTO_MASK 0x000DFF00UL
204 #define I40E_INSET_IPV6_TC_MASK 0x0009F00FUL
205 #define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x000CFF00UL
206 #define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL
207
208 /* PCI offset for querying capability */
209 #define PCI_DEV_CAP_REG 0xA4
210 /* PCI offset for enabling/disabling Extended Tag */
211 #define PCI_DEV_CTRL_REG 0xA8
212 /* Bit mask of Extended Tag capability */
213 #define PCI_DEV_CAP_EXT_TAG_MASK 0x20
214 /* Bit shift of Extended Tag enable/disable */
215 #define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
216 /* Bit mask of Extended Tag enable/disable */
217 #define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
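/*
 * Minimal sketch (assumptions, not the driver's own code) of how the
 * offsets and masks above are meant to be used together with the generic
 * PCI config-space helpers:
 *
 *   uint32_t buf = 0;
 *
 *   rte_pci_read_config(pci_dev, &buf, sizeof(buf), PCI_DEV_CAP_REG);
 *   if (buf & PCI_DEV_CAP_EXT_TAG_MASK) {
 *           rte_pci_read_config(pci_dev, &buf, sizeof(buf),
 *                               PCI_DEV_CTRL_REG);
 *           buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
 *           rte_pci_write_config(pci_dev, &buf, sizeof(buf),
 *                                PCI_DEV_CTRL_REG);
 *   }
 */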
218
219 static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
220 static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
221 static int i40e_dev_configure(struct rte_eth_dev *dev);
222 static int i40e_dev_start(struct rte_eth_dev *dev);
223 static void i40e_dev_stop(struct rte_eth_dev *dev);
224 static void i40e_dev_close(struct rte_eth_dev *dev);
225 static int i40e_dev_reset(struct rte_eth_dev *dev);
226 static void i40e_dev_promiscuous_enable(struct rte_eth_dev *dev);
227 static void i40e_dev_promiscuous_disable(struct rte_eth_dev *dev);
228 static void i40e_dev_allmulticast_enable(struct rte_eth_dev *dev);
229 static void i40e_dev_allmulticast_disable(struct rte_eth_dev *dev);
230 static int i40e_dev_set_link_up(struct rte_eth_dev *dev);
231 static int i40e_dev_set_link_down(struct rte_eth_dev *dev);
232 static int i40e_dev_stats_get(struct rte_eth_dev *dev,
233 struct rte_eth_stats *stats);
234 static int i40e_dev_xstats_get(struct rte_eth_dev *dev,
235 struct rte_eth_xstat *xstats, unsigned n);
236 static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev,
237 struct rte_eth_xstat_name *xstats_names,
238 unsigned limit);
239 static void i40e_dev_stats_reset(struct rte_eth_dev *dev);
240 static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
241 uint16_t queue_id,
242 uint8_t stat_idx,
243 uint8_t is_rx);
244 static int i40e_fw_version_get(struct rte_eth_dev *dev,
245 char *fw_version, size_t fw_size);
246 static void i40e_dev_info_get(struct rte_eth_dev *dev,
247 struct rte_eth_dev_info *dev_info);
248 static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
249 uint16_t vlan_id,
250 int on);
251 static int i40e_vlan_tpid_set(struct rte_eth_dev *dev,
252 enum rte_vlan_type vlan_type,
253 uint16_t tpid);
254 static int i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask);
255 static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev,
256 uint16_t queue,
257 int on);
258 static int i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on);
259 static int i40e_dev_led_on(struct rte_eth_dev *dev);
260 static int i40e_dev_led_off(struct rte_eth_dev *dev);
261 static int i40e_flow_ctrl_get(struct rte_eth_dev *dev,
262 struct rte_eth_fc_conf *fc_conf);
263 static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
264 struct rte_eth_fc_conf *fc_conf);
265 static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
266 struct rte_eth_pfc_conf *pfc_conf);
267 static int i40e_macaddr_add(struct rte_eth_dev *dev,
268 struct ether_addr *mac_addr,
269 uint32_t index,
270 uint32_t pool);
271 static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
272 static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
273 struct rte_eth_rss_reta_entry64 *reta_conf,
274 uint16_t reta_size);
275 static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
276 struct rte_eth_rss_reta_entry64 *reta_conf,
277 uint16_t reta_size);
278
279 static int i40e_get_cap(struct i40e_hw *hw);
280 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
281 static int i40e_pf_setup(struct i40e_pf *pf);
282 static int i40e_dev_rxtx_init(struct i40e_pf *pf);
283 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
284 static int i40e_dcb_setup(struct rte_eth_dev *dev);
285 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
286 bool offset_loaded, uint64_t *offset, uint64_t *stat);
287 static void i40e_stat_update_48(struct i40e_hw *hw,
288 uint32_t hireg,
289 uint32_t loreg,
290 bool offset_loaded,
291 uint64_t *offset,
292 uint64_t *stat);
293 static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
294 static void i40e_dev_interrupt_handler(void *param);
295 static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
296 uint32_t base, uint32_t num);
297 static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
298 static int i40e_res_pool_free(struct i40e_res_pool_info *pool,
299 uint32_t base);
300 static int i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
301 uint16_t num);
302 static int i40e_dev_init_vlan(struct rte_eth_dev *dev);
303 static int i40e_veb_release(struct i40e_veb *veb);
304 static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
305 struct i40e_vsi *vsi);
306 static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
307 static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
308 static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
309 struct i40e_macvlan_filter *mv_f,
310 int num,
311 uint16_t vlan);
312 static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi);
313 static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
314 struct rte_eth_rss_conf *rss_conf);
315 static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
316 struct rte_eth_rss_conf *rss_conf);
317 static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
318 struct rte_eth_udp_tunnel *udp_tunnel);
319 static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
320 struct rte_eth_udp_tunnel *udp_tunnel);
321 static void i40e_filter_input_set_init(struct i40e_pf *pf);
322 static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
323 enum rte_filter_op filter_op,
324 void *arg);
325 static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
326 enum rte_filter_type filter_type,
327 enum rte_filter_op filter_op,
328 void *arg);
329 static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
330 struct rte_eth_dcb_info *dcb_info);
331 static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
332 static void i40e_configure_registers(struct i40e_hw *hw);
333 static void i40e_hw_init(struct rte_eth_dev *dev);
334 static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
335 static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
336 uint16_t seid,
337 uint16_t rule_type,
338 uint16_t *entries,
339 uint16_t count,
340 uint16_t rule_id);
341 static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
342 struct rte_eth_mirror_conf *mirror_conf,
343 uint8_t sw_id, uint8_t on);
344 static int i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id);
345
346 static int i40e_timesync_enable(struct rte_eth_dev *dev);
347 static int i40e_timesync_disable(struct rte_eth_dev *dev);
348 static int i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
349 struct timespec *timestamp,
350 uint32_t flags);
351 static int i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
352 struct timespec *timestamp);
353 static void i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw);
354
355 static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
356
357 static int i40e_timesync_read_time(struct rte_eth_dev *dev,
358 struct timespec *timestamp);
359 static int i40e_timesync_write_time(struct rte_eth_dev *dev,
360 const struct timespec *timestamp);
361
362 static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
363 uint16_t queue_id);
364 static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
365 uint16_t queue_id);
366
367 static int i40e_get_regs(struct rte_eth_dev *dev,
368 struct rte_dev_reg_info *regs);
369
370 static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
371
372 static int i40e_get_eeprom(struct rte_eth_dev *dev,
373 struct rte_dev_eeprom_info *eeprom);
374
375 static int i40e_get_module_info(struct rte_eth_dev *dev,
376 struct rte_eth_dev_module_info *modinfo);
377 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
378 struct rte_dev_eeprom_info *info);
379
380 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
381 struct ether_addr *mac_addr);
382
383 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
384
385 static int i40e_ethertype_filter_convert(
386 const struct rte_eth_ethertype_filter *input,
387 struct i40e_ethertype_filter *filter);
388 static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
389 struct i40e_ethertype_filter *filter);
390
391 static int i40e_tunnel_filter_convert(
392 struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
393 struct i40e_tunnel_filter *tunnel_filter);
394 static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
395 struct i40e_tunnel_filter *tunnel_filter);
396 static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
397
398 static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
399 static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
400 static void i40e_filter_restore(struct i40e_pf *pf);
401 static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
402
403 int i40e_logtype_init;
404 int i40e_logtype_driver;
405
406 static const char *const valid_keys[] = {
407 ETH_I40E_FLOATING_VEB_ARG,
408 ETH_I40E_FLOATING_VEB_LIST_ARG,
409 ETH_I40E_SUPPORT_MULTI_DRIVER,
410 ETH_I40E_QUEUE_NUM_PER_VF_ARG,
411 NULL};
412
413 static const struct rte_pci_id pci_id_i40e_map[] = {
414 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
415 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
416 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) },
417 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) },
418 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) },
419 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) },
420 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) },
421 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) },
422 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) },
423 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) },
424 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) },
425 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) },
426 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) },
427 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) },
428 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) },
429 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) },
430 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) },
431 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
432 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
433 { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
434 { .vendor_id = 0, /* sentinel */ },
435 };
436
437 static const struct eth_dev_ops i40e_eth_dev_ops = {
438 .dev_configure = i40e_dev_configure,
439 .dev_start = i40e_dev_start,
440 .dev_stop = i40e_dev_stop,
441 .dev_close = i40e_dev_close,
442 .dev_reset = i40e_dev_reset,
443 .promiscuous_enable = i40e_dev_promiscuous_enable,
444 .promiscuous_disable = i40e_dev_promiscuous_disable,
445 .allmulticast_enable = i40e_dev_allmulticast_enable,
446 .allmulticast_disable = i40e_dev_allmulticast_disable,
447 .dev_set_link_up = i40e_dev_set_link_up,
448 .dev_set_link_down = i40e_dev_set_link_down,
449 .link_update = i40e_dev_link_update,
450 .stats_get = i40e_dev_stats_get,
451 .xstats_get = i40e_dev_xstats_get,
452 .xstats_get_names = i40e_dev_xstats_get_names,
453 .stats_reset = i40e_dev_stats_reset,
454 .xstats_reset = i40e_dev_stats_reset,
455 .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
456 .fw_version_get = i40e_fw_version_get,
457 .dev_infos_get = i40e_dev_info_get,
458 .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
459 .vlan_filter_set = i40e_vlan_filter_set,
460 .vlan_tpid_set = i40e_vlan_tpid_set,
461 .vlan_offload_set = i40e_vlan_offload_set,
462 .vlan_strip_queue_set = i40e_vlan_strip_queue_set,
463 .vlan_pvid_set = i40e_vlan_pvid_set,
464 .rx_queue_start = i40e_dev_rx_queue_start,
465 .rx_queue_stop = i40e_dev_rx_queue_stop,
466 .tx_queue_start = i40e_dev_tx_queue_start,
467 .tx_queue_stop = i40e_dev_tx_queue_stop,
468 .rx_queue_setup = i40e_dev_rx_queue_setup,
469 .rx_queue_intr_enable = i40e_dev_rx_queue_intr_enable,
470 .rx_queue_intr_disable = i40e_dev_rx_queue_intr_disable,
471 .rx_queue_release = i40e_dev_rx_queue_release,
472 .rx_queue_count = i40e_dev_rx_queue_count,
473 .rx_descriptor_done = i40e_dev_rx_descriptor_done,
474 .rx_descriptor_status = i40e_dev_rx_descriptor_status,
475 .tx_descriptor_status = i40e_dev_tx_descriptor_status,
476 .tx_queue_setup = i40e_dev_tx_queue_setup,
477 .tx_queue_release = i40e_dev_tx_queue_release,
478 .dev_led_on = i40e_dev_led_on,
479 .dev_led_off = i40e_dev_led_off,
480 .flow_ctrl_get = i40e_flow_ctrl_get,
481 .flow_ctrl_set = i40e_flow_ctrl_set,
482 .priority_flow_ctrl_set = i40e_priority_flow_ctrl_set,
483 .mac_addr_add = i40e_macaddr_add,
484 .mac_addr_remove = i40e_macaddr_remove,
485 .reta_update = i40e_dev_rss_reta_update,
486 .reta_query = i40e_dev_rss_reta_query,
487 .rss_hash_update = i40e_dev_rss_hash_update,
488 .rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
489 .udp_tunnel_port_add = i40e_dev_udp_tunnel_port_add,
490 .udp_tunnel_port_del = i40e_dev_udp_tunnel_port_del,
491 .filter_ctrl = i40e_dev_filter_ctrl,
492 .rxq_info_get = i40e_rxq_info_get,
493 .txq_info_get = i40e_txq_info_get,
494 .mirror_rule_set = i40e_mirror_rule_set,
495 .mirror_rule_reset = i40e_mirror_rule_reset,
496 .timesync_enable = i40e_timesync_enable,
497 .timesync_disable = i40e_timesync_disable,
498 .timesync_read_rx_timestamp = i40e_timesync_read_rx_timestamp,
499 .timesync_read_tx_timestamp = i40e_timesync_read_tx_timestamp,
500 .get_dcb_info = i40e_dev_get_dcb_info,
501 .timesync_adjust_time = i40e_timesync_adjust_time,
502 .timesync_read_time = i40e_timesync_read_time,
503 .timesync_write_time = i40e_timesync_write_time,
504 .get_reg = i40e_get_regs,
505 .get_eeprom_length = i40e_get_eeprom_length,
506 .get_eeprom = i40e_get_eeprom,
507 .get_module_info = i40e_get_module_info,
508 .get_module_eeprom = i40e_get_module_eeprom,
509 .mac_addr_set = i40e_set_default_mac_addr,
510 .mtu_set = i40e_dev_mtu_set,
511 .tm_ops_get = i40e_tm_ops_get,
512 };
513
514 /* Store statistics names and their offsets in the stats structure */
515 struct rte_i40e_xstats_name_off {
516 char name[RTE_ETH_XSTATS_NAME_SIZE];
517 unsigned offset;
518 };
519
520 static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
521 {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
522 {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
523 {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
524 {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)},
525 {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
526 rx_unknown_protocol)},
527 {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
528 {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
529 {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
530 {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)},
531 };
532
533 #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
534 sizeof(rte_i40e_stats_strings[0]))
535
536 static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
537 {"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
538 tx_dropped_link_down)},
539 {"rx_crc_errors", offsetof(struct i40e_hw_port_stats, crc_errors)},
540 {"rx_illegal_byte_errors", offsetof(struct i40e_hw_port_stats,
541 illegal_bytes)},
542 {"rx_error_bytes", offsetof(struct i40e_hw_port_stats, error_bytes)},
543 {"mac_local_errors", offsetof(struct i40e_hw_port_stats,
544 mac_local_faults)},
545 {"mac_remote_errors", offsetof(struct i40e_hw_port_stats,
546 mac_remote_faults)},
547 {"rx_length_errors", offsetof(struct i40e_hw_port_stats,
548 rx_length_errors)},
549 {"tx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_tx)},
550 {"rx_xon_packets", offsetof(struct i40e_hw_port_stats, link_xon_rx)},
551 {"tx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_tx)},
552 {"rx_xoff_packets", offsetof(struct i40e_hw_port_stats, link_xoff_rx)},
553 {"rx_size_64_packets", offsetof(struct i40e_hw_port_stats, rx_size_64)},
554 {"rx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
555 rx_size_127)},
556 {"rx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
557 rx_size_255)},
558 {"rx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
559 rx_size_511)},
560 {"rx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
561 rx_size_1023)},
562 {"rx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
563 rx_size_1522)},
564 {"rx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
565 rx_size_big)},
566 {"rx_undersized_errors", offsetof(struct i40e_hw_port_stats,
567 rx_undersize)},
568 {"rx_oversize_errors", offsetof(struct i40e_hw_port_stats,
569 rx_oversize)},
570 {"rx_mac_short_dropped", offsetof(struct i40e_hw_port_stats,
571 mac_short_packet_dropped)},
572 {"rx_fragmented_errors", offsetof(struct i40e_hw_port_stats,
573 rx_fragments)},
574 {"rx_jabber_errors", offsetof(struct i40e_hw_port_stats, rx_jabber)},
575 {"tx_size_64_packets", offsetof(struct i40e_hw_port_stats, tx_size_64)},
576 {"tx_size_65_to_127_packets", offsetof(struct i40e_hw_port_stats,
577 tx_size_127)},
578 {"tx_size_128_to_255_packets", offsetof(struct i40e_hw_port_stats,
579 tx_size_255)},
580 {"tx_size_256_to_511_packets", offsetof(struct i40e_hw_port_stats,
581 tx_size_511)},
582 {"tx_size_512_to_1023_packets", offsetof(struct i40e_hw_port_stats,
583 tx_size_1023)},
584 {"tx_size_1024_to_1522_packets", offsetof(struct i40e_hw_port_stats,
585 tx_size_1522)},
586 {"tx_size_1523_to_max_packets", offsetof(struct i40e_hw_port_stats,
587 tx_size_big)},
588 {"rx_flow_director_atr_match_packets",
589 offsetof(struct i40e_hw_port_stats, fd_atr_match)},
590 {"rx_flow_director_sb_match_packets",
591 offsetof(struct i40e_hw_port_stats, fd_sb_match)},
592 {"tx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
593 tx_lpi_status)},
594 {"rx_low_power_idle_status", offsetof(struct i40e_hw_port_stats,
595 rx_lpi_status)},
596 {"tx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
597 tx_lpi_count)},
598 {"rx_low_power_idle_count", offsetof(struct i40e_hw_port_stats,
599 rx_lpi_count)},
600 };
601
602 #define I40E_NB_HW_PORT_XSTATS (sizeof(rte_i40e_hw_port_strings) / \
603 sizeof(rte_i40e_hw_port_strings[0]))
604
605 static const struct rte_i40e_xstats_name_off rte_i40e_rxq_prio_strings[] = {
606 {"xon_packets", offsetof(struct i40e_hw_port_stats,
607 priority_xon_rx)},
608 {"xoff_packets", offsetof(struct i40e_hw_port_stats,
609 priority_xoff_rx)},
610 };
611
612 #define I40E_NB_RXQ_PRIO_XSTATS (sizeof(rte_i40e_rxq_prio_strings) / \
613 sizeof(rte_i40e_rxq_prio_strings[0]))
614
615 static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
616 {"xon_packets", offsetof(struct i40e_hw_port_stats,
617 priority_xon_tx)},
618 {"xoff_packets", offsetof(struct i40e_hw_port_stats,
619 priority_xoff_tx)},
620 {"xon_to_xoff_packets", offsetof(struct i40e_hw_port_stats,
621 priority_xon_2_xoff)},
622 };
623
624 #define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
625 sizeof(rte_i40e_txq_prio_strings[0]))
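/*
 * Minimal sketch (hypothetical helper, not part of the source) of how the
 * name/offset tables above are typically consumed by the xstats handlers:
 * each entry's offset indexes a u64 counter inside the stats structure.
 *
 *   static uint64_t
 *   i40e_read_eth_stat(const struct i40e_eth_stats *stats, unsigned int idx)
 *   {
 *           const char *base = (const char *)stats;
 *
 *           return *(const uint64_t *)(base +
 *                   rte_i40e_stats_strings[idx].offset);
 *   }
 */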
626
627 static int
628 eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
629 struct rte_pci_device *pci_dev)
630 {
631 char name[RTE_ETH_NAME_MAX_LEN];
632 struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
633 int i, retval;
634
635 if (pci_dev->device.devargs) {
636 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
637 &eth_da);
638 if (retval)
639 return retval;
640 }
641
642 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
643 sizeof(struct i40e_adapter),
644 eth_dev_pci_specific_init, pci_dev,
645 eth_i40e_dev_init, NULL);
646
647 if (retval || eth_da.nb_representor_ports < 1)
648 return retval;
649
650 /* probe VF representor ports */
651 struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
652 pci_dev->device.name);
653
654 if (pf_ethdev == NULL)
655 return -ENODEV;
656
657 for (i = 0; i < eth_da.nb_representor_ports; i++) {
658 struct i40e_vf_representor representor = {
659 .vf_id = eth_da.representor_ports[i],
660 .switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
661 pf_ethdev->data->dev_private)->switch_domain_id,
662 .adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
663 pf_ethdev->data->dev_private)
664 };
665
666 /* representor port name: net_<bdf>_representor_<vf_id> */
667 snprintf(name, sizeof(name), "net_%s_representor_%d",
668 pci_dev->device.name, eth_da.representor_ports[i]);
669
670 retval = rte_eth_dev_create(&pci_dev->device, name,
671 sizeof(struct i40e_vf_representor), NULL, NULL,
672 i40e_vf_representor_init, &representor);
673
674 if (retval)
675 PMD_DRV_LOG(ERR, "failed to create i40e vf "
676 "representor %s.", name);
677 }
678
679 return 0;
680 }
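/*
 * Illustrative only: VF representor ports are requested through the
 * standard "representor" devargs, e.g. a hypothetical
 *
 *   -w 0000:02:00.0,representor=[0-2]
 *
 * would make the loop above create three extra ports named
 * net_0000:02:00.0_representor_0 .. _2 alongside the PF port.
 */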
681
682 static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
683 {
684 struct rte_eth_dev *ethdev;
685
686 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
687 if (!ethdev)
688 return -ENODEV;
689
690
691 if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
692 return rte_eth_dev_destroy(ethdev, i40e_vf_representor_uninit);
693 else
694 return rte_eth_dev_destroy(ethdev, eth_i40e_dev_uninit);
695 }
696
697 static struct rte_pci_driver rte_i40e_pmd = {
698 .id_table = pci_id_i40e_map,
699 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
700 RTE_PCI_DRV_IOVA_AS_VA,
701 .probe = eth_i40e_pci_probe,
702 .remove = eth_i40e_pci_remove,
703 };
704
705 static inline void
706 i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
707 uint32_t reg_val)
708 {
709 uint32_t ori_reg_val;
710 struct rte_eth_dev *dev;
711
712 ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
713 dev = ((struct i40e_adapter *)hw->back)->eth_dev;
714 i40e_write_rx_ctl(hw, reg_addr, reg_val);
715 if (ori_reg_val != reg_val)
716 PMD_DRV_LOG(WARNING,
717 "i40e device %s changed global register [0x%08x]."
718 " original: 0x%08x, new: 0x%08x",
719 dev->device->name, reg_addr, ori_reg_val, reg_val);
720 }
721
722 RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
723 RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
724 RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci");
725
726 #ifndef I40E_GLQF_ORT
727 #define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
728 #endif
729 #ifndef I40E_GLQF_PIT
730 #define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4))
731 #endif
732 #ifndef I40E_GLQF_L3_MAP
733 #define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
734 #endif
735
736 static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
737 {
738 /*
739 * Initialize registers for parsing the QinQ packet type.
740 * This should be removed from the code once a proper
741 * configuration API is added, to avoid configuration conflicts
742 * between ports of the same device.
743 */
744 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
745 I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
746 }
747
748 static inline void i40e_config_automask(struct i40e_pf *pf)
749 {
750 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
751 uint32_t val;
752
753 /* Disable auto-masking so the INTENA flag is not auto-cleared on interrupt */
754 val = I40E_READ_REG(hw, I40E_GLINT_CTL);
755 val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK |
756 I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
757
758 /* If multi-driver support is enabled, the PF will use INT0. */
759 if (!pf->support_multi_driver)
760 val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK;
761
762 I40E_WRITE_REG(hw, I40E_GLINT_CTL, val);
763 }
764
765 #define I40E_FLOW_CONTROL_ETHERTYPE 0x8808
766
767 /*
768 * Add an ethertype filter to drop all flow control frames transmitted
769 * from VSIs.
770 */
771 static void
772 i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
773 {
774 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
775 uint16_t flags = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
776 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
777 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
778 int ret;
779
780 ret = i40e_aq_add_rem_control_packet_filter(hw, NULL,
781 I40E_FLOW_CONTROL_ETHERTYPE, flags,
782 pf->main_vsi_seid, 0,
783 TRUE, NULL, NULL);
784 if (ret)
785 PMD_INIT_LOG(ERR,
786 "Failed to add filter to drop flow control frames from VSIs.");
787 }
788
789 static int
790 floating_veb_list_handler(__rte_unused const char *key,
791 const char *floating_veb_value,
792 void *opaque)
793 {
794 int idx = 0;
795 unsigned int count = 0;
796 char *end = NULL;
797 int min, max;
798 bool *vf_floating_veb = opaque;
799
800 while (isblank(*floating_veb_value))
801 floating_veb_value++;
802
803 /* Reset floating VEB configuration for VFs */
804 for (idx = 0; idx < I40E_MAX_VF; idx++)
805 vf_floating_veb[idx] = false;
806
807 min = I40E_MAX_VF;
808 do {
809 while (isblank(*floating_veb_value))
810 floating_veb_value++;
811 if (*floating_veb_value == '\0')
812 return -1;
813 errno = 0;
814 idx = strtoul(floating_veb_value, &end, 10);
815 if (errno || end == NULL)
816 return -1;
817 while (isblank(*end))
818 end++;
819 if (*end == '-') {
820 min = idx;
821 } else if ((*end == ';') || (*end == '\0')) {
822 max = idx;
823 if (min == I40E_MAX_VF)
824 min = idx;
825 if (max >= I40E_MAX_VF)
826 max = I40E_MAX_VF - 1;
827 for (idx = min; idx <= max; idx++) {
828 vf_floating_veb[idx] = true;
829 count++;
830 }
831 min = I40E_MAX_VF;
832 } else {
833 return -1;
834 }
835 floating_veb_value = end + 1;
836 } while (*end != '\0');
837
838 if (count == 0)
839 return -1;
840
841 return 0;
842 }
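/*
 * Accepted format, derived from the parser above (values illustrative):
 * entries are separated by ';' and may be single VF ids or '-' ranges, so
 *
 *   floating_veb_list=0;3-5
 *
 * marks VFs 0, 3, 4 and 5 for attachment to the floating VEB.
 */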
843
844 static void
845 config_vf_floating_veb(struct rte_devargs *devargs,
846 uint16_t floating_veb,
847 bool *vf_floating_veb)
848 {
849 struct rte_kvargs *kvlist;
850 int i;
851 const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG;
852
853 if (!floating_veb)
854 return;
855 /* All the VFs attach to the floating VEB by default
856 * when the floating VEB is enabled.
857 */
858 for (i = 0; i < I40E_MAX_VF; i++)
859 vf_floating_veb[i] = true;
860
861 if (devargs == NULL)
862 return;
863
864 kvlist = rte_kvargs_parse(devargs->args, valid_keys);
865 if (kvlist == NULL)
866 return;
867
868 if (!rte_kvargs_count(kvlist, floating_veb_list)) {
869 rte_kvargs_free(kvlist);
870 return;
871 }
872 /* When the floating_veb_list parameter exists, all the VFs
873 * attach to the legacy VEB first; VFs are then moved to the
874 * floating VEB according to the floating_veb_list.
875 */
876 if (rte_kvargs_process(kvlist, floating_veb_list,
877 floating_veb_list_handler,
878 vf_floating_veb) < 0) {
879 rte_kvargs_free(kvlist);
880 return;
881 }
882 rte_kvargs_free(kvlist);
883 }
884
885 static int
886 i40e_check_floating_handler(__rte_unused const char *key,
887 const char *value,
888 __rte_unused void *opaque)
889 {
890 if (strcmp(value, "1"))
891 return -1;
892
893 return 0;
894 }
895
896 static int
897 is_floating_veb_supported(struct rte_devargs *devargs)
898 {
899 struct rte_kvargs *kvlist;
900 const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG;
901
902 if (devargs == NULL)
903 return 0;
904
905 kvlist = rte_kvargs_parse(devargs->args, valid_keys);
906 if (kvlist == NULL)
907 return 0;
908
909 if (!rte_kvargs_count(kvlist, floating_veb_key)) {
910 rte_kvargs_free(kvlist);
911 return 0;
912 }
913 /* Floating VEB is enabled when there's key-value:
914 * enable_floating_veb=1
915 */
916 if (rte_kvargs_process(kvlist, floating_veb_key,
917 i40e_check_floating_handler, NULL) < 0) {
918 rte_kvargs_free(kvlist);
919 return 0;
920 }
921 rte_kvargs_free(kvlist);
922
923 return 1;
924 }
925
926 static void
927 config_floating_veb(struct rte_eth_dev *dev)
928 {
929 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
930 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
931 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
932
933 memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
934
935 if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
936 pf->floating_veb =
937 is_floating_veb_supported(pci_dev->device.devargs);
938 config_vf_floating_veb(pci_dev->device.devargs,
939 pf->floating_veb,
940 pf->floating_veb_list);
941 } else {
942 pf->floating_veb = false;
943 }
944 }
945
946 #define I40E_L2_TAGS_S_TAG_SHIFT 1
947 #define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
948
949 static int
950 i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
951 {
952 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
953 struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
954 char ethertype_hash_name[RTE_HASH_NAMESIZE];
955 int ret;
956
957 struct rte_hash_parameters ethertype_hash_params = {
958 .name = ethertype_hash_name,
959 .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
960 .key_len = sizeof(struct i40e_ethertype_filter_input),
961 .hash_func = rte_hash_crc,
962 .hash_func_init_val = 0,
963 .socket_id = rte_socket_id(),
964 };
965
966 /* Initialize ethertype filter rule list and hash */
967 TAILQ_INIT(&ethertype_rule->ethertype_list);
968 snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
969 "ethertype_%s", dev->device->name);
970 ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
971 if (!ethertype_rule->hash_table) {
972 PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
973 return -EINVAL;
974 }
975 ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
976 sizeof(struct i40e_ethertype_filter *) *
977 I40E_MAX_ETHERTYPE_FILTER_NUM,
978 0);
979 if (!ethertype_rule->hash_map) {
980 PMD_INIT_LOG(ERR,
981 "Failed to allocate memory for ethertype hash map!");
982 ret = -ENOMEM;
983 goto err_ethertype_hash_map_alloc;
984 }
985
986 return 0;
987
988 err_ethertype_hash_map_alloc:
989 rte_hash_free(ethertype_rule->hash_table);
990
991 return ret;
992 }
993
994 static int
995 i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
996 {
997 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
998 struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
999 char tunnel_hash_name[RTE_HASH_NAMESIZE];
1000 int ret;
1001
1002 struct rte_hash_parameters tunnel_hash_params = {
1003 .name = tunnel_hash_name,
1004 .entries = I40E_MAX_TUNNEL_FILTER_NUM,
1005 .key_len = sizeof(struct i40e_tunnel_filter_input),
1006 .hash_func = rte_hash_crc,
1007 .hash_func_init_val = 0,
1008 .socket_id = rte_socket_id(),
1009 };
1010
1011 /* Initialize tunnel filter rule list and hash */
1012 TAILQ_INIT(&tunnel_rule->tunnel_list);
1013 snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
1014 "tunnel_%s", dev->device->name);
1015 tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
1016 if (!tunnel_rule->hash_table) {
1017 PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
1018 return -EINVAL;
1019 }
1020 tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
1021 sizeof(struct i40e_tunnel_filter *) *
1022 I40E_MAX_TUNNEL_FILTER_NUM,
1023 0);
1024 if (!tunnel_rule->hash_map) {
1025 PMD_INIT_LOG(ERR,
1026 "Failed to allocate memory for tunnel hash map!");
1027 ret = -ENOMEM;
1028 goto err_tunnel_hash_map_alloc;
1029 }
1030
1031 return 0;
1032
1033 err_tunnel_hash_map_alloc:
1034 rte_hash_free(tunnel_rule->hash_table);
1035
1036 return ret;
1037 }
1038
1039 static int
1040 i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
1041 {
1042 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1043 struct i40e_fdir_info *fdir_info = &pf->fdir;
1044 char fdir_hash_name[RTE_HASH_NAMESIZE];
1045 int ret;
1046
1047 struct rte_hash_parameters fdir_hash_params = {
1048 .name = fdir_hash_name,
1049 .entries = I40E_MAX_FDIR_FILTER_NUM,
1050 .key_len = sizeof(struct i40e_fdir_input),
1051 .hash_func = rte_hash_crc,
1052 .hash_func_init_val = 0,
1053 .socket_id = rte_socket_id(),
1054 };
1055
1056 /* Initialize flow director filter rule list and hash */
1057 TAILQ_INIT(&fdir_info->fdir_list);
1058 snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
1059 "fdir_%s", dev->device->name);
1060 fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
1061 if (!fdir_info->hash_table) {
1062 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
1063 return -EINVAL;
1064 }
1065 fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
1066 sizeof(struct i40e_fdir_filter *) *
1067 I40E_MAX_FDIR_FILTER_NUM,
1068 0);
1069 if (!fdir_info->hash_map) {
1070 PMD_INIT_LOG(ERR,
1071 "Failed to allocate memory for fdir hash map!");
1072 ret = -ENOMEM;
1073 goto err_fdir_hash_map_alloc;
1074 }
1075 return 0;
1076
1077 err_fdir_hash_map_alloc:
1078 rte_hash_free(fdir_info->hash_table);
1079
1080 return ret;
1081 }
1082
1083 static void
1084 i40e_init_customized_info(struct i40e_pf *pf)
1085 {
1086 int i;
1087
1088 /* Initialize customized pctype */
1089 for (i = I40E_CUSTOMIZED_GTPC; i < I40E_CUSTOMIZED_MAX; i++) {
1090 pf->customized_pctype[i].index = i;
1091 pf->customized_pctype[i].pctype = I40E_FILTER_PCTYPE_INVALID;
1092 pf->customized_pctype[i].valid = false;
1093 }
1094
1095 pf->gtp_support = false;
1096 }
1097
1098 void
1099 i40e_init_queue_region_conf(struct rte_eth_dev *dev)
1100 {
1101 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1102 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1103 struct i40e_queue_regions *info = &pf->queue_region;
1104 uint16_t i;
1105
1106 for (i = 0; i < I40E_PFQF_HREGION_MAX_INDEX; i++)
1107 i40e_write_rx_ctl(hw, I40E_PFQF_HREGION(i), 0);
1108
1109 memset(info, 0, sizeof(struct i40e_queue_regions));
1110 }
1111
1112 static int
1113 i40e_parse_multi_drv_handler(__rte_unused const char *key,
1114 const char *value,
1115 void *opaque)
1116 {
1117 struct i40e_pf *pf;
1118 unsigned long support_multi_driver;
1119 char *end;
1120
1121 pf = (struct i40e_pf *)opaque;
1122
1123 errno = 0;
1124 support_multi_driver = strtoul(value, &end, 10);
1125 if (errno != 0 || end == value || *end != 0) {
1126 PMD_DRV_LOG(WARNING, "Wrong global configuration");
1127 return -(EINVAL);
1128 }
1129
1130 if (support_multi_driver == 1 || support_multi_driver == 0)
1131 pf->support_multi_driver = (bool)support_multi_driver;
1132 else
1133 PMD_DRV_LOG(WARNING, "%s must be 1 or 0, "
1134 "enable global configuration by default.",
1135 ETH_I40E_SUPPORT_MULTI_DRIVER);
1136 return 0;
1137 }
1138
1139 static int
1140 i40e_support_multi_driver(struct rte_eth_dev *dev)
1141 {
1142 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1143 struct rte_kvargs *kvlist;
1144 int kvargs_count;
1145
1146 /* Enable global configuration by default */
1147 pf->support_multi_driver = false;
1148
1149 if (!dev->device->devargs)
1150 return 0;
1151
1152 kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1153 if (!kvlist)
1154 return -EINVAL;
1155
1156 kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
1157 if (!kvargs_count) {
1158 rte_kvargs_free(kvlist);
1159 return 0;
1160 }
1161
1162 if (kvargs_count > 1)
1163 PMD_DRV_LOG(WARNING, "More than one \"%s\" argument given; only "
1164 "the first invalid or the last valid one is used!",
1165 ETH_I40E_SUPPORT_MULTI_DRIVER);
1166
1167 if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER,
1168 i40e_parse_multi_drv_handler, pf) < 0) {
1169 rte_kvargs_free(kvlist);
1170 return -EINVAL;
1171 }
1172
1173 rte_kvargs_free(kvlist);
1174 return 0;
1175 }
1176
1177 static int
1178 i40e_aq_debug_write_global_register(struct i40e_hw *hw,
1179 uint32_t reg_addr, uint64_t reg_val,
1180 struct i40e_asq_cmd_details *cmd_details)
1181 {
1182 uint64_t ori_reg_val;
1183 struct rte_eth_dev *dev;
1184 int ret;
1185
1186 ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
1187 if (ret != I40E_SUCCESS) {
1188 PMD_DRV_LOG(ERR,
1189 "Fail to debug read from 0x%08x",
1190 reg_addr);
1191 return -EIO;
1192 }
1193 dev = ((struct i40e_adapter *)hw->back)->eth_dev;
1194
1195 if (ori_reg_val != reg_val)
1196 PMD_DRV_LOG(WARNING,
1197 "i40e device %s changed global register [0x%08x]."
1198 " original: 0x%"PRIx64", after: 0x%"PRIx64,
1199 dev->device->name, reg_addr, ori_reg_val, reg_val);
1200
1201 return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
1202 }
1203
1204 static int
1205 eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
1206 {
1207 struct rte_pci_device *pci_dev;
1208 struct rte_intr_handle *intr_handle;
1209 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1210 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1211 struct i40e_vsi *vsi;
1212 int ret;
1213 uint32_t len;
1214 uint8_t aq_fail = 0;
1215
1216 PMD_INIT_FUNC_TRACE();
1217
1218 dev->dev_ops = &i40e_eth_dev_ops;
1219 dev->rx_pkt_burst = i40e_recv_pkts;
1220 dev->tx_pkt_burst = i40e_xmit_pkts;
1221 dev->tx_pkt_prepare = i40e_prep_pkts;
1222
1223 /* for secondary processes, we don't initialise any further as primary
1224 * has already done this work. Only check we don't need a different
1225 * RX function */
1226 if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1227 i40e_set_rx_function(dev);
1228 i40e_set_tx_function(dev);
1229 return 0;
1230 }
1231 i40e_set_default_ptype_table(dev);
1232 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1233 intr_handle = &pci_dev->intr_handle;
1234
1235 rte_eth_copy_pci_info(dev, pci_dev);
1236
1237 pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1238 pf->adapter->eth_dev = dev;
1239 pf->dev_data = dev->data;
1240
1241 hw->back = I40E_PF_TO_ADAPTER(pf);
1242 hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
1243 if (!hw->hw_addr) {
1244 PMD_INIT_LOG(ERR,
1245 "Hardware is not available, as address is NULL");
1246 return -ENODEV;
1247 }
1248
1249 hw->vendor_id = pci_dev->id.vendor_id;
1250 hw->device_id = pci_dev->id.device_id;
1251 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1252 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1253 hw->bus.device = pci_dev->addr.devid;
1254 hw->bus.func = pci_dev->addr.function;
1255 hw->adapter_stopped = 0;
1256
1257 /*
1258 * Switch Tag value should not be identical to either the First Tag
1259 * or Second Tag values. So set something other than common Ethertype
1260 * for internal switching.
1261 */
1262 hw->switch_tag = 0xffff;
1263
1264 /* Check whether multi-driver support is needed */
1265 i40e_support_multi_driver(dev);
1266
1267 /* Make sure all is clean before doing PF reset */
1268 i40e_clear_hw(hw);
1269
1270 /* Initialize the hardware */
1271 i40e_hw_init(dev);
1272
1273 /* Reset here to make sure all is clean for each PF */
1274 ret = i40e_pf_reset(hw);
1275 if (ret) {
1276 PMD_INIT_LOG(ERR, "Failed to reset pf: %d", ret);
1277 return ret;
1278 }
1279
1280 /* Initialize the shared code (base driver) */
1281 ret = i40e_init_shared_code(hw);
1282 if (ret) {
1283 PMD_INIT_LOG(ERR, "Failed to init shared code (base driver): %d", ret);
1284 return ret;
1285 }
1286
1287 i40e_config_automask(pf);
1288
1289 i40e_set_default_pctype_table(dev);
1290
1291 /*
1292 * To work around an NVM issue, initialize the registers
1293 * for the QinQ packet type in software.
1294 * This should be removed once the issue is fixed in the NVM.
1295 */
1296 if (!pf->support_multi_driver)
1297 i40e_GLQF_reg_init(hw);
1298
1299 /* Initialize the input set for filters (hash and fd) to default value */
1300 i40e_filter_input_set_init(pf);
1301
1302 /* Initialize the parameters for adminq */
1303 i40e_init_adminq_parameter(hw);
1304 ret = i40e_init_adminq(hw);
1305 if (ret != I40E_SUCCESS) {
1306 PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
1307 return -EIO;
1308 }
1309 PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
1310 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
1311 hw->aq.api_maj_ver, hw->aq.api_min_ver,
1312 ((hw->nvm.version >> 12) & 0xf),
1313 ((hw->nvm.version >> 4) & 0xff),
1314 (hw->nvm.version & 0xf), hw->nvm.eetrack);
1315
1316 /* initialise the L3_MAP register */
1317 if (!pf->support_multi_driver) {
1318 ret = i40e_aq_debug_write_global_register(hw,
1319 I40E_GLQF_L3_MAP(40),
1320 0x00000028, NULL);
1321 if (ret)
1322 PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
1323 ret);
1324 PMD_INIT_LOG(DEBUG,
1325 "Global register 0x%08x is changed with 0x28",
1326 I40E_GLQF_L3_MAP(40));
1327 }
1328
1329 /* Need the special FW version to support floating VEB */
1330 config_floating_veb(dev);
1331 /* Clear PXE mode */
1332 i40e_clear_pxe_mode(hw);
1333 i40e_dev_sync_phy_type(hw);
1334
1335 /*
1336 * On X710, performance falls far short of expectations on recent
1337 * firmware versions, and the fix for this issue may not be integrated
1338 * in the next firmware release. A software workaround is therefore
1339 * needed: it modifies the initial values of three internal-only
1340 * registers. The workaround can be removed once the issue is fixed
1341 * in firmware.
1342 */
1343 i40e_configure_registers(hw);
1344
1345 /* Get hw capabilities */
1346 ret = i40e_get_cap(hw);
1347 if (ret != I40E_SUCCESS) {
1348 PMD_INIT_LOG(ERR, "Failed to get capabilities: %d", ret);
1349 goto err_get_capabilities;
1350 }
1351
1352 /* Initialize parameters for PF */
1353 ret = i40e_pf_parameter_init(dev);
1354 if (ret != 0) {
1355 PMD_INIT_LOG(ERR, "Failed to do parameter init: %d", ret);
1356 goto err_parameter_init;
1357 }
1358
1359 /* Initialize the queue management */
1360 ret = i40e_res_pool_init(&pf->qp_pool, 0, hw->func_caps.num_tx_qp);
1361 if (ret < 0) {
1362 PMD_INIT_LOG(ERR, "Failed to init queue pool");
1363 goto err_qp_pool_init;
1364 }
1365 ret = i40e_res_pool_init(&pf->msix_pool, 1,
1366 hw->func_caps.num_msix_vectors - 1);
1367 if (ret < 0) {
1368 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1369 goto err_msix_pool_init;
1370 }
1371
1372 /* Initialize lan hmc */
1373 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
1374 hw->func_caps.num_rx_qp, 0, 0);
1375 if (ret != I40E_SUCCESS) {
1376 PMD_INIT_LOG(ERR, "Failed to init lan hmc: %d", ret);
1377 goto err_init_lan_hmc;
1378 }
1379
1380 /* Configure lan hmc */
1381 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
1382 if (ret != I40E_SUCCESS) {
1383 PMD_INIT_LOG(ERR, "Failed to configure lan hmc: %d", ret);
1384 goto err_configure_lan_hmc;
1385 }
1386
1387 /* Get and check the mac address */
1388 i40e_get_mac_addr(hw, hw->mac.addr);
1389 if (i40e_validate_mac_addr(hw->mac.addr) != I40E_SUCCESS) {
1390 PMD_INIT_LOG(ERR, "mac address is not valid");
1391 ret = -EIO;
1392 goto err_get_mac_addr;
1393 }
1394 /* Copy the permanent MAC address */
1395 ether_addr_copy((struct ether_addr *) hw->mac.addr,
1396 (struct ether_addr *) hw->mac.perm_addr);
1397
1398 /* Disable flow control */
1399 hw->fc.requested_mode = I40E_FC_NONE;
1400 i40e_set_fc(hw, &aq_fail, TRUE);
1401
1402 /* Set the global registers with default ether type value */
1403 if (!pf->support_multi_driver) {
1404 ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1405 ETHER_TYPE_VLAN);
1406 if (ret != I40E_SUCCESS) {
1407 PMD_INIT_LOG(ERR,
1408 "Failed to set the default outer "
1409 "VLAN ether type");
1410 goto err_setup_pf_switch;
1411 }
1412 }
1413
1414 /* PF setup, which includes VSI setup */
1415 ret = i40e_pf_setup(pf);
1416 if (ret) {
1417 PMD_INIT_LOG(ERR, "Failed to setup pf switch: %d", ret);
1418 goto err_setup_pf_switch;
1419 }
1420
1421 /* reset all stats of the device, including pf and main vsi */
1422 i40e_dev_stats_reset(dev);
1423
1424 vsi = pf->main_vsi;
1425
1426 /* Disable double vlan by default */
1427 i40e_vsi_config_double_vlan(vsi, FALSE);
1428
1429 /* Disable S-TAG identification when floating_veb is disabled */
1430 if (!pf->floating_veb) {
1431 ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
1432 if (ret & I40E_L2_TAGS_S_TAG_MASK) {
1433 ret &= ~I40E_L2_TAGS_S_TAG_MASK;
1434 I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
1435 }
1436 }
1437
1438 if (!vsi->max_macaddrs)
1439 len = ETHER_ADDR_LEN;
1440 else
1441 len = ETHER_ADDR_LEN * vsi->max_macaddrs;
1442
1443 /* Must be done after the VSI is initialized */
1444 dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
1445 if (!dev->data->mac_addrs) {
1446 PMD_INIT_LOG(ERR,
1447 "Failed to allocated memory for storing mac address");
1448 goto err_mac_alloc;
1449 }
1450 ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
1451 &dev->data->mac_addrs[0]);
1452
1453 /* Init dcb to sw mode by default */
1454 ret = i40e_dcb_init_configure(dev, TRUE);
1455 if (ret != I40E_SUCCESS) {
1456 PMD_INIT_LOG(INFO, "Failed to init dcb.");
1457 pf->flags &= ~I40E_FLAG_DCB;
1458 }
1459 /* Update HW struct after DCB configuration */
1460 i40e_get_cap(hw);
1461
1462 /* initialize pf host driver to set up SR-IOV resources if applicable */
1463 i40e_pf_host_init(dev);
1464
1465 /* register callback func to eal lib */
1466 rte_intr_callback_register(intr_handle,
1467 i40e_dev_interrupt_handler, dev);
1468
1469 /* configure and enable device interrupt */
1470 i40e_pf_config_irq0(hw, TRUE);
1471 i40e_pf_enable_irq0(hw);
1472
1473 /* enable uio intr after callback register */
1474 rte_intr_enable(intr_handle);
1475
1476 /* By default disable flexible payload in global configuration */
1477 if (!pf->support_multi_driver)
1478 i40e_flex_payload_reg_set_default(hw);
1479
1480 /*
1481 * Add an ethertype filter to drop all flow control frames transmitted
1482 * from VSIs. By doing so, we stop VFs from sending PAUSE or PFC
1483 * frames to the wire.
1484 */
1485 i40e_add_tx_flow_control_drop_filter(pf);
1486
1487 /* Set the max frame size to 0x2600 by default,
1488 * in case other drivers changed the default value.
1489 */
1490 i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
1491
1492 /* initialize mirror rule list */
1493 TAILQ_INIT(&pf->mirror_list);
1494
1495 /* initialize Traffic Manager configuration */
1496 i40e_tm_conf_init(dev);
1497
1498 /* Initialize customized information */
1499 i40e_init_customized_info(pf);
1500
1501 ret = i40e_init_ethtype_filter_list(dev);
1502 if (ret < 0)
1503 goto err_init_ethtype_filter_list;
1504 ret = i40e_init_tunnel_filter_list(dev);
1505 if (ret < 0)
1506 goto err_init_tunnel_filter_list;
1507 ret = i40e_init_fdir_filter_list(dev);
1508 if (ret < 0)
1509 goto err_init_fdir_filter_list;
1510
1511 /* initialize queue region configuration */
1512 i40e_init_queue_region_conf(dev);
1513
1514 /* initialize rss configuration from rte_flow */
1515 memset(&pf->rss_info, 0,
1516 sizeof(struct i40e_rte_flow_rss_conf));
1517
1518 return 0;
1519
1520 err_init_fdir_filter_list:
1521 rte_free(pf->tunnel.hash_table);
1522 rte_free(pf->tunnel.hash_map);
1523 err_init_tunnel_filter_list:
1524 rte_free(pf->ethertype.hash_table);
1525 rte_free(pf->ethertype.hash_map);
1526 err_init_ethtype_filter_list:
1527 rte_free(dev->data->mac_addrs);
1528 err_mac_alloc:
1529 i40e_vsi_release(pf->main_vsi);
1530 err_setup_pf_switch:
1531 err_get_mac_addr:
1532 err_configure_lan_hmc:
1533 (void)i40e_shutdown_lan_hmc(hw);
1534 err_init_lan_hmc:
1535 i40e_res_pool_destroy(&pf->msix_pool);
1536 err_msix_pool_init:
1537 i40e_res_pool_destroy(&pf->qp_pool);
1538 err_qp_pool_init:
1539 err_parameter_init:
1540 err_get_capabilities:
1541 (void)i40e_shutdown_adminq(hw);
1542
1543 return ret;
1544 }
1545
1546 static void
1547 i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
1548 {
1549 struct i40e_ethertype_filter *p_ethertype;
1550 struct i40e_ethertype_rule *ethertype_rule;
1551
1552 ethertype_rule = &pf->ethertype;
1553 /* Remove all ethertype filter rules and hash */
1554 if (ethertype_rule->hash_map)
1555 rte_free(ethertype_rule->hash_map);
1556 if (ethertype_rule->hash_table)
1557 rte_hash_free(ethertype_rule->hash_table);
1558
1559 while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
1560 TAILQ_REMOVE(&ethertype_rule->ethertype_list,
1561 p_ethertype, rules);
1562 rte_free(p_ethertype);
1563 }
1564 }
1565
1566 static void
1567 i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
1568 {
1569 struct i40e_tunnel_filter *p_tunnel;
1570 struct i40e_tunnel_rule *tunnel_rule;
1571
1572 tunnel_rule = &pf->tunnel;
1573 /* Remove all tunnel filter rules and hash */
1574 if (tunnel_rule->hash_map)
1575 rte_free(tunnel_rule->hash_map);
1576 if (tunnel_rule->hash_table)
1577 rte_hash_free(tunnel_rule->hash_table);
1578
1579 while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
1580 TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
1581 rte_free(p_tunnel);
1582 }
1583 }
1584
1585 static void
1586 i40e_rm_fdir_filter_list(struct i40e_pf *pf)
1587 {
1588 struct i40e_fdir_filter *p_fdir;
1589 struct i40e_fdir_info *fdir_info;
1590
1591 fdir_info = &pf->fdir;
1592 /* Remove all flow director rules and hash */
1593 if (fdir_info->hash_map)
1594 rte_free(fdir_info->hash_map);
1595 if (fdir_info->hash_table)
1596 rte_hash_free(fdir_info->hash_table);
1597
1598 while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
1599 TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
1600 rte_free(p_fdir);
1601 }
1602 }
1603
1604 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
1605 {
1606 /*
1607 * Disable flexible payload by default
1608 * for the corresponding L2/L3/L4 layers.
1609 */
1610 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
1611 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
1612 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
1613 }
1614
1615 static int
1616 eth_i40e_dev_uninit(struct rte_eth_dev *dev)
1617 {
1618 struct i40e_pf *pf;
1619 struct rte_pci_device *pci_dev;
1620 struct rte_intr_handle *intr_handle;
1621 struct i40e_hw *hw;
1622 struct i40e_filter_control_settings settings;
1623 struct rte_flow *p_flow;
1624 int ret;
1625 uint8_t aq_fail = 0;
1626 int retries = 0;
1627
1628 PMD_INIT_FUNC_TRACE();
1629
1630 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1631 return 0;
1632
1633 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1634 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1635 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1636 intr_handle = &pci_dev->intr_handle;
1637
1638 ret = rte_eth_switch_domain_free(pf->switch_domain_id);
1639 if (ret)
1640 PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
1641
1642 if (hw->adapter_stopped == 0)
1643 i40e_dev_close(dev);
1644
1645 dev->dev_ops = NULL;
1646 dev->rx_pkt_burst = NULL;
1647 dev->tx_pkt_burst = NULL;
1648
1649 /* Clear PXE mode */
1650 i40e_clear_pxe_mode(hw);
1651
1652 /* Unconfigure filter control */
1653 memset(&settings, 0, sizeof(settings));
1654 ret = i40e_set_filter_control(hw, &settings);
1655 if (ret)
1656 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
1657 ret);
1658
1659 /* Disable flow control */
1660 hw->fc.requested_mode = I40E_FC_NONE;
1661 i40e_set_fc(hw, &aq_fail, TRUE);
1662
1663 /* uninitialize pf host driver */
1664 i40e_pf_host_uninit(dev);
1665
1666 rte_free(dev->data->mac_addrs);
1667 dev->data->mac_addrs = NULL;
1668
1669 /* disable uio intr before callback unregister */
1670 rte_intr_disable(intr_handle);
1671
1672 /* unregister callback func to eal lib */
1673 do {
1674 ret = rte_intr_callback_unregister(intr_handle,
1675 i40e_dev_interrupt_handler, dev);
1676 if (ret >= 0) {
1677 break;
1678 } else if (ret != -EAGAIN) {
1679 PMD_INIT_LOG(ERR,
1680 "intr callback unregister failed: %d",
1681 ret);
1682 return ret;
1683 }
1684 i40e_msec_delay(500);
1685 } while (retries++ < 5);
1686
1687 i40e_rm_ethtype_filter_list(pf);
1688 i40e_rm_tunnel_filter_list(pf);
1689 i40e_rm_fdir_filter_list(pf);
1690
1691 /* Remove all flows */
1692 while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
1693 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
1694 rte_free(p_flow);
1695 }
1696
1697 /* Remove all Traffic Manager configuration */
1698 i40e_tm_conf_uninit(dev);
1699
1700 return 0;
1701 }
1702
1703 static int
1704 i40e_dev_configure(struct rte_eth_dev *dev)
1705 {
1706 struct i40e_adapter *ad =
1707 I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1708 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1709 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1710 enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
1711 int i, ret;
1712
1713 ret = i40e_dev_sync_phy_type(hw);
1714 if (ret)
1715 return ret;
1716
1717 /* Initialize to TRUE. If any of the Rx queues doesn't meet the
1718 * bulk allocation or vector Rx preconditions, we will reset it.
1719 */
1720 ad->rx_bulk_alloc_allowed = true;
1721 ad->rx_vec_allowed = true;
1722 ad->tx_simple_allowed = true;
1723 ad->tx_vec_allowed = true;
1724
1725 if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
1726 ret = i40e_fdir_setup(pf);
1727 if (ret != I40E_SUCCESS) {
1728 PMD_DRV_LOG(ERR, "Failed to setup flow director.");
1729 return -ENOTSUP;
1730 }
1731 ret = i40e_fdir_configure(dev);
1732 if (ret < 0) {
1733 PMD_DRV_LOG(ERR, "failed to configure fdir.");
1734 goto err;
1735 }
1736 } else
1737 i40e_fdir_teardown(pf);
1738
1739 ret = i40e_dev_init_vlan(dev);
1740 if (ret < 0)
1741 goto err;
1742
1743 /* VMDQ setup.
1744 * The VMDQ setting needs to be done outside i40e_pf_config_mq_rx(),
1745 * as the VMDQ and RSS settings have different requirements.
1746 * The general PMD call sequence is NIC init, configure,
1747 * rx/tx_queue_setup() and dev_start(). rx/tx_queue_setup() looks up
1748 * the VSI that a specific queue belongs to when VMDQ is applicable,
1749 * so the VMDQ setting has to be done before rx/tx_queue_setup();
1750 * this function is a good place for vmdq_setup.
1751 * The RSS setting needs the actual number of configured RX queues,
1752 * which is only available after rx_queue_setup(), so dev_start()
1753 * is a good place for the RSS setup.
1754 */
1755 if (mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
1756 ret = i40e_vmdq_setup(dev);
1757 if (ret)
1758 goto err;
1759 }
1760
1761 if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
1762 ret = i40e_dcb_setup(dev);
1763 if (ret) {
1764 PMD_DRV_LOG(ERR, "failed to configure DCB.");
1765 goto err_dcb;
1766 }
1767 }
1768
1769 TAILQ_INIT(&pf->flow_list);
1770
1771 return 0;
1772
1773 err_dcb:
1774 /* need to release vmdq resource if exists */
1775 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
1776 i40e_vsi_release(pf->vmdq[i].vsi);
1777 pf->vmdq[i].vsi = NULL;
1778 }
1779 rte_free(pf->vmdq);
1780 pf->vmdq = NULL;
1781 err:
1782 /* need to release fdir resource if exists */
1783 i40e_fdir_teardown(pf);
1784 return ret;
1785 }
1786
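/* Detach a VSI's queues from their MSI-X vector: clear the per-queue
 * QINT_TQCTL/QINT_RQCTL registers and reset the interrupt linked-list
 * head (PFINT_LNKLST0/PFINT_LNKLSTN for the PF, VPINT_LNKLSTN for
 * SR-IOV VSIs).
 */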
1787 void
1788 i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
1789 {
1790 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1791 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1792 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1793 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1794 uint16_t msix_vect = vsi->msix_intr;
1795 uint16_t i;
1796
1797 for (i = 0; i < vsi->nb_qps; i++) {
1798 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1799 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1800 rte_wmb();
1801 }
1802
1803 if (vsi->type != I40E_VSI_SRIOV) {
1804 if (!rte_intr_allow_others(intr_handle)) {
1805 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1806 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
1807 I40E_WRITE_REG(hw,
1808 I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1809 0);
1810 } else {
1811 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1812 I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
1813 I40E_WRITE_REG(hw,
1814 I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1815 msix_vect - 1), 0);
1816 }
1817 } else {
1818 uint32_t reg;
1819 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1820 vsi->user_param + (msix_vect - 1);
1821
1822 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1823 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1824 }
1825 I40E_WRITE_FLUSH(hw);
1826 }
1827
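/* Chain the RX queues [base_queue, base_queue + nb_queue) into a single
 * interrupt cause: each queue's QINT_RQCTL points to the next queue via
 * NEXTQ_INDX, the last queue terminates the list, and the first queue is
 * written to the link-list head register (PFINT_LNKLST0/LNKLSTN for the
 * PF, VPINT_LNKLST0/LNKLSTN for SR-IOV VSIs). For PF vectors the default
 * ITR interval is programmed as well.
 */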
1828 static void
1829 __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
1830 int base_queue, int nb_queue,
1831 uint16_t itr_idx)
1832 {
1833 int i;
1834 uint32_t val;
1835 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1836 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1837
1838 /* Bind all RX queues to allocated MSIX interrupt */
1839 for (i = 0; i < nb_queue; i++) {
1840 val = (msix_vect << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1841 itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT |
1842 ((base_queue + i + 1) <<
1843 I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1844 (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1845 I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1846
1847 if (i == nb_queue - 1)
1848 val |= I40E_QINT_RQCTL_NEXTQ_INDX_MASK;
1849 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(base_queue + i), val);
1850 }
1851
1852 /* Write first RX queue to Link list register as the head element */
1853 if (vsi->type != I40E_VSI_SRIOV) {
1854 uint16_t interval =
1855 i40e_calc_itr_interval(1, pf->support_multi_driver);
1856
1857 if (msix_vect == I40E_MISC_VEC_ID) {
1858 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
1859 (base_queue <<
1860 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1861 (0x0 <<
1862 I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1863 I40E_WRITE_REG(hw,
1864 I40E_PFINT_ITR0(I40E_ITR_INDEX_DEFAULT),
1865 interval);
1866 } else {
1867 I40E_WRITE_REG(hw, I40E_PFINT_LNKLSTN(msix_vect - 1),
1868 (base_queue <<
1869 I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1870 (0x0 <<
1871 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1872 I40E_WRITE_REG(hw,
1873 I40E_PFINT_ITRN(I40E_ITR_INDEX_DEFAULT,
1874 msix_vect - 1),
1875 interval);
1876 }
1877 } else {
1878 uint32_t reg;
1879
1880 if (msix_vect == I40E_MISC_VEC_ID) {
1881 I40E_WRITE_REG(hw,
1882 I40E_VPINT_LNKLST0(vsi->user_param),
1883 (base_queue <<
1884 I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1885 (0x0 <<
1886 I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
1887 } else {
1888 /* num_msix_vectors_vf needs to exclude irq0 */
1889 reg = (hw->func_caps.num_msix_vectors_vf - 1) *
1890 vsi->user_param + (msix_vect - 1);
1891
1892 I40E_WRITE_REG(hw, I40E_VPINT_LNKLSTN(reg),
1893 (base_queue <<
1894 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
1895 (0x0 <<
1896 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1897 }
1898 }
1899
1900 I40E_WRITE_FLUSH(hw);
1901 }
1902
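/* Distribute a VSI's queues over its MSI-X vectors. SR-IOV VSIs map all
 * queues to the single VF vector. Otherwise queues are bound 1:1 to
 * vectors while vectors remain; once only one vector is left (or the
 * miscellaneous vector must be shared), the remaining queues are all
 * chained to that vector. For MAIN and VMDq VSIs the chosen vector is
 * also recorded in the ethdev intr_vec[] table.
 */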
1903 void
1904 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
1905 {
1906 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1907 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1908 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1909 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1910 uint16_t msix_vect = vsi->msix_intr;
1911 uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1912 uint16_t queue_idx = 0;
1913 int record = 0;
1914 int i;
1915
1916 for (i = 0; i < vsi->nb_qps; i++) {
1917 I40E_WRITE_REG(hw, I40E_QINT_TQCTL(vsi->base_queue + i), 0);
1918 I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0);
1919 }
1920
1921 /* VF bind interrupt */
1922 if (vsi->type == I40E_VSI_SRIOV) {
1923 __vsi_queues_bind_intr(vsi, msix_vect,
1924 vsi->base_queue, vsi->nb_qps,
1925 itr_idx);
1926 return;
1927 }
1928
1929 /* PF & VMDq bind interrupt */
1930 if (rte_intr_dp_is_en(intr_handle)) {
1931 if (vsi->type == I40E_VSI_MAIN) {
1932 queue_idx = 0;
1933 record = 1;
1934 } else if (vsi->type == I40E_VSI_VMDQ2) {
1935 struct i40e_vsi *main_vsi =
1936 I40E_DEV_PRIVATE_TO_MAIN_VSI(vsi->adapter);
1937 queue_idx = vsi->base_queue - main_vsi->nb_qps;
1938 record = 1;
1939 }
1940 }
1941
1942 for (i = 0; i < vsi->nb_used_qps; i++) {
1943 if (nb_msix <= 1) {
1944 if (!rte_intr_allow_others(intr_handle))
1945 /* allow sharing MISC_VEC_ID */
1946 msix_vect = I40E_MISC_VEC_ID;
1947
1948 /* not enough msix_vect, map all to one */
1949 __vsi_queues_bind_intr(vsi, msix_vect,
1950 vsi->base_queue + i,
1951 vsi->nb_used_qps - i,
1952 itr_idx);
1953 for (; !!record && i < vsi->nb_used_qps; i++)
1954 intr_handle->intr_vec[queue_idx + i] =
1955 msix_vect;
1956 break;
1957 }
1958 /* 1:1 queue/msix_vect mapping */
1959 __vsi_queues_bind_intr(vsi, msix_vect,
1960 vsi->base_queue + i, 1,
1961 itr_idx);
1962 if (!!record)
1963 intr_handle->intr_vec[queue_idx + i] = msix_vect;
1964
1965 msix_vect++;
1966 nb_msix--;
1967 }
1968 }
1969
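/* Arm the VSI's queue interrupts: when dedicated MSI-X vectors are
 * available (and multi-driver support is off) each vector is enabled
 * through PFINT_DYN_CTLN; otherwise the shared PFINT_DYN_CTL0 is used.
 */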
1970 static void
1971 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
1972 {
1973 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1974 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1975 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1976 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
1977 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
1978 uint16_t msix_intr, i;
1979
1980 if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
1981 for (i = 0; i < vsi->nb_msix; i++) {
1982 msix_intr = vsi->msix_intr + i;
1983 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
1984 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1985 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1986 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
1987 }
1988 else
1989 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
1990 I40E_PFINT_DYN_CTL0_INTENA_MASK |
1991 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1992 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
1993
1994 I40E_WRITE_FLUSH(hw);
1995 }
1996
1997 static void
1998 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
1999 {
2000 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
2001 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2002 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2003 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2004 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
2005 uint16_t msix_intr, i;
2006
2007 if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver)
2008 for (i = 0; i < vsi->nb_msix; i++) {
2009 msix_intr = vsi->msix_intr + i;
2010 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1),
2011 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
2012 }
2013 else
2014 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
2015 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
2016
2017 I40E_WRITE_FLUSH(hw);
2018 }
2019
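/* Translate the ethdev ETH_LINK_SPEED_* bitmap into the I40E_LINK_SPEED_*
 * bitmap expected by the admin queue.
 */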
2020 static inline uint8_t
2021 i40e_parse_link_speeds(uint16_t link_speeds)
2022 {
2023 uint8_t link_speed = I40E_LINK_SPEED_UNKNOWN;
2024
2025 if (link_speeds & ETH_LINK_SPEED_40G)
2026 link_speed |= I40E_LINK_SPEED_40GB;
2027 if (link_speeds & ETH_LINK_SPEED_25G)
2028 link_speed |= I40E_LINK_SPEED_25GB;
2029 if (link_speeds & ETH_LINK_SPEED_20G)
2030 link_speed |= I40E_LINK_SPEED_20GB;
2031 if (link_speeds & ETH_LINK_SPEED_10G)
2032 link_speed |= I40E_LINK_SPEED_10GB;
2033 if (link_speeds & ETH_LINK_SPEED_1G)
2034 link_speed |= I40E_LINK_SPEED_1GB;
2035 if (link_speeds & ETH_LINK_SPEED_100M)
2036 link_speed |= I40E_LINK_SPEED_100MB;
2037
2038 return link_speed;
2039 }
2040
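/* Program the PHY through the admin queue. The available speeds and the
 * current configuration are read with two Get PHY Abilities queries; the
 * pause and low-power bits of the current configuration are preserved,
 * the forced speed is applied when supported (otherwise all available
 * speeds are advertised), and the result is written back with Set PHY
 * Config.
 */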
2041 static int
2042 i40e_phy_conf_link(struct i40e_hw *hw,
2043 uint8_t abilities,
2044 uint8_t force_speed,
2045 bool is_up)
2046 {
2047 enum i40e_status_code status;
2048 struct i40e_aq_get_phy_abilities_resp phy_ab;
2049 struct i40e_aq_set_phy_config phy_conf;
2050 enum i40e_aq_phy_type cnt;
2051 uint8_t avail_speed;
2052 uint32_t phy_type_mask = 0;
2053
2054 const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
2055 I40E_AQ_PHY_FLAG_PAUSE_RX |
2057 I40E_AQ_PHY_FLAG_LOW_POWER;
2058 int ret = -ENOTSUP;
2059
2060 /* Get the PHY capabilities to learn the available speeds. */
2061 status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
2062 NULL);
2063 if (status) {
2064 PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
2065 status);
2066 return ret;
2067 }
2068 avail_speed = phy_ab.link_speed;
2069
2070 /* Get the current PHY configuration. */
2071 status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
2072 NULL);
2073 if (status) {
2074 PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
2075 status);
2076 return ret;
2077 }
2078
2079 /* If the link needs to go up, autoneg is requested and the currently
2080 * configured speed is OK, there is no need to set it up again.
2081 */
2082 if (is_up && phy_ab.phy_type != 0 &&
2083 abilities & I40E_AQ_PHY_AN_ENABLED &&
2084 phy_ab.link_speed != 0)
2085 return I40E_SUCCESS;
2086
2087 memset(&phy_conf, 0, sizeof(phy_conf));
2088
2089 /* bits 0-2 use the values from get_phy_abilities_resp */
2090 abilities &= ~mask;
2091 abilities |= phy_ab.abilities & mask;
2092
2093 phy_conf.abilities = abilities;
2094
2095 /* If the link needs to go up but the forced speed is not supported,
2096 * warn users and configure the default available speeds.
2097 */
2098 if (is_up && !(force_speed & avail_speed)) {
2099 PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
2100 phy_conf.link_speed = avail_speed;
2101 } else {
2102 phy_conf.link_speed = is_up ? force_speed : avail_speed;
2103 }
2104
2105 /* PHY type mask needs to include each type except PHY type extension */
2106 for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
2107 phy_type_mask |= 1 << cnt;
2108
2109 /* use get_phy_abilities_resp value for the rest */
2110 phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
2111 phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
2112 I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
2113 I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
2114 phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
2115 phy_conf.eee_capability = phy_ab.eee_capability;
2116 phy_conf.eeer = phy_ab.eeer_val;
2117 phy_conf.low_power_ctrl = phy_ab.d3_lpan;
2118
2119 PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x",
2120 phy_ab.abilities, phy_ab.link_speed);
2121 PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x",
2122 phy_conf.abilities, phy_conf.link_speed);
2123
2124 status = i40e_aq_set_phy_config(hw, &phy_conf, NULL);
2125 if (status)
2126 return ret;
2127
2128 return I40E_SUCCESS;
2129 }
2130
2131 static int
2132 i40e_apply_link_speed(struct rte_eth_dev *dev)
2133 {
2134 uint8_t speed;
2135 uint8_t abilities = 0;
2136 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2137 struct rte_eth_conf *conf = &dev->data->dev_conf;
2138
2139 if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
2140 conf->link_speeds = ETH_LINK_SPEED_40G |
2141 ETH_LINK_SPEED_25G |
2142 ETH_LINK_SPEED_20G |
2143 ETH_LINK_SPEED_10G |
2144 ETH_LINK_SPEED_1G |
2145 ETH_LINK_SPEED_100M;
2146 }
2147 speed = i40e_parse_link_speeds(conf->link_speeds);
2148 abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
2149 I40E_AQ_PHY_AN_ENABLED |
2150 I40E_AQ_PHY_LINK_ENABLED;
2151
2152 return i40e_phy_conf_link(hw, abilities, speed, true);
2153 }
2154
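/* Device start sequence: set up per-queue interrupt vectors, initialize
 * the RX/TX rings, bind the main, VMDq and FDIR VSIs to their MSI-X
 * vectors, enable all configured queues, enable broadcast reception (and
 * VLAN promiscuous mode on VF VSIs), optionally configure loopback, apply
 * the link speed configuration and restore any saved flow filters.
 */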
2155 static int
2156 i40e_dev_start(struct rte_eth_dev *dev)
2157 {
2158 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2159 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2160 struct i40e_vsi *main_vsi = pf->main_vsi;
2161 int ret, i;
2162 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2163 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2164 uint32_t intr_vector = 0;
2165 struct i40e_vsi *vsi;
2166
2167 hw->adapter_stopped = 0;
2168
2169 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
2170 PMD_INIT_LOG(ERR,
2171 "Invalid link_speeds for port %u, autonegotiation disabled",
2172 dev->data->port_id);
2173 return -EINVAL;
2174 }
2175
2176 rte_intr_disable(intr_handle);
2177
2178 if ((rte_intr_cap_multiple(intr_handle) ||
2179 !RTE_ETH_DEV_SRIOV(dev).active) &&
2180 dev->data->dev_conf.intr_conf.rxq != 0) {
2181 intr_vector = dev->data->nb_rx_queues;
2182 ret = rte_intr_efd_enable(intr_handle, intr_vector);
2183 if (ret)
2184 return ret;
2185 }
2186
2187 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2188 intr_handle->intr_vec =
2189 rte_zmalloc("intr_vec",
2190 dev->data->nb_rx_queues * sizeof(int),
2191 0);
2192 if (!intr_handle->intr_vec) {
2193 PMD_INIT_LOG(ERR,
2194 "Failed to allocate %d rx_queues intr_vec",
2195 dev->data->nb_rx_queues);
2196 return -ENOMEM;
2197 }
2198 }
2199
2200 /* Initialize VSI */
2201 ret = i40e_dev_rxtx_init(pf);
2202 if (ret != I40E_SUCCESS) {
2203 PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
2204 goto err_up;
2205 }
2206
2207 /* Map queues with MSIX interrupt */
2208 main_vsi->nb_used_qps = dev->data->nb_rx_queues -
2209 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2210 i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
2211 i40e_vsi_enable_queues_intr(main_vsi);
2212
2213 /* Map VMDQ VSI queues with MSIX interrupt */
2214 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2215 pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
2216 i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
2217 I40E_ITR_INDEX_DEFAULT);
2218 i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
2219 }
2220
2221 /* enable FDIR MSIX interrupt */
2222 if (pf->fdir.fdir_vsi) {
2223 i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
2224 I40E_ITR_INDEX_NONE);
2225 i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
2226 }
2227
2228 /* Enable all queues which have been configured */
2229 ret = i40e_dev_switch_queues(pf, TRUE);
2230 if (ret != I40E_SUCCESS) {
2231 PMD_DRV_LOG(ERR, "Failed to enable VSI");
2232 goto err_up;
2233 }
2234
2235 /* Enable receiving broadcast packets */
2236 ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
2237 if (ret != I40E_SUCCESS)
2238 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2239
2240 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2241 ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
2242 true, NULL);
2243 if (ret != I40E_SUCCESS)
2244 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
2245 }
2246
2247 /* Enable the VLAN promiscuous mode. */
2248 if (pf->vfs) {
2249 for (i = 0; i < pf->vf_num; i++) {
2250 vsi = pf->vfs[i].vsi;
2251 i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
2252 true, NULL);
2253 }
2254 }
2255
2256 /* Enable mac loopback mode */
2257 if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE ||
2258 dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) {
2259 ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL);
2260 if (ret != I40E_SUCCESS) {
2261 PMD_DRV_LOG(ERR, "fail to set loopback link");
2262 goto err_up;
2263 }
2264 }
2265
2266 /* Apply link configuration */
2267 ret = i40e_apply_link_speed(dev);
2268 if (I40E_SUCCESS != ret) {
2269 PMD_DRV_LOG(ERR, "Fail to apply link setting");
2270 goto err_up;
2271 }
2272
2273 if (!rte_intr_allow_others(intr_handle)) {
2274 rte_intr_callback_unregister(intr_handle,
2275 i40e_dev_interrupt_handler,
2276 (void *)dev);
2277 /* configure and enable device interrupt */
2278 i40e_pf_config_irq0(hw, FALSE);
2279 i40e_pf_enable_irq0(hw);
2280
2281 if (dev->data->dev_conf.intr_conf.lsc != 0)
2282 PMD_INIT_LOG(INFO,
2283 "lsc won't enable because of no intr multiplex");
2284 } else {
2285 ret = i40e_aq_set_phy_int_mask(hw,
2286 ~(I40E_AQ_EVENT_LINK_UPDOWN |
2287 I40E_AQ_EVENT_MODULE_QUAL_FAIL |
2288 I40E_AQ_EVENT_MEDIA_NA), NULL);
2289 if (ret != I40E_SUCCESS)
2290 PMD_DRV_LOG(WARNING, "Fail to set phy mask");
2291
2292 /* Call the get_link_info aq command to enable/disable LSE */
2293 i40e_dev_link_update(dev, 0);
2294 }
2295
2296 /* enable uio intr after callback register */
2297 rte_intr_enable(intr_handle);
2298
2299 i40e_filter_restore(pf);
2300
2301 if (pf->tm_conf.root && !pf->tm_conf.committed)
2302 PMD_DRV_LOG(WARNING,
2303 "please call hierarchy_commit() "
2304 "before starting the port");
2305
2306 return I40E_SUCCESS;
2307
2308 err_up:
2309 i40e_dev_switch_queues(pf, FALSE);
2310 i40e_dev_clear_queues(dev);
2311
2312 return ret;
2313 }
2314
2315 static void
2316 i40e_dev_stop(struct rte_eth_dev *dev)
2317 {
2318 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2319 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2320 struct i40e_vsi *main_vsi = pf->main_vsi;
2321 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2322 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2323 int i;
2324
2325 if (hw->adapter_stopped == 1)
2326 return;
2327 /* Disable all queues */
2328 i40e_dev_switch_queues(pf, FALSE);
2329
2330 /* unmap queues from their interrupt vectors */
2331 i40e_vsi_disable_queues_intr(main_vsi);
2332 i40e_vsi_queues_unbind_intr(main_vsi);
2333
2334 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2335 i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
2336 i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
2337 }
2338
2339 if (pf->fdir.fdir_vsi) {
2340 i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
2341 i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
2342 }
2343 /* Clear all queues and release memory */
2344 i40e_dev_clear_queues(dev);
2345
2346 /* Set link down */
2347 i40e_dev_set_link_down(dev);
2348
2349 if (!rte_intr_allow_others(intr_handle))
2350 /* restore the default handler */
2351 rte_intr_callback_register(intr_handle,
2352 i40e_dev_interrupt_handler,
2353 (void *)dev);
2354
2355 /* Clean datapath event and queue/vec mapping */
2356 rte_intr_efd_disable(intr_handle);
2357 if (intr_handle->intr_vec) {
2358 rte_free(intr_handle->intr_vec);
2359 intr_handle->intr_vec = NULL;
2360 }
2361
2362 /* reset hierarchy commit */
2363 pf->tm_conf.committed = false;
2364
2365 hw->adapter_stopped = 1;
2366 }
2367
2368 static void
2369 i40e_dev_close(struct rte_eth_dev *dev)
2370 {
2371 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2372 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2373 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2374 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2375 struct i40e_mirror_rule *p_mirror;
2376 uint32_t reg;
2377 int i;
2378 int ret;
2379
2380 PMD_INIT_FUNC_TRACE();
2381
2382 i40e_dev_stop(dev);
2383
2384 /* Remove all mirror rules */
2385 while ((p_mirror = TAILQ_FIRST(&pf->mirror_list))) {
2386 ret = i40e_aq_del_mirror_rule(hw,
2387 pf->main_vsi->veb->seid,
2388 p_mirror->rule_type,
2389 p_mirror->entries,
2390 p_mirror->num_entries,
2391 p_mirror->id);
2392 if (ret < 0)
2393 PMD_DRV_LOG(ERR, "failed to remove mirror rule: "
2394 "status = %d, aq_err = %d.", ret,
2395 hw->aq.asq_last_status);
2396
2397 /* remove mirror software resource anyway */
2398 TAILQ_REMOVE(&pf->mirror_list, p_mirror, rules);
2399 rte_free(p_mirror);
2400 pf->nb_mirror_rule--;
2401 }
2402
2403 i40e_dev_free_queues(dev);
2404
2405 /* Disable interrupt */
2406 i40e_pf_disable_irq0(hw);
2407 rte_intr_disable(intr_handle);
2408
2409 i40e_fdir_teardown(pf);
2410
2411 /* shutdown and destroy the HMC */
2412 i40e_shutdown_lan_hmc(hw);
2413
2414 for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
2415 i40e_vsi_release(pf->vmdq[i].vsi);
2416 pf->vmdq[i].vsi = NULL;
2417 }
2418 rte_free(pf->vmdq);
2419 pf->vmdq = NULL;
2420
2421 /* release all the existing VSIs and VEBs */
2422 i40e_vsi_release(pf->main_vsi);
2423
2424 /* shutdown the adminq */
2425 i40e_aq_queue_shutdown(hw, true);
2426 i40e_shutdown_adminq(hw);
2427
2428 i40e_res_pool_destroy(&pf->qp_pool);
2429 i40e_res_pool_destroy(&pf->msix_pool);
2430
2431 /* Disable flexible payload in global configuration */
2432 if (!pf->support_multi_driver)
2433 i40e_flex_payload_reg_set_default(hw);
2434
2435 /* force a PF reset to clean anything leftover */
2436 reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL);
2437 I40E_WRITE_REG(hw, I40E_PFGEN_CTRL,
2438 (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
2439 I40E_WRITE_FLUSH(hw);
2440 }
2441
2442 /*
2443 * Reset PF device only to re-initialize resources in PMD layer
2444 */
2445 static int
2446 i40e_dev_reset(struct rte_eth_dev *dev)
2447 {
2448 int ret;
2449
2450 /* When a DPDK PMD PF begins to reset the PF port, it should notify all
2451 * its VFs so that they stay aligned with it. The detailed notification
2452 * mechanism is PMD specific; for the i40e PF it is rather complex.
2453 * To avoid unexpected behavior in the VFs, resetting a PF with SR-IOV
2454 * activated is currently not supported. It might be supported later.
2455 */
2456 if (dev->data->sriov.active)
2457 return -ENOTSUP;
2458
2459 ret = eth_i40e_dev_uninit(dev);
2460 if (ret)
2461 return ret;
2462
2463 ret = eth_i40e_dev_init(dev, NULL);
2464
2465 return ret;
2466 }
2467
2468 static void
2469 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
2470 {
2471 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2472 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2473 struct i40e_vsi *vsi = pf->main_vsi;
2474 int status;
2475
2476 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2477 true, NULL, true);
2478 if (status != I40E_SUCCESS)
2479 PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous");
2480
2481 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2482 TRUE, NULL);
2483 if (status != I40E_SUCCESS)
2484 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2485
2486 }
2487
2488 static void
2489 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
2490 {
2491 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2492 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2493 struct i40e_vsi *vsi = pf->main_vsi;
2494 int status;
2495
2496 status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
2497 false, NULL, true);
2498 if (status != I40E_SUCCESS)
2499 PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
2500
2501 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
2502 false, NULL);
2503 if (status != I40E_SUCCESS)
2504 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2505 }
2506
2507 static void
2508 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
2509 {
2510 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2511 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2512 struct i40e_vsi *vsi = pf->main_vsi;
2513 int ret;
2514
2515 ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL);
2516 if (ret != I40E_SUCCESS)
2517 PMD_DRV_LOG(ERR, "Failed to enable multicast promiscuous");
2518 }
2519
2520 static void
2521 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
2522 {
2523 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2524 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2525 struct i40e_vsi *vsi = pf->main_vsi;
2526 int ret;
2527
2528 if (dev->data->promiscuous == 1)
2529 return; /* must remain in all_multicast mode */
2530
2531 ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
2532 vsi->seid, FALSE, NULL);
2533 if (ret != I40E_SUCCESS)
2534 PMD_DRV_LOG(ERR, "Failed to disable multicast promiscuous");
2535 }
2536
2537 /*
2538 * Set device link up.
2539 */
2540 static int
2541 i40e_dev_set_link_up(struct rte_eth_dev *dev)
2542 {
2543 /* re-apply link speed setting */
2544 return i40e_apply_link_speed(dev);
2545 }
2546
2547 /*
2548 * Set device link down.
2549 */
2550 static int
2551 i40e_dev_set_link_down(struct rte_eth_dev *dev)
2552 {
2553 uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
2554 uint8_t abilities = 0;
2555 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2556
2557 abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2558 return i40e_phy_conf_link(hw, abilities, speed, false);
2559 }
2560
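/* Fast-path link query: read the link state directly from the PRTMAC
 * registers instead of issuing an admin queue command. The speed is
 * decoded from the link status register; the shared 25G/40G encoding is
 * disambiguated via the 25GB bit in PRTMAC_MACC.
 */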
2561 static __rte_always_inline void
2562 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
2563 {
2564 /* Link status registers and values */
2565 #define I40E_PRTMAC_LINKSTA 0x001E2420
2566 #define I40E_REG_LINK_UP 0x40000080
2567 #define I40E_PRTMAC_MACC 0x001E24E0
2568 #define I40E_REG_MACC_25GB 0x00020000
2569 #define I40E_REG_SPEED_MASK 0x38000000
2570 #define I40E_REG_SPEED_100MB 0x00000000
2571 #define I40E_REG_SPEED_1GB 0x08000000
2572 #define I40E_REG_SPEED_10GB 0x10000000
2573 #define I40E_REG_SPEED_20GB 0x20000000
2574 #define I40E_REG_SPEED_25_40GB 0x18000000
2575 uint32_t link_speed;
2576 uint32_t reg_val;
2577
2578 reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
2579 link_speed = reg_val & I40E_REG_SPEED_MASK;
2580 reg_val &= I40E_REG_LINK_UP;
2581 link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
2582
2583 if (unlikely(link->link_status == 0))
2584 return;
2585
2586 /* Parse the link status */
2587 switch (link_speed) {
2588 case I40E_REG_SPEED_100MB:
2589 link->link_speed = ETH_SPEED_NUM_100M;
2590 break;
2591 case I40E_REG_SPEED_1GB:
2592 link->link_speed = ETH_SPEED_NUM_1G;
2593 break;
2594 case I40E_REG_SPEED_10GB:
2595 link->link_speed = ETH_SPEED_NUM_10G;
2596 break;
2597 case I40E_REG_SPEED_20GB:
2598 link->link_speed = ETH_SPEED_NUM_20G;
2599 break;
2600 case I40E_REG_SPEED_25_40GB:
2601 reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
2602
2603 if (reg_val & I40E_REG_MACC_25GB)
2604 link->link_speed = ETH_SPEED_NUM_25G;
2605 else
2606 link->link_speed = ETH_SPEED_NUM_40G;
2607
2608 break;
2609 default:
2610 PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
2611 break;
2612 }
2613 }
2614
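/* Slow-path link query: fetch the link status through the Get Link Info
 * admin queue command (optionally enabling link status events). When
 * wait_to_complete is set, poll up to MAX_REPEAT_TIME times with a
 * CHECK_INTERVAL millisecond delay until the link reports up.
 */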
2615 static __rte_always_inline void
2616 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
2617 bool enable_lse, int wait_to_complete)
2618 {
2619 #define CHECK_INTERVAL 100 /* 100ms */
2620 #define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
2621 uint32_t rep_cnt = MAX_REPEAT_TIME;
2622 struct i40e_link_status link_status;
2623 int status;
2624
2625 memset(&link_status, 0, sizeof(link_status));
2626
2627 do {
2628 memset(&link_status, 0, sizeof(link_status));
2629
2630 /* Get link status information from hardware */
2631 status = i40e_aq_get_link_info(hw, enable_lse,
2632 &link_status, NULL);
2633 if (unlikely(status != I40E_SUCCESS)) {
2634 link->link_speed = ETH_SPEED_NUM_100M;
2635 link->link_duplex = ETH_LINK_FULL_DUPLEX;
2636 PMD_DRV_LOG(ERR, "Failed to get link info");
2637 return;
2638 }
2639
2640 link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
2641 if (!wait_to_complete || link->link_status)
2642 break;
2643
2644 rte_delay_ms(CHECK_INTERVAL);
2645 } while (--rep_cnt);
2646
2647 /* Parse the link status */
2648 switch (link_status.link_speed) {
2649 case I40E_LINK_SPEED_100MB:
2650 link->link_speed = ETH_SPEED_NUM_100M;
2651 break;
2652 case I40E_LINK_SPEED_1GB:
2653 link->link_speed = ETH_SPEED_NUM_1G;
2654 break;
2655 case I40E_LINK_SPEED_10GB:
2656 link->link_speed = ETH_SPEED_NUM_10G;
2657 break;
2658 case I40E_LINK_SPEED_20GB:
2659 link->link_speed = ETH_SPEED_NUM_20G;
2660 break;
2661 case I40E_LINK_SPEED_25GB:
2662 link->link_speed = ETH_SPEED_NUM_25G;
2663 break;
2664 case I40E_LINK_SPEED_40GB:
2665 link->link_speed = ETH_SPEED_NUM_40G;
2666 break;
2667 default:
2668 link->link_speed = ETH_SPEED_NUM_100M;
2669 break;
2670 }
2671 }
2672
2673 int
2674 i40e_dev_link_update(struct rte_eth_dev *dev,
2675 int wait_to_complete)
2676 {
2677 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2678 struct rte_eth_link link;
2679 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2680 int ret;
2681
2682 memset(&link, 0, sizeof(link));
2683
2684 /* i40e uses full duplex only */
2685 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2686 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2687 ETH_LINK_SPEED_FIXED);
2688
2689 if (!wait_to_complete && !enable_lse)
2690 update_link_reg(hw, &link);
2691 else
2692 update_link_aq(hw, &link, enable_lse, wait_to_complete);
2693
2694 ret = rte_eth_linkstatus_set(dev, &link);
2695 i40e_notify_all_vfs_link_status(dev);
2696
2697 return ret;
2698 }
2699
2700 /* Get all the statistics of a VSI */
2701 void
2702 i40e_update_vsi_stats(struct i40e_vsi *vsi)
2703 {
2704 struct i40e_eth_stats *oes = &vsi->eth_stats_offset;
2705 struct i40e_eth_stats *nes = &vsi->eth_stats;
2706 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2707 int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
2708
2709 i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
2710 vsi->offset_loaded, &oes->rx_bytes,
2711 &nes->rx_bytes);
2712 i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
2713 vsi->offset_loaded, &oes->rx_unicast,
2714 &nes->rx_unicast);
2715 i40e_stat_update_48(hw, I40E_GLV_MPRCH(idx), I40E_GLV_MPRCL(idx),
2716 vsi->offset_loaded, &oes->rx_multicast,
2717 &nes->rx_multicast);
2718 i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
2719 vsi->offset_loaded, &oes->rx_broadcast,
2720 &nes->rx_broadcast);
2721 /* exclude CRC bytes */
2722 nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2723 nes->rx_broadcast) * ETHER_CRC_LEN;
2724
2725 i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
2726 &oes->rx_discards, &nes->rx_discards);
2727 /* GLV_REPC not supported */
2728 /* GLV_RMPC not supported */
2729 i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
2730 &oes->rx_unknown_protocol,
2731 &nes->rx_unknown_protocol);
2732 i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
2733 vsi->offset_loaded, &oes->tx_bytes,
2734 &nes->tx_bytes);
2735 i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
2736 vsi->offset_loaded, &oes->tx_unicast,
2737 &nes->tx_unicast);
2738 i40e_stat_update_48(hw, I40E_GLV_MPTCH(idx), I40E_GLV_MPTCL(idx),
2739 vsi->offset_loaded, &oes->tx_multicast,
2740 &nes->tx_multicast);
2741 i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
2742 vsi->offset_loaded, &oes->tx_broadcast,
2743 &nes->tx_broadcast);
2744 /* GLV_TDPC not supported */
2745 i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
2746 &oes->tx_errors, &nes->tx_errors);
2747 vsi->offset_loaded = true;
2748
2749 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats start *******************",
2750 vsi->vsi_id);
2751 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
2752 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
2753 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
2754 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
2755 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
2756 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2757 nes->rx_unknown_protocol);
2758 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
2759 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
2760 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
2761 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
2762 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
2763 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
2764 PMD_DRV_LOG(DEBUG, "***************** VSI[%u] stats end *******************",
2765 vsi->vsi_id);
2766 }
2767
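/* Refresh the PF statistics snapshot from hardware. The port-level GLPRT
 * counters also include traffic switched internally between VSIs, so the
 * GLV counters for that internal traffic are read first and subtracted.
 * 48-bit counters are assembled from their H/L register pairs and
 * accumulated relative to the stored offsets.
 */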
2768 static void
2769 i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
2770 {
2771 unsigned int i;
2772 struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
2773 struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
2774
2775 /* Get rx/tx bytes of internal transfer packets */
2776 i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
2777 I40E_GLV_GORCL(hw->port),
2778 pf->offset_loaded,
2779 &pf->internal_stats_offset.rx_bytes,
2780 &pf->internal_stats.rx_bytes);
2781
2782 i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
2783 I40E_GLV_GOTCL(hw->port),
2784 pf->offset_loaded,
2785 &pf->internal_stats_offset.tx_bytes,
2786 &pf->internal_stats.tx_bytes);
2787 /* Get total internal rx packet count */
2788 i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
2789 I40E_GLV_UPRCL(hw->port),
2790 pf->offset_loaded,
2791 &pf->internal_stats_offset.rx_unicast,
2792 &pf->internal_stats.rx_unicast);
2793 i40e_stat_update_48(hw, I40E_GLV_MPRCH(hw->port),
2794 I40E_GLV_MPRCL(hw->port),
2795 pf->offset_loaded,
2796 &pf->internal_stats_offset.rx_multicast,
2797 &pf->internal_stats.rx_multicast);
2798 i40e_stat_update_48(hw, I40E_GLV_BPRCH(hw->port),
2799 I40E_GLV_BPRCL(hw->port),
2800 pf->offset_loaded,
2801 &pf->internal_stats_offset.rx_broadcast,
2802 &pf->internal_stats.rx_broadcast);
2803 /* Get total internal tx packet count */
2804 i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port),
2805 I40E_GLV_UPTCL(hw->port),
2806 pf->offset_loaded,
2807 &pf->internal_stats_offset.tx_unicast,
2808 &pf->internal_stats.tx_unicast);
2809 i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port),
2810 I40E_GLV_MPTCL(hw->port),
2811 pf->offset_loaded,
2812 &pf->internal_stats_offset.tx_multicast,
2813 &pf->internal_stats.tx_multicast);
2814 i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port),
2815 I40E_GLV_BPTCL(hw->port),
2816 pf->offset_loaded,
2817 &pf->internal_stats_offset.tx_broadcast,
2818 &pf->internal_stats.tx_broadcast);
2819
2820 /* exclude CRC size */
2821 pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast +
2822 pf->internal_stats.rx_multicast +
2823 pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
2824
2825 /* Get statistics of struct i40e_eth_stats */
2826 i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
2827 I40E_GLPRT_GORCL(hw->port),
2828 pf->offset_loaded, &os->eth.rx_bytes,
2829 &ns->eth.rx_bytes);
2830 i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
2831 I40E_GLPRT_UPRCL(hw->port),
2832 pf->offset_loaded, &os->eth.rx_unicast,
2833 &ns->eth.rx_unicast);
2834 i40e_stat_update_48(hw, I40E_GLPRT_MPRCH(hw->port),
2835 I40E_GLPRT_MPRCL(hw->port),
2836 pf->offset_loaded, &os->eth.rx_multicast,
2837 &ns->eth.rx_multicast);
2838 i40e_stat_update_48(hw, I40E_GLPRT_BPRCH(hw->port),
2839 I40E_GLPRT_BPRCL(hw->port),
2840 pf->offset_loaded, &os->eth.rx_broadcast,
2841 &ns->eth.rx_broadcast);
2842 /* Workaround: CRC size should not be included in byte statistics,
2843 * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2844 */
2845 ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2846 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
2847
2848 /* Exclude internal rx bytes.
2849 * Workaround: it is possible that I40E_GLV_GORC[H/L] is updated before
2850 * I40E_GLPRT_GORC[H/L], so there is a small window that can cause a
2851 * negative value.
2852 * The same applies to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L] and I40E_GLV_BPRC[H/L].
2853 */
2854 if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes)
2855 ns->eth.rx_bytes = 0;
2856 else
2857 ns->eth.rx_bytes -= pf->internal_stats.rx_bytes;
2858
2859 if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast)
2860 ns->eth.rx_unicast = 0;
2861 else
2862 ns->eth.rx_unicast -= pf->internal_stats.rx_unicast;
2863
2864 if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast)
2865 ns->eth.rx_multicast = 0;
2866 else
2867 ns->eth.rx_multicast -= pf->internal_stats.rx_multicast;
2868
2869 if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast)
2870 ns->eth.rx_broadcast = 0;
2871 else
2872 ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast;
2873
2874 i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
2875 pf->offset_loaded, &os->eth.rx_discards,
2876 &ns->eth.rx_discards);
2877 /* GLPRT_REPC not supported */
2878 /* GLPRT_RMPC not supported */
2879 i40e_stat_update_32(hw, I40E_GLPRT_RUPP(hw->port),
2880 pf->offset_loaded,
2881 &os->eth.rx_unknown_protocol,
2882 &ns->eth.rx_unknown_protocol);
2883 i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
2884 I40E_GLPRT_GOTCL(hw->port),
2885 pf->offset_loaded, &os->eth.tx_bytes,
2886 &ns->eth.tx_bytes);
2887 i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
2888 I40E_GLPRT_UPTCL(hw->port),
2889 pf->offset_loaded, &os->eth.tx_unicast,
2890 &ns->eth.tx_unicast);
2891 i40e_stat_update_48(hw, I40E_GLPRT_MPTCH(hw->port),
2892 I40E_GLPRT_MPTCL(hw->port),
2893 pf->offset_loaded, &os->eth.tx_multicast,
2894 &ns->eth.tx_multicast);
2895 i40e_stat_update_48(hw, I40E_GLPRT_BPTCH(hw->port),
2896 I40E_GLPRT_BPTCL(hw->port),
2897 pf->offset_loaded, &os->eth.tx_broadcast,
2898 &ns->eth.tx_broadcast);
2899 ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2900 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
2901
2902 /* Exclude internal tx bytes.
2903 * Workaround: it is possible that I40E_GLV_GOTC[H/L] is updated before
2904 * I40E_GLPRT_GOTC[H/L], so there is a small window that can cause a
2905 * negative value.
2906 * The same applies to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L] and I40E_GLV_BPTC[H/L].
2907 */
2908 if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes)
2909 ns->eth.tx_bytes = 0;
2910 else
2911 ns->eth.tx_bytes -= pf->internal_stats.tx_bytes;
2912
2913 if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast)
2914 ns->eth.tx_unicast = 0;
2915 else
2916 ns->eth.tx_unicast -= pf->internal_stats.tx_unicast;
2917
2918 if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast)
2919 ns->eth.tx_multicast = 0;
2920 else
2921 ns->eth.tx_multicast -= pf->internal_stats.tx_multicast;
2922
2923 if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast)
2924 ns->eth.tx_broadcast = 0;
2925 else
2926 ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast;
2927
2928 /* GLPRT_TEPC not supported */
2929
2930 /* additional port specific stats */
2931 i40e_stat_update_32(hw, I40E_GLPRT_TDOLD(hw->port),
2932 pf->offset_loaded, &os->tx_dropped_link_down,
2933 &ns->tx_dropped_link_down);
2934 i40e_stat_update_32(hw, I40E_GLPRT_CRCERRS(hw->port),
2935 pf->offset_loaded, &os->crc_errors,
2936 &ns->crc_errors);
2937 i40e_stat_update_32(hw, I40E_GLPRT_ILLERRC(hw->port),
2938 pf->offset_loaded, &os->illegal_bytes,
2939 &ns->illegal_bytes);
2940 /* GLPRT_ERRBC not supported */
2941 i40e_stat_update_32(hw, I40E_GLPRT_MLFC(hw->port),
2942 pf->offset_loaded, &os->mac_local_faults,
2943 &ns->mac_local_faults);
2944 i40e_stat_update_32(hw, I40E_GLPRT_MRFC(hw->port),
2945 pf->offset_loaded, &os->mac_remote_faults,
2946 &ns->mac_remote_faults);
2947 i40e_stat_update_32(hw, I40E_GLPRT_RLEC(hw->port),
2948 pf->offset_loaded, &os->rx_length_errors,
2949 &ns->rx_length_errors);
2950 i40e_stat_update_32(hw, I40E_GLPRT_LXONRXC(hw->port),
2951 pf->offset_loaded, &os->link_xon_rx,
2952 &ns->link_xon_rx);
2953 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2954 pf->offset_loaded, &os->link_xoff_rx,
2955 &ns->link_xoff_rx);
2956 for (i = 0; i < 8; i++) {
2957 i40e_stat_update_32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
2958 pf->offset_loaded,
2959 &os->priority_xon_rx[i],
2960 &ns->priority_xon_rx[i]);
2961 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
2962 pf->offset_loaded,
2963 &os->priority_xoff_rx[i],
2964 &ns->priority_xoff_rx[i]);
2965 }
2966 i40e_stat_update_32(hw, I40E_GLPRT_LXONTXC(hw->port),
2967 pf->offset_loaded, &os->link_xon_tx,
2968 &ns->link_xon_tx);
2969 i40e_stat_update_32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2970 pf->offset_loaded, &os->link_xoff_tx,
2971 &ns->link_xoff_tx);
2972 for (i = 0; i < 8; i++) {
2973 i40e_stat_update_32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
2974 pf->offset_loaded,
2975 &os->priority_xon_tx[i],
2976 &ns->priority_xon_tx[i]);
2977 i40e_stat_update_32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
2978 pf->offset_loaded,
2979 &os->priority_xoff_tx[i],
2980 &ns->priority_xoff_tx[i]);
2981 i40e_stat_update_32(hw, I40E_GLPRT_RXON2OFFCNT(hw->port, i),
2982 pf->offset_loaded,
2983 &os->priority_xon_2_xoff[i],
2984 &ns->priority_xon_2_xoff[i]);
2985 }
2986 i40e_stat_update_48(hw, I40E_GLPRT_PRC64H(hw->port),
2987 I40E_GLPRT_PRC64L(hw->port),
2988 pf->offset_loaded, &os->rx_size_64,
2989 &ns->rx_size_64);
2990 i40e_stat_update_48(hw, I40E_GLPRT_PRC127H(hw->port),
2991 I40E_GLPRT_PRC127L(hw->port),
2992 pf->offset_loaded, &os->rx_size_127,
2993 &ns->rx_size_127);
2994 i40e_stat_update_48(hw, I40E_GLPRT_PRC255H(hw->port),
2995 I40E_GLPRT_PRC255L(hw->port),
2996 pf->offset_loaded, &os->rx_size_255,
2997 &ns->rx_size_255);
2998 i40e_stat_update_48(hw, I40E_GLPRT_PRC511H(hw->port),
2999 I40E_GLPRT_PRC511L(hw->port),
3000 pf->offset_loaded, &os->rx_size_511,
3001 &ns->rx_size_511);
3002 i40e_stat_update_48(hw, I40E_GLPRT_PRC1023H(hw->port),
3003 I40E_GLPRT_PRC1023L(hw->port),
3004 pf->offset_loaded, &os->rx_size_1023,
3005 &ns->rx_size_1023);
3006 i40e_stat_update_48(hw, I40E_GLPRT_PRC1522H(hw->port),
3007 I40E_GLPRT_PRC1522L(hw->port),
3008 pf->offset_loaded, &os->rx_size_1522,
3009 &ns->rx_size_1522);
3010 i40e_stat_update_48(hw, I40E_GLPRT_PRC9522H(hw->port),
3011 I40E_GLPRT_PRC9522L(hw->port),
3012 pf->offset_loaded, &os->rx_size_big,
3013 &ns->rx_size_big);
3014 i40e_stat_update_32(hw, I40E_GLPRT_RUC(hw->port),
3015 pf->offset_loaded, &os->rx_undersize,
3016 &ns->rx_undersize);
3017 i40e_stat_update_32(hw, I40E_GLPRT_RFC(hw->port),
3018 pf->offset_loaded, &os->rx_fragments,
3019 &ns->rx_fragments);
3020 i40e_stat_update_32(hw, I40E_GLPRT_ROC(hw->port),
3021 pf->offset_loaded, &os->rx_oversize,
3022 &ns->rx_oversize);
3023 i40e_stat_update_32(hw, I40E_GLPRT_RJC(hw->port),
3024 pf->offset_loaded, &os->rx_jabber,
3025 &ns->rx_jabber);
3026 i40e_stat_update_48(hw, I40E_GLPRT_PTC64H(hw->port),
3027 I40E_GLPRT_PTC64L(hw->port),
3028 pf->offset_loaded, &os->tx_size_64,
3029 &ns->tx_size_64);
3030 i40e_stat_update_48(hw, I40E_GLPRT_PTC127H(hw->port),
3031 I40E_GLPRT_PTC127L(hw->port),
3032 pf->offset_loaded, &os->tx_size_127,
3033 &ns->tx_size_127);
3034 i40e_stat_update_48(hw, I40E_GLPRT_PTC255H(hw->port),
3035 I40E_GLPRT_PTC255L(hw->port),
3036 pf->offset_loaded, &os->tx_size_255,
3037 &ns->tx_size_255);
3038 i40e_stat_update_48(hw, I40E_GLPRT_PTC511H(hw->port),
3039 I40E_GLPRT_PTC511L(hw->port),
3040 pf->offset_loaded, &os->tx_size_511,
3041 &ns->tx_size_511);
3042 i40e_stat_update_48(hw, I40E_GLPRT_PTC1023H(hw->port),
3043 I40E_GLPRT_PTC1023L(hw->port),
3044 pf->offset_loaded, &os->tx_size_1023,
3045 &ns->tx_size_1023);
3046 i40e_stat_update_48(hw, I40E_GLPRT_PTC1522H(hw->port),
3047 I40E_GLPRT_PTC1522L(hw->port),
3048 pf->offset_loaded, &os->tx_size_1522,
3049 &ns->tx_size_1522);
3050 i40e_stat_update_48(hw, I40E_GLPRT_PTC9522H(hw->port),
3051 I40E_GLPRT_PTC9522L(hw->port),
3052 pf->offset_loaded, &os->tx_size_big,
3053 &ns->tx_size_big);
3054 i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
3055 pf->offset_loaded,
3056 &os->fd_sb_match, &ns->fd_sb_match);
3057 /* GLPRT_MSPDC not supported */
3058 /* GLPRT_XEC not supported */
3059
3060 pf->offset_loaded = true;
3061
3062 if (pf->main_vsi)
3063 i40e_update_vsi_stats(pf->main_vsi);
3064 }
3065
3066 /* Get all statistics of a port */
3067 static int
3068 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3069 {
3070 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3071 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3072 struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
3073 unsigned i;
3074
3075 /* Read the hardware registers to update the values, then fill the stats struct */
3076 i40e_read_stats_registers(pf, hw);
3077
3078 stats->ipackets = ns->eth.rx_unicast +
3079 ns->eth.rx_multicast +
3080 ns->eth.rx_broadcast -
3081 ns->eth.rx_discards -
3082 pf->main_vsi->eth_stats.rx_discards;
3083 stats->opackets = ns->eth.tx_unicast +
3084 ns->eth.tx_multicast +
3085 ns->eth.tx_broadcast;
3086 stats->ibytes = ns->eth.rx_bytes;
3087 stats->obytes = ns->eth.tx_bytes;
3088 stats->oerrors = ns->eth.tx_errors +
3089 pf->main_vsi->eth_stats.tx_errors;
3090
3091 /* Rx Errors */
3092 stats->imissed = ns->eth.rx_discards +
3093 pf->main_vsi->eth_stats.rx_discards;
3094 stats->ierrors = ns->crc_errors +
3095 ns->rx_length_errors + ns->rx_undersize +
3096 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3097
3098 PMD_DRV_LOG(DEBUG, "***************** PF stats start *******************");
3099 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
3100 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
3101 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", ns->eth.rx_multicast);
3102 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", ns->eth.rx_broadcast);
3103 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", ns->eth.rx_discards);
3104 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3105 ns->eth.rx_unknown_protocol);
3106 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
3107 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
3108 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", ns->eth.tx_multicast);
3109 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", ns->eth.tx_broadcast);
3110 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", ns->eth.tx_discards);
3111 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);
3112
3113 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
3114 ns->tx_dropped_link_down);
3115 PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
3116 PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
3117 ns->illegal_bytes);
3118 PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
3119 PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
3120 ns->mac_local_faults);
3121 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
3122 ns->mac_remote_faults);
3123 PMD_DRV_LOG(DEBUG, "rx_length_errors: %"PRIu64"",
3124 ns->rx_length_errors);
3125 PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
3126 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
3127 for (i = 0; i < 8; i++) {
3128 PMD_DRV_LOG(DEBUG, "priority_xon_rx[%d]: %"PRIu64"",
3129 i, ns->priority_xon_rx[i]);
3130 PMD_DRV_LOG(DEBUG, "priority_xoff_rx[%d]: %"PRIu64"",
3131 i, ns->priority_xoff_rx[i]);
3132 }
3133 PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
3134 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
3135 for (i = 0; i < 8; i++) {
3136 PMD_DRV_LOG(DEBUG, "priority_xon_tx[%d]: %"PRIu64"",
3137 i, ns->priority_xon_tx[i]);
3138 PMD_DRV_LOG(DEBUG, "priority_xoff_tx[%d]: %"PRIu64"",
3139 i, ns->priority_xoff_tx[i]);
3140 PMD_DRV_LOG(DEBUG, "priority_xon_2_xoff[%d]: %"PRIu64"",
3141 i, ns->priority_xon_2_xoff[i]);
3142 }
3143 PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
3144 PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
3145 PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
3146 PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
3147 PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
3148 PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
3149 PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
3150 PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
3151 PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
3152 PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
3153 PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
3154 PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
3155 PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
3156 PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
3157 PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
3158 PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
3159 PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
3160 PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
3161 PMD_DRV_LOG(DEBUG, "mac_short_packet_dropped: %"PRIu64"",
3162 ns->mac_short_packet_dropped);
3163 PMD_DRV_LOG(DEBUG, "checksum_error: %"PRIu64"",
3164 ns->checksum_error);
3165 PMD_DRV_LOG(DEBUG, "fdir_match: %"PRIu64"", ns->fd_sb_match);
3166 PMD_DRV_LOG(DEBUG, "***************** PF stats end ********************");
3167 return 0;
3168 }
3169
3170 /* Reset the statistics */
3171 static void
3172 i40e_dev_stats_reset(struct rte_eth_dev *dev)
3173 {
3174 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3175 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3176
3177 /* Mark PF and VSI stats to update the offset, aka "reset" */
3178 pf->offset_loaded = false;
3179 if (pf->main_vsi)
3180 pf->main_vsi->offset_loaded = false;
3181
3182 /* read the stats, reading current register values into offset */
3183 i40e_read_stats_registers(pf, hw);
3184 }
3185
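/* Total number of extended statistics exposed: the basic ethernet
 * counters, the HW port counters, and one RX plus one TX priority counter
 * set for each of the 8 traffic classes.
 */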
3186 static uint32_t
3187 i40e_xstats_calc_num(void)
3188 {
3189 return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
3190 (I40E_NB_RXQ_PRIO_XSTATS * 8) +
3191 (I40E_NB_TXQ_PRIO_XSTATS * 8);
3192 }
3193
3194 static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3195 struct rte_eth_xstat_name *xstats_names,
3196 __rte_unused unsigned limit)
3197 {
3198 unsigned count = 0;
3199 unsigned i, prio;
3200
3201 if (xstats_names == NULL)
3202 return i40e_xstats_calc_num();
3203
3204 /* Note: limit checked in rte_eth_xstats_get_names() */
3205
3206 /* Get stats from i40e_eth_stats struct */
3207 for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3208 snprintf(xstats_names[count].name,
3209 sizeof(xstats_names[count].name),
3210 "%s", rte_i40e_stats_strings[i].name);
3211 count++;
3212 }
3213
3214 /* Get individual stats from the i40e_hw_port struct */
3215 for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3216 snprintf(xstats_names[count].name,
3217 sizeof(xstats_names[count].name),
3218 "%s", rte_i40e_hw_port_strings[i].name);
3219 count++;
3220 }
3221
3222 for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3223 for (prio = 0; prio < 8; prio++) {
3224 snprintf(xstats_names[count].name,
3225 sizeof(xstats_names[count].name),
3226 "rx_priority%u_%s", prio,
3227 rte_i40e_rxq_prio_strings[i].name);
3228 count++;
3229 }
3230 }
3231
3232 for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3233 for (prio = 0; prio < 8; prio++) {
3234 snprintf(xstats_names[count].name,
3235 sizeof(xstats_names[count].name),
3236 "tx_priority%u_%s", prio,
3237 rte_i40e_txq_prio_strings[i].name);
3238 count++;
3239 }
3240 }
3241 return count;
3242 }
3243
3244 static int
3245 i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3246 unsigned n)
3247 {
3248 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3249 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3250 unsigned i, count, prio;
3251 struct i40e_hw_port_stats *hw_stats = &pf->stats;
3252
3253 count = i40e_xstats_calc_num();
3254 if (n < count)
3255 return count;
3256
3257 i40e_read_stats_registers(pf, hw);
3258
3259 if (xstats == NULL)
3260 return 0;
3261
3262 count = 0;
3263
3264 /* Get stats from i40e_eth_stats struct */
3265 for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
3266 xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
3267 rte_i40e_stats_strings[i].offset);
3268 xstats[count].id = count;
3269 count++;
3270 }
3271
3272 /* Get individual stats from i40e_hw_port struct */
3273 for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
3274 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
3275 rte_i40e_hw_port_strings[i].offset);
3276 xstats[count].id = count;
3277 count++;
3278 }
3279
3280 for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) {
3281 for (prio = 0; prio < 8; prio++) {
3282 xstats[count].value =
3283 *(uint64_t *)(((char *)hw_stats) +
3284 rte_i40e_rxq_prio_strings[i].offset +
3285 (sizeof(uint64_t) * prio));
3286 xstats[count].id = count;
3287 count++;
3288 }
3289 }
3290
3291 for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) {
3292 for (prio = 0; prio < 8; prio++) {
3293 xstats[count].value =
3294 *(uint64_t *)(((char *)hw_stats) +
3295 rte_i40e_txq_prio_strings[i].offset +
3296 (sizeof(uint64_t) * prio));
3297 xstats[count].id = count;
3298 count++;
3299 }
3300 }
3301
3302 return count;
3303 }
3304
3305 static int
3306 i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
3307 __rte_unused uint16_t queue_id,
3308 __rte_unused uint8_t stat_idx,
3309 __rte_unused uint8_t is_rx)
3310 {
3311 PMD_INIT_FUNC_TRACE();
3312
3313 return -ENOSYS;
3314 }
3315
3316 static int
3317 i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
3318 {
3319 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3320 u32 full_ver;
3321 u8 ver, patch;
3322 u16 build;
3323 int ret;
3324
3325 full_ver = hw->nvm.oem_ver;
3326 ver = (u8)(full_ver >> 24);
3327 build = (u16)((full_ver >> 8) & 0xffff);
3328 patch = (u8)(full_ver & 0xff);
3329
3330 ret = snprintf(fw_version, fw_size,
3331 "%d.%d%d 0x%08x %d.%d.%d",
3332 ((hw->nvm.version >> 12) & 0xf),
3333 ((hw->nvm.version >> 4) & 0xff),
3334 (hw->nvm.version & 0xf), hw->nvm.eetrack,
3335 ver, build, patch);
3336
3337 ret += 1; /* add the size of '\0' */
3338 if (fw_size < (u32)ret)
3339 return ret;
3340 else
3341 return 0;
3342 }
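/*
 * Illustrative note (not driver code): how the version string built by
 * i40e_fw_version_get() above decomposes, using made-up NVM values.
 * Assuming hw->nvm.version = 0x6180, hw->nvm.eetrack = 0x80003554 and
 * hw->nvm.oem_ver = 0x06005ff2:
 *
 *   nvm major = (0x6180 >> 12) & 0xf  = 6
 *   nvm minor = (0x6180 >>  4) & 0xff = 0x18 (24)
 *   nvm build =  0x6180        & 0xf  = 0
 *   oem ver   =  0x06005ff2 >> 24            = 6
 *   oem build = (0x06005ff2 >>  8) & 0xffff  = 95
 *   oem patch =  0x06005ff2 & 0xff           = 242
 *
 * which snprintf() renders as "6.240 0x80003554 6.95.242".  Applications
 * normally reach this callback through the generic API, e.g.:
 *
 *   char fw[64];
 *   if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
 *       printf("FW: %s\n", fw);
 *
 * (port_id is assumed to be a valid, already-probed i40e port.)
 */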
3343
3344 static void
3345 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
3346 {
3347 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3348 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3349 struct i40e_vsi *vsi = pf->main_vsi;
3350 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
3351
3352 dev_info->max_rx_queues = vsi->nb_qps;
3353 dev_info->max_tx_queues = vsi->nb_qps;
3354 dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
3355 dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
3356 dev_info->max_mac_addrs = vsi->max_macaddrs;
3357 dev_info->max_vfs = pci_dev->max_vfs;
3358 dev_info->rx_queue_offload_capa = 0;
3359 dev_info->rx_offload_capa =
3360 DEV_RX_OFFLOAD_VLAN_STRIP |
3361 DEV_RX_OFFLOAD_QINQ_STRIP |
3362 DEV_RX_OFFLOAD_IPV4_CKSUM |
3363 DEV_RX_OFFLOAD_UDP_CKSUM |
3364 DEV_RX_OFFLOAD_TCP_CKSUM |
3365 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
3366 DEV_RX_OFFLOAD_CRC_STRIP |
3367 DEV_RX_OFFLOAD_KEEP_CRC |
3368 DEV_RX_OFFLOAD_VLAN_EXTEND |
3369 DEV_RX_OFFLOAD_VLAN_FILTER |
3370 DEV_RX_OFFLOAD_JUMBO_FRAME;
3371
3372 dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
3373 dev_info->tx_offload_capa =
3374 DEV_TX_OFFLOAD_VLAN_INSERT |
3375 DEV_TX_OFFLOAD_QINQ_INSERT |
3376 DEV_TX_OFFLOAD_IPV4_CKSUM |
3377 DEV_TX_OFFLOAD_UDP_CKSUM |
3378 DEV_TX_OFFLOAD_TCP_CKSUM |
3379 DEV_TX_OFFLOAD_SCTP_CKSUM |
3380 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
3381 DEV_TX_OFFLOAD_TCP_TSO |
3382 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
3383 DEV_TX_OFFLOAD_GRE_TNL_TSO |
3384 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
3385 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
3386 DEV_TX_OFFLOAD_MULTI_SEGS |
3387 dev_info->tx_queue_offload_capa;
3388 dev_info->dev_capa =
3389 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
3390 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
3391
3392 dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
3393 sizeof(uint32_t);
3394 dev_info->reta_size = pf->hash_lut_size;
3395 dev_info->flow_type_rss_offloads = pf->adapter->flow_types_mask;
3396
3397 dev_info->default_rxconf = (struct rte_eth_rxconf) {
3398 .rx_thresh = {
3399 .pthresh = I40E_DEFAULT_RX_PTHRESH,
3400 .hthresh = I40E_DEFAULT_RX_HTHRESH,
3401 .wthresh = I40E_DEFAULT_RX_WTHRESH,
3402 },
3403 .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
3404 .rx_drop_en = 0,
3405 .offloads = 0,
3406 };
3407
3408 dev_info->default_txconf = (struct rte_eth_txconf) {
3409 .tx_thresh = {
3410 .pthresh = I40E_DEFAULT_TX_PTHRESH,
3411 .hthresh = I40E_DEFAULT_TX_HTHRESH,
3412 .wthresh = I40E_DEFAULT_TX_WTHRESH,
3413 },
3414 .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
3415 .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
3416 .offloads = 0,
3417 };
3418
3419 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
3420 .nb_max = I40E_MAX_RING_DESC,
3421 .nb_min = I40E_MIN_RING_DESC,
3422 .nb_align = I40E_ALIGN_RING_DESC,
3423 };
3424
3425 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
3426 .nb_max = I40E_MAX_RING_DESC,
3427 .nb_min = I40E_MIN_RING_DESC,
3428 .nb_align = I40E_ALIGN_RING_DESC,
3429 .nb_seg_max = I40E_TX_MAX_SEG,
3430 .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
3431 };
3432
3433 if (pf->flags & I40E_FLAG_VMDQ) {
3434 dev_info->max_vmdq_pools = pf->max_nb_vmdq_vsi;
3435 dev_info->vmdq_queue_base = dev_info->max_rx_queues;
3436 dev_info->vmdq_queue_num = pf->vmdq_nb_qps *
3437 pf->max_nb_vmdq_vsi;
3438 dev_info->vmdq_pool_base = I40E_VMDQ_POOL_BASE;
3439 dev_info->max_rx_queues += dev_info->vmdq_queue_num;
3440 dev_info->max_tx_queues += dev_info->vmdq_queue_num;
3441 }
3442
3443 if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3444 /* For XL710 */
3445 dev_info->speed_capa = ETH_LINK_SPEED_40G;
3446 dev_info->default_rxportconf.nb_queues = 2;
3447 dev_info->default_txportconf.nb_queues = 2;
3448 if (dev->data->nb_rx_queues == 1)
3449 dev_info->default_rxportconf.ring_size = 2048;
3450 else
3451 dev_info->default_rxportconf.ring_size = 1024;
3452 if (dev->data->nb_tx_queues == 1)
3453 dev_info->default_txportconf.ring_size = 1024;
3454 else
3455 dev_info->default_txportconf.ring_size = 512;
3456
3457 } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
3458 /* For XXV710 */
3459 dev_info->speed_capa = ETH_LINK_SPEED_25G;
3460 dev_info->default_rxportconf.nb_queues = 1;
3461 dev_info->default_txportconf.nb_queues = 1;
3462 dev_info->default_rxportconf.ring_size = 256;
3463 dev_info->default_txportconf.ring_size = 256;
3464 } else {
3465 /* For X710 */
3466 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
3467 dev_info->default_rxportconf.nb_queues = 1;
3468 dev_info->default_txportconf.nb_queues = 1;
3469 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
3470 dev_info->default_rxportconf.ring_size = 512;
3471 dev_info->default_txportconf.ring_size = 256;
3472 } else {
3473 dev_info->default_rxportconf.ring_size = 256;
3474 dev_info->default_txportconf.ring_size = 256;
3475 }
3476 }
3477 dev_info->default_rxportconf.burst_size = 32;
3478 dev_info->default_txportconf.burst_size = 32;
3479 }
3480
3481 static int
3482 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
3483 {
3484 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3485 struct i40e_vsi *vsi = pf->main_vsi;
3486 PMD_INIT_FUNC_TRACE();
3487
3488 if (on)
3489 return i40e_vsi_add_vlan(vsi, vlan_id);
3490 else
3491 return i40e_vsi_delete_vlan(vsi, vlan_id);
3492 }
3493
3494 static int
3495 i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
3496 enum rte_vlan_type vlan_type,
3497 uint16_t tpid, int qinq)
3498 {
3499 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3500 uint64_t reg_r = 0;
3501 uint64_t reg_w = 0;
3502 uint16_t reg_id = 3;
3503 int ret;
3504
3505 if (qinq) {
3506 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3507 reg_id = 2;
3508 }
3509
3510 ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
3511 &reg_r, NULL);
3512 if (ret != I40E_SUCCESS) {
3513 PMD_DRV_LOG(ERR,
3514 "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
3515 reg_id);
3516 return -EIO;
3517 }
3518 PMD_DRV_LOG(DEBUG,
3519 "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
3520 reg_id, reg_r);
3521
3522 reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
3523 reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
3524 if (reg_r == reg_w) {
3525 PMD_DRV_LOG(DEBUG, "No need to write");
3526 return 0;
3527 }
3528
3529 ret = i40e_aq_debug_write_global_register(hw,
3530 I40E_GL_SWT_L2TAGCTRL(reg_id),
3531 reg_w, NULL);
3532 if (ret != I40E_SUCCESS) {
3533 PMD_DRV_LOG(ERR,
3534 "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
3535 reg_id);
3536 return -EIO;
3537 }
3538 PMD_DRV_LOG(DEBUG,
3539 "Global register 0x%08x is changed with value 0x%08x",
3540 I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w);
3541
3542 return 0;
3543 }
3544
3545 static int
3546 i40e_vlan_tpid_set(struct rte_eth_dev *dev,
3547 enum rte_vlan_type vlan_type,
3548 uint16_t tpid)
3549 {
3550 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3551 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3552 int qinq = dev->data->dev_conf.rxmode.offloads &
3553 DEV_RX_OFFLOAD_VLAN_EXTEND;
3554 int ret = 0;
3555
3556 if ((vlan_type != ETH_VLAN_TYPE_INNER &&
3557 vlan_type != ETH_VLAN_TYPE_OUTER) ||
3558 (!qinq && vlan_type == ETH_VLAN_TYPE_INNER)) {
3559 PMD_DRV_LOG(ERR,
3560 "Unsupported vlan type.");
3561 return -EINVAL;
3562 }
3563
3564 if (pf->support_multi_driver) {
3565 PMD_DRV_LOG(ERR, "Setting TPID is not supported.");
3566 return -ENOTSUP;
3567 }
3568
3569 /* 802.1ad frame support was added in NVM API 1.7 */
3570 if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
3571 if (qinq) {
3572 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3573 hw->first_tag = rte_cpu_to_le_16(tpid);
3574 else if (vlan_type == ETH_VLAN_TYPE_INNER)
3575 hw->second_tag = rte_cpu_to_le_16(tpid);
3576 } else {
3577 if (vlan_type == ETH_VLAN_TYPE_OUTER)
3578 hw->second_tag = rte_cpu_to_le_16(tpid);
3579 }
3580 ret = i40e_aq_set_switch_config(hw, 0, 0, NULL);
3581 if (ret != I40E_SUCCESS) {
3582 PMD_DRV_LOG(ERR,
3583 "Set switch config failed aq_err: %d",
3584 hw->aq.asq_last_status);
3585 ret = -EIO;
3586 }
3587 } else
3588 /* If NVM API < 1.7, keep the register setting */
3589 ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
3590 tpid, qinq);
3591
3592 return ret;
3593 }
3594
3595 static int
3596 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
3597 {
3598 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3599 struct i40e_vsi *vsi = pf->main_vsi;
3600 struct rte_eth_rxmode *rxmode;
3601
3602 rxmode = &dev->data->dev_conf.rxmode;
3603 if (mask & ETH_VLAN_FILTER_MASK) {
3604 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3605 i40e_vsi_config_vlan_filter(vsi, TRUE);
3606 else
3607 i40e_vsi_config_vlan_filter(vsi, FALSE);
3608 }
3609
3610 if (mask & ETH_VLAN_STRIP_MASK) {
3611 /* Enable or disable VLAN stripping */
3612 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3613 i40e_vsi_config_vlan_stripping(vsi, TRUE);
3614 else
3615 i40e_vsi_config_vlan_stripping(vsi, FALSE);
3616 }
3617
3618 if (mask & ETH_VLAN_EXTEND_MASK) {
3619 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
3620 i40e_vsi_config_double_vlan(vsi, TRUE);
3621 /* Set global registers with default ethertype. */
3622 i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
3623 ETHER_TYPE_VLAN);
3624 i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
3625 ETHER_TYPE_VLAN);
3626 }
3627 else
3628 i40e_vsi_config_double_vlan(vsi, FALSE);
3629 }
3630
3631 return 0;
3632 }
3633
3634 static void
3635 i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
3636 __rte_unused uint16_t queue,
3637 __rte_unused int on)
3638 {
3639 PMD_INIT_FUNC_TRACE();
3640 }
3641
3642 static int
3643 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3644 {
3645 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3646 struct i40e_vsi *vsi = pf->main_vsi;
3647 struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
3648 struct i40e_vsi_vlan_pvid_info info;
3649
3650 memset(&info, 0, sizeof(info));
3651 info.on = on;
3652 if (info.on)
3653 info.config.pvid = pvid;
3654 else {
3655 info.config.reject.tagged =
3656 data->dev_conf.txmode.hw_vlan_reject_tagged;
3657 info.config.reject.untagged =
3658 data->dev_conf.txmode.hw_vlan_reject_untagged;
3659 }
3660
3661 return i40e_vsi_vlan_pvid_set(vsi, &info);
3662 }
3663
3664 static int
3665 i40e_dev_led_on(struct rte_eth_dev *dev)
3666 {
3667 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3668 uint32_t mode = i40e_led_get(hw);
3669
3670 if (mode == 0)
3671 i40e_led_set(hw, 0xf, true); /* 0xf means led always true */
3672
3673 return 0;
3674 }
3675
3676 static int
3677 i40e_dev_led_off(struct rte_eth_dev *dev)
3678 {
3679 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3680 uint32_t mode = i40e_led_get(hw);
3681
3682 if (mode != 0)
3683 i40e_led_set(hw, 0, false);
3684
3685 return 0;
3686 }
3687
3688 static int
3689 i40e_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3690 {
3691 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3692 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3693
3694 fc_conf->pause_time = pf->fc_conf.pause_time;
3695
3696 /* read out from register, in case they are modified by other port */
3697 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] =
3698 I40E_READ_REG(hw, I40E_GLRPB_GHW) >> I40E_KILOSHIFT;
3699 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] =
3700 I40E_READ_REG(hw, I40E_GLRPB_GLW) >> I40E_KILOSHIFT;
3701
3702 fc_conf->high_water = pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS];
3703 fc_conf->low_water = pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS];
3704
3705 /* Return current mode according to actual setting */
3706 switch (hw->fc.current_mode) {
3707 case I40E_FC_FULL:
3708 fc_conf->mode = RTE_FC_FULL;
3709 break;
3710 case I40E_FC_TX_PAUSE:
3711 fc_conf->mode = RTE_FC_TX_PAUSE;
3712 break;
3713 case I40E_FC_RX_PAUSE:
3714 fc_conf->mode = RTE_FC_RX_PAUSE;
3715 break;
3716 case I40E_FC_NONE:
3717 default:
3718 fc_conf->mode = RTE_FC_NONE;
3719 }
3720
3721 return 0;
3722 }
3723
3724 static int
3725 i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
3726 {
3727 uint32_t mflcn_reg, fctrl_reg, reg;
3728 uint32_t max_high_water;
3729 uint8_t i, aq_failure;
3730 int err;
3731 struct i40e_hw *hw;
3732 struct i40e_pf *pf;
3733 enum i40e_fc_mode rte_fcmode_2_i40e_fcmode[] = {
3734 [RTE_FC_NONE] = I40E_FC_NONE,
3735 [RTE_FC_RX_PAUSE] = I40E_FC_RX_PAUSE,
3736 [RTE_FC_TX_PAUSE] = I40E_FC_TX_PAUSE,
3737 [RTE_FC_FULL] = I40E_FC_FULL
3738 };
3739
3740 /* The high_water field in rte_eth_fc_conf is specified in kilobytes */
3741
3742 max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
3743 if ((fc_conf->high_water > max_high_water) ||
3744 (fc_conf->high_water < fc_conf->low_water)) {
3745 PMD_INIT_LOG(ERR,
3746 "Invalid high/low water setup value in KB, High_water must be <= %d.",
3747 max_high_water);
3748 return -EINVAL;
3749 }
3750
3751 hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3752 pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3753 hw->fc.requested_mode = rte_fcmode_2_i40e_fcmode[fc_conf->mode];
3754
3755 pf->fc_conf.pause_time = fc_conf->pause_time;
3756 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->high_water;
3757 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = fc_conf->low_water;
3758
3759 PMD_INIT_FUNC_TRACE();
3760
3761 /* All the link flow control related enable/disable register
3762 * configuration is handled by the F/W
3763 */
3764 err = i40e_set_fc(hw, &aq_failure, true);
3765 if (err < 0)
3766 return -ENOSYS;
3767
3768 if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
3769 /* Configure flow control refresh threshold,
3770 * the value for stat_tx_pause_refresh_timer[8]
3771 * is used for global pause operation.
3772 */
3773
3774 I40E_WRITE_REG(hw,
3775 I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(8),
3776 pf->fc_conf.pause_time);
3777
3778 /* Configure the timer value included in transmitted pause
3779 * frames;
3780 * the value for stat_tx_pause_quanta[8] is used for global
3781 * pause operation.
3782 */
3783 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(8),
3784 pf->fc_conf.pause_time);
3785
3786 fctrl_reg = I40E_READ_REG(hw,
3787 I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL);
3788
3789 if (fc_conf->mac_ctrl_frame_fwd != 0)
3790 fctrl_reg |= I40E_PRTMAC_FWD_CTRL;
3791 else
3792 fctrl_reg &= ~I40E_PRTMAC_FWD_CTRL;
3793
3794 I40E_WRITE_REG(hw, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL,
3795 fctrl_reg);
3796 } else {
3797 /* Configure pause time (2 TCs per register) */
3798 reg = (uint32_t)pf->fc_conf.pause_time * (uint32_t)0x00010001;
3799 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS / 2; i++)
3800 I40E_WRITE_REG(hw, I40E_PRTDCB_FCTTVN(i), reg);
3801
3802 /* Configure flow control refresh threshold value */
3803 I40E_WRITE_REG(hw, I40E_PRTDCB_FCRTV,
3804 pf->fc_conf.pause_time / 2);
3805
3806 mflcn_reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
3807
3808 /* set or clear MFLCN.PMCF & MFLCN.DPF bits
3809 * depending on configuration
3810 */
3811 if (fc_conf->mac_ctrl_frame_fwd != 0) {
3812 mflcn_reg |= I40E_PRTDCB_MFLCN_PMCF_MASK;
3813 mflcn_reg &= ~I40E_PRTDCB_MFLCN_DPF_MASK;
3814 } else {
3815 mflcn_reg &= ~I40E_PRTDCB_MFLCN_PMCF_MASK;
3816 mflcn_reg |= I40E_PRTDCB_MFLCN_DPF_MASK;
3817 }
3818
3819 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg);
3820 }
3821
3822 if (!pf->support_multi_driver) {
3823 /* Configure watermarks based on both packets and bytes */
3824 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW,
3825 (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3826 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3827 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW,
3828 (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3829 << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE);
3830 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW,
3831 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS]
3832 << I40E_KILOSHIFT);
3833 I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
3834 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
3835 << I40E_KILOSHIFT);
3836 } else {
3837 PMD_DRV_LOG(ERR,
3838 "Water marker configuration is not supported.");
3839 }
3840
3841 I40E_WRITE_FLUSH(hw);
3842
3843 return 0;
3844 }
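/*
 * Worked example (illustrative only) of the watermark conversion done in
 * i40e_flow_ctrl_set() above.  fc_conf->high_water/low_water are given in
 * kilobytes; with a high water mark of 968 KB (the full Rx packet buffer,
 * which is also the maximum the setter accepts):
 *
 *   GLRPB_GHW (bytes)   = 968 << I40E_KILOSHIFT             = 991232
 *   GLRPB_PHW (packets) = 991232 / I40E_PACKET_AVERAGE_SIZE = 7744
 *
 * i.e. the same threshold is programmed once in bytes and once in packets,
 * assuming a 128-byte average packet.  A caller would fill rte_eth_fc_conf
 * accordingly, e.g.:
 *
 *   struct rte_eth_fc_conf fc;
 *
 *   memset(&fc, 0, sizeof(fc));
 *   fc.mode = RTE_FC_FULL;
 *   fc.high_water = 968;
 *   fc.low_water = 968;
 *   fc.pause_time = 0xFFFF;
 *   rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *
 * (port_id is assumed to be a started i40e port; values are only a sketch.)
 */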
3845
3846 static int
3847 i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
3848 __rte_unused struct rte_eth_pfc_conf *pfc_conf)
3849 {
3850 PMD_INIT_FUNC_TRACE();
3851
3852 return -ENOSYS;
3853 }
3854
3855 /* Add a MAC address, and update filters */
3856 static int
3857 i40e_macaddr_add(struct rte_eth_dev *dev,
3858 struct ether_addr *mac_addr,
3859 __rte_unused uint32_t index,
3860 uint32_t pool)
3861 {
3862 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3863 struct i40e_mac_filter_info mac_filter;
3864 struct i40e_vsi *vsi;
3865 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
3866 int ret;
3867
3868 /* If VMDQ not enabled or configured, return */
3869 if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) ||
3870 !pf->nb_cfg_vmdq_vsi)) {
3871 PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
3872 pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
3873 pool);
3874 return -ENOTSUP;
3875 }
3876
3877 if (pool > pf->nb_cfg_vmdq_vsi) {
3878 PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
3879 pool, pf->nb_cfg_vmdq_vsi);
3880 return -EINVAL;
3881 }
3882
3883 rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
3884 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3885 mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
3886 else
3887 mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
3888
3889 if (pool == 0)
3890 vsi = pf->main_vsi;
3891 else
3892 vsi = pf->vmdq[pool - 1].vsi;
3893
3894 ret = i40e_vsi_add_mac(vsi, &mac_filter);
3895 if (ret != I40E_SUCCESS) {
3896 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
3897 return -ENODEV;
3898 }
3899 return 0;
3900 }
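/*
 * Usage sketch (illustrative): adding a unicast address to VMDq pool 2
 * through the generic ethdev API ends up in i40e_macaddr_add() above with
 * pool == 2, i.e. on pf->vmdq[1].vsi; pool 0 targets the main VSI.
 *
 *   struct ether_addr addr = {
 *       .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *   };
 *   int rc = rte_eth_dev_mac_addr_add(port_id, &addr, 2);
 *   if (rc != 0)
 *       printf("mac addr add failed: %d\n", rc);
 *
 * (port_id is assumed to be an i40e port configured with VMDq pools.)
 */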
3901
3902 /* Remove a MAC address, and update filters */
3903 static void
3904 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
3905 {
3906 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3907 struct i40e_vsi *vsi;
3908 struct rte_eth_dev_data *data = dev->data;
3909 struct ether_addr *macaddr;
3910 int ret;
3911 uint32_t i;
3912 uint64_t pool_sel;
3913
3914 macaddr = &(data->mac_addrs[index]);
3915
3916 pool_sel = dev->data->mac_pool_sel[index];
3917
3918 for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
3919 if (pool_sel & (1ULL << i)) {
3920 if (i == 0)
3921 vsi = pf->main_vsi;
3922 else {
3923 /* No VMDQ pool enabled or configured */
3924 if (!(pf->flags & I40E_FLAG_VMDQ) ||
3925 (i > pf->nb_cfg_vmdq_vsi)) {
3926 PMD_DRV_LOG(ERR,
3927 "No VMDQ pool enabled/configured");
3928 return;
3929 }
3930 vsi = pf->vmdq[i - 1].vsi;
3931 }
3932 ret = i40e_vsi_delete_mac(vsi, macaddr);
3933
3934 if (ret) {
3935 PMD_DRV_LOG(ERR, "Failed to remove MACVLAN filter");
3936 return;
3937 }
3938 }
3939 }
3940 }
3941
3942 /* Set perfect match or hash match of MAC and VLAN for a VF */
3943 static int
3944 i40e_vf_mac_filter_set(struct i40e_pf *pf,
3945 struct rte_eth_mac_filter *filter,
3946 bool add)
3947 {
3948 struct i40e_hw *hw;
3949 struct i40e_mac_filter_info mac_filter;
3950 struct ether_addr old_mac;
3951 struct ether_addr *new_mac;
3952 struct i40e_pf_vf *vf = NULL;
3953 uint16_t vf_id;
3954 int ret;
3955
3956 if (pf == NULL) {
3957 PMD_DRV_LOG(ERR, "Invalid PF argument.");
3958 return -EINVAL;
3959 }
3960 hw = I40E_PF_TO_HW(pf);
3961
3962 if (filter == NULL) {
3963 PMD_DRV_LOG(ERR, "Invalid mac filter argument.");
3964 return -EINVAL;
3965 }
3966
3967 new_mac = &filter->mac_addr;
3968
3969 if (is_zero_ether_addr(new_mac)) {
3970 PMD_DRV_LOG(ERR, "Invalid ethernet address.");
3971 return -EINVAL;
3972 }
3973
3974 vf_id = filter->dst_id;
3975
3976 if (vf_id > pf->vf_num - 1 || !pf->vfs) {
3977 PMD_DRV_LOG(ERR, "Invalid argument.");
3978 return -EINVAL;
3979 }
3980 vf = &pf->vfs[vf_id];
3981
3982 if (add && is_same_ether_addr(new_mac, &(pf->dev_addr))) {
3983 PMD_DRV_LOG(INFO, "Ignore adding permanent MAC address.");
3984 return -EINVAL;
3985 }
3986
3987 if (add) {
3988 rte_memcpy(&old_mac, hw->mac.addr, ETHER_ADDR_LEN);
3989 rte_memcpy(hw->mac.addr, new_mac->addr_bytes,
3990 ETHER_ADDR_LEN);
3991 rte_memcpy(&mac_filter.mac_addr, &filter->mac_addr,
3992 ETHER_ADDR_LEN);
3993
3994 mac_filter.filter_type = filter->filter_type;
3995 ret = i40e_vsi_add_mac(vf->vsi, &mac_filter);
3996 if (ret != I40E_SUCCESS) {
3997 PMD_DRV_LOG(ERR, "Failed to add MAC filter.");
3998 return -1;
3999 }
4000 ether_addr_copy(new_mac, &pf->dev_addr);
4001 } else {
4002 rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
4003 ETHER_ADDR_LEN);
4004 ret = i40e_vsi_delete_mac(vf->vsi, &filter->mac_addr);
4005 if (ret != I40E_SUCCESS) {
4006 PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
4007 return -1;
4008 }
4009
4010 /* Clear device address as it has been removed */
4011 if (is_same_ether_addr(&(pf->dev_addr), new_mac))
4012 memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
4013 }
4014
4015 return 0;
4016 }
4017
4018 /* MAC filter handle */
4019 static int
4020 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
4021 void *arg)
4022 {
4023 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4024 struct rte_eth_mac_filter *filter;
4025 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4026 int ret = I40E_NOT_SUPPORTED;
4027
4028 filter = (struct rte_eth_mac_filter *)(arg);
4029
4030 switch (filter_op) {
4031 case RTE_ETH_FILTER_NOP:
4032 ret = I40E_SUCCESS;
4033 break;
4034 case RTE_ETH_FILTER_ADD:
4035 i40e_pf_disable_irq0(hw);
4036 if (filter->is_vf)
4037 ret = i40e_vf_mac_filter_set(pf, filter, 1);
4038 i40e_pf_enable_irq0(hw);
4039 break;
4040 case RTE_ETH_FILTER_DELETE:
4041 i40e_pf_disable_irq0(hw);
4042 if (filter->is_vf)
4043 ret = i40e_vf_mac_filter_set(pf, filter, 0);
4044 i40e_pf_enable_irq0(hw);
4045 break;
4046 default:
4047 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
4048 ret = I40E_ERR_PARAM;
4049 break;
4050 }
4051
4052 return ret;
4053 }
4054
4055 static int
4056 i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4057 {
4058 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
4059 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4060 uint32_t reg;
4061 int ret;
4062
4063 if (!lut)
4064 return -EINVAL;
4065
4066 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4067 ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, TRUE,
4068 lut, lut_size);
4069 if (ret) {
4070 PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
4071 return ret;
4072 }
4073 } else {
4074 uint32_t *lut_dw = (uint32_t *)lut;
4075 uint16_t i, lut_size_dw = lut_size / 4;
4076
4077 if (vsi->type == I40E_VSI_SRIOV) {
4078 for (i = 0; i < lut_size_dw; i++) {
4079 reg = I40E_VFQF_HLUT1(i, vsi->user_param);
4080 lut_dw[i] = i40e_read_rx_ctl(hw, reg);
4081 }
4082 } else {
4083 for (i = 0; i < lut_size_dw; i++)
4084 lut_dw[i] = I40E_READ_REG(hw,
4085 I40E_PFQF_HLUT(i));
4086 }
4087 }
4088
4089 return 0;
4090 }
4091
4092 int
4093 i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
4094 {
4095 struct i40e_pf *pf;
4096 struct i40e_hw *hw;
4097 int ret;
4098
4099 if (!vsi || !lut)
4100 return -EINVAL;
4101
4102 pf = I40E_VSI_TO_PF(vsi);
4103 hw = I40E_VSI_TO_HW(vsi);
4104
4105 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
4106 ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE,
4107 lut, lut_size);
4108 if (ret) {
4109 PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
4110 return ret;
4111 }
4112 } else {
4113 uint32_t *lut_dw = (uint32_t *)lut;
4114 uint16_t i, lut_size_dw = lut_size / 4;
4115
4116 if (vsi->type == I40E_VSI_SRIOV) {
4117 for (i = 0; i < lut_size_dw; i++)
4118 I40E_WRITE_REG(
4119 hw,
4120 I40E_VFQF_HLUT1(i, vsi->user_param),
4121 lut_dw[i]);
4122 } else {
4123 for (i = 0; i < lut_size_dw; i++)
4124 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i),
4125 lut_dw[i]);
4126 }
4127 I40E_WRITE_FLUSH(hw);
4128 }
4129
4130 return 0;
4131 }
4132
4133 static int
4134 i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
4135 struct rte_eth_rss_reta_entry64 *reta_conf,
4136 uint16_t reta_size)
4137 {
4138 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4139 uint16_t i, lut_size = pf->hash_lut_size;
4140 uint16_t idx, shift;
4141 uint8_t *lut;
4142 int ret;
4143
4144 if (reta_size != lut_size ||
4145 reta_size > ETH_RSS_RETA_SIZE_512) {
4146 PMD_DRV_LOG(ERR,
4147 "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
4148 reta_size, lut_size);
4149 return -EINVAL;
4150 }
4151
4152 lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4153 if (!lut) {
4154 PMD_DRV_LOG(ERR, "No memory can be allocated");
4155 return -ENOMEM;
4156 }
4157 ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4158 if (ret)
4159 goto out;
4160 for (i = 0; i < reta_size; i++) {
4161 idx = i / RTE_RETA_GROUP_SIZE;
4162 shift = i % RTE_RETA_GROUP_SIZE;
4163 if (reta_conf[idx].mask & (1ULL << shift))
4164 lut[i] = reta_conf[idx].reta[shift];
4165 }
4166 ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
4167
4168 out:
4169 rte_free(lut);
4170
4171 return ret;
4172 }
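/*
 * Usage sketch (illustrative) of the RETA update path above: an application
 * fills rte_eth_rss_reta_entry64 groups of 64 entries, sets the mask bits it
 * wants to change and calls the generic API.  This example spreads a
 * 512-entry table across 4 Rx queues; 512 is only an assumption for a PF,
 * query dev_info.reta_size first.
 *
 *   struct rte_eth_rss_reta_entry64 conf[512 / RTE_RETA_GROUP_SIZE];
 *   uint16_t i;
 *
 *   memset(conf, 0, sizeof(conf));
 *   for (i = 0; i < 512; i++) {
 *       conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *           1ULL << (i % RTE_RETA_GROUP_SIZE);
 *       conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] = i % 4;
 *   }
 *   rte_eth_dev_rss_reta_update(port_id, conf, 512);
 */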
4173
4174 static int
4175 i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
4176 struct rte_eth_rss_reta_entry64 *reta_conf,
4177 uint16_t reta_size)
4178 {
4179 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4180 uint16_t i, lut_size = pf->hash_lut_size;
4181 uint16_t idx, shift;
4182 uint8_t *lut;
4183 int ret;
4184
4185 if (reta_size != lut_size ||
4186 reta_size > ETH_RSS_RETA_SIZE_512) {
4187 PMD_DRV_LOG(ERR,
4188 "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
4189 reta_size, lut_size);
4190 return -EINVAL;
4191 }
4192
4193 lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
4194 if (!lut) {
4195 PMD_DRV_LOG(ERR, "No memory can be allocated");
4196 return -ENOMEM;
4197 }
4198
4199 ret = i40e_get_rss_lut(pf->main_vsi, lut, reta_size);
4200 if (ret)
4201 goto out;
4202 for (i = 0; i < reta_size; i++) {
4203 idx = i / RTE_RETA_GROUP_SIZE;
4204 shift = i % RTE_RETA_GROUP_SIZE;
4205 if (reta_conf[idx].mask & (1ULL << shift))
4206 reta_conf[idx].reta[shift] = lut[i];
4207 }
4208
4209 out:
4210 rte_free(lut);
4211
4212 return ret;
4213 }
4214
4215 /**
4216 * i40e_allocate_dma_mem_d - specific memory alloc for shared code (base driver)
4217 * @hw: pointer to the HW structure
4218 * @mem: pointer to mem struct to fill out
4219 * @size: size of memory requested
4220 * @alignment: what to align the allocation to
4221 **/
4222 enum i40e_status_code
4223 i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4224 struct i40e_dma_mem *mem,
4225 u64 size,
4226 u32 alignment)
4227 {
4228 const struct rte_memzone *mz = NULL;
4229 char z_name[RTE_MEMZONE_NAMESIZE];
4230
4231 if (!mem)
4232 return I40E_ERR_PARAM;
4233
4234 snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
4235 mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
4236 RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
4237 if (!mz)
4238 return I40E_ERR_NO_MEMORY;
4239
4240 mem->size = size;
4241 mem->va = mz->addr;
4242 mem->pa = mz->iova;
4243 mem->zone = (const void *)mz;
4244 PMD_DRV_LOG(DEBUG,
4245 "memzone %s allocated with physical address: %"PRIu64,
4246 mz->name, mem->pa);
4247
4248 return I40E_SUCCESS;
4249 }
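/*
 * Usage sketch (illustrative): the base/shared code obtains DMA-able memory
 * through this hook and releases it with i40e_free_dma_mem_d() below.
 * mem.va is the CPU mapping, mem.pa the bus address handed to the hardware;
 * the 4096-byte size and alignment are only example values.
 *
 *   struct i40e_dma_mem mem;
 *
 *   memset(&mem, 0, sizeof(mem));
 *   if (i40e_allocate_dma_mem_d(hw, &mem, 4096, 4096) != I40E_SUCCESS)
 *       return I40E_ERR_NO_MEMORY;
 *   ... program mem.pa into a hardware ring or descriptor ...
 *   i40e_free_dma_mem_d(hw, &mem);
 */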
4250
4251 /**
4252 * i40e_free_dma_mem_d - specific memory free for shared code (base driver)
4253 * @hw: pointer to the HW structure
4254 * @mem: ptr to mem struct to free
4255 **/
4256 enum i40e_status_code
4257 i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4258 struct i40e_dma_mem *mem)
4259 {
4260 if (!mem)
4261 return I40E_ERR_PARAM;
4262
4263 PMD_DRV_LOG(DEBUG,
4264 "memzone %s to be freed with physical address: %"PRIu64,
4265 ((const struct rte_memzone *)mem->zone)->name, mem->pa);
4266 rte_memzone_free((const struct rte_memzone *)mem->zone);
4267 mem->zone = NULL;
4268 mem->va = NULL;
4269 mem->pa = (u64)0;
4270
4271 return I40E_SUCCESS;
4272 }
4273
4274 /**
4275 * i40e_allocate_virt_mem_d - specific memory alloc for shared code (base driver)
4276 * @hw: pointer to the HW structure
4277 * @mem: pointer to mem struct to fill out
4278 * @size: size of memory requested
4279 **/
4280 enum i40e_status_code
4281 i40e_allocate_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4282 struct i40e_virt_mem *mem,
4283 u32 size)
4284 {
4285 if (!mem)
4286 return I40E_ERR_PARAM;
4287
4288 mem->size = size;
4289 mem->va = rte_zmalloc("i40e", size, 0);
4290
4291 if (mem->va)
4292 return I40E_SUCCESS;
4293 else
4294 return I40E_ERR_NO_MEMORY;
4295 }
4296
4297 /**
4298 * i40e_free_virt_mem_d - specific memory free for shared code (base driver)
4299 * @hw: pointer to the HW structure
4300 * @mem: pointer to mem struct to free
4301 **/
4302 enum i40e_status_code
4303 i40e_free_virt_mem_d(__attribute__((unused)) struct i40e_hw *hw,
4304 struct i40e_virt_mem *mem)
4305 {
4306 if (!mem)
4307 return I40E_ERR_PARAM;
4308
4309 rte_free(mem->va);
4310 mem->va = NULL;
4311
4312 return I40E_SUCCESS;
4313 }
4314
4315 void
4316 i40e_init_spinlock_d(struct i40e_spinlock *sp)
4317 {
4318 rte_spinlock_init(&sp->spinlock);
4319 }
4320
4321 void
4322 i40e_acquire_spinlock_d(struct i40e_spinlock *sp)
4323 {
4324 rte_spinlock_lock(&sp->spinlock);
4325 }
4326
4327 void
4328 i40e_release_spinlock_d(struct i40e_spinlock *sp)
4329 {
4330 rte_spinlock_unlock(&sp->spinlock);
4331 }
4332
4333 void
4334 i40e_destroy_spinlock_d(__attribute__((unused)) struct i40e_spinlock *sp)
4335 {
4336 return;
4337 }
4338
4339 /**
4340 * Get the hardware capabilities, which will be parsed
4341 * and saved into struct i40e_hw.
4342 */
4343 static int
4344 i40e_get_cap(struct i40e_hw *hw)
4345 {
4346 struct i40e_aqc_list_capabilities_element_resp *buf;
4347 uint16_t len, size = 0;
4348 int ret;
4349
4350 /* Allocate a buffer large enough to hold the response data temporarily */
4351 len = sizeof(struct i40e_aqc_list_capabilities_element_resp) *
4352 I40E_MAX_CAP_ELE_NUM;
4353 buf = rte_zmalloc("i40e", len, 0);
4354 if (!buf) {
4355 PMD_DRV_LOG(ERR, "Failed to allocate memory");
4356 return I40E_ERR_NO_MEMORY;
4357 }
4358
4359 /* Get, parse the capabilities and save it to hw */
4360 ret = i40e_aq_discover_capabilities(hw, buf, len, &size,
4361 i40e_aqc_opc_list_func_capabilities, NULL);
4362 if (ret != I40E_SUCCESS)
4363 PMD_DRV_LOG(ERR, "Failed to discover capabilities");
4364
4365 /* Free the temporary buffer after being used */
4366 rte_free(buf);
4367
4368 return ret;
4369 }
4370
4371 #define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4
4372
4373 static int i40e_pf_parse_vf_queue_number_handler(const char *key,
4374 const char *value,
4375 void *opaque)
4376 {
4377 struct i40e_pf *pf;
4378 unsigned long num;
4379 char *end;
4380
4381 pf = (struct i40e_pf *)opaque;
4382 RTE_SET_USED(key);
4383
4384 errno = 0;
4385 num = strtoul(value, &end, 0);
4386 if (errno != 0 || end == value || *end != 0) {
4387 PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, Now it is "
4388 "kept the value = %hu", value, pf->vf_nb_qp_max);
4389 return -(EINVAL);
4390 }
4391
4392 if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num))
4393 pf->vf_nb_qp_max = (uint16_t)num;
4394 else
4395 /* return 0 here so that a later valid occurrence of the same argument still takes effect */
4396 PMD_DRV_LOG(WARNING, "Invalid VF queue number = %lu, it must be a "
4397 "power of 2 and no greater than 16! Keeping "
4398 "the current value = %hu", num, pf->vf_nb_qp_max);
4399
4400 return 0;
4401 }
4402
4403 static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
4404 {
4405 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4406 struct rte_kvargs *kvlist;
4407 int kvargs_count;
4408
4409 /* set default queue number per VF as 4 */
4410 pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
4411
4412 if (dev->device->devargs == NULL)
4413 return 0;
4414
4415 kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
4416 if (kvlist == NULL)
4417 return -(EINVAL);
4418
4419 kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4420 if (!kvargs_count) {
4421 rte_kvargs_free(kvlist);
4422 return 0;
4423 }
4424
4425 if (kvargs_count > 1)
4426 PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
4427 "the first invalid or last valid one is used !",
4428 ETH_I40E_QUEUE_NUM_PER_VF_ARG);
4429
4430 rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
4431 i40e_pf_parse_vf_queue_number_handler, pf);
4432
4433 rte_kvargs_free(kvlist);
4434
4435 return 0;
4436 }
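/*
 * Usage sketch (illustrative): the per-VF queue count parsed above is
 * supplied as a device argument (the ETH_I40E_QUEUE_NUM_PER_VF_ARG key,
 * documented for this PMD as "queue-num-per-vf") on the EAL command line,
 * e.g. with the PCI whitelist option of this DPDK release:
 *
 *   testpmd -w 0000:84:00.0,queue-num-per-vf=8 -- -i
 *
 * Values that are not a power of two, or that exceed the limit noted in
 * i40e_pf_parse_vf_queue_number_handler()'s warning, are rejected and the
 * default of 4 queues per VF is kept.
 */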
4437
4438 static int
4439 i40e_pf_parameter_init(struct rte_eth_dev *dev)
4440 {
4441 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4442 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4443 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4444 uint16_t qp_count = 0, vsi_count = 0;
4445
4446 if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
4447 PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
4448 return -EINVAL;
4449 }
4450
4451 i40e_pf_config_vf_rxq_number(dev);
4452
4453 /* Add the parameter init for LFC */
4454 pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME;
4455 pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER;
4456 pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
4457
4458 pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
4459 pf->max_num_vsi = hw->func_caps.num_vsis;
4460 pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
4461 pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
4462
4463 /* FDir queue/VSI allocation */
4464 pf->fdir_qp_offset = 0;
4465 if (hw->func_caps.fd) {
4466 pf->flags |= I40E_FLAG_FDIR;
4467 pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
4468 } else {
4469 pf->fdir_nb_qps = 0;
4470 }
4471 qp_count += pf->fdir_nb_qps;
4472 vsi_count += 1;
4473
4474 /* LAN queue/VSI allocation */
4475 pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
4476 if (!hw->func_caps.rss) {
4477 pf->lan_nb_qps = 1;
4478 } else {
4479 pf->flags |= I40E_FLAG_RSS;
4480 if (hw->mac.type == I40E_MAC_X722)
4481 pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
4482 pf->lan_nb_qps = pf->lan_nb_qp_max;
4483 }
4484 qp_count += pf->lan_nb_qps;
4485 vsi_count += 1;
4486
4487 /* VF queue/VSI allocation */
4488 pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
4489 if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
4490 pf->flags |= I40E_FLAG_SRIOV;
4491 pf->vf_nb_qps = pf->vf_nb_qp_max;
4492 pf->vf_num = pci_dev->max_vfs;
4493 PMD_DRV_LOG(DEBUG,
4494 "%u VF VSIs, %u queues per VF VSI, in total %u queues",
4495 pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
4496 } else {
4497 pf->vf_nb_qps = 0;
4498 pf->vf_num = 0;
4499 }
4500 qp_count += pf->vf_nb_qps * pf->vf_num;
4501 vsi_count += pf->vf_num;
4502
4503 /* VMDq queue/VSI allocation */
4504 pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
4505 pf->vmdq_nb_qps = 0;
4506 pf->max_nb_vmdq_vsi = 0;
4507 if (hw->func_caps.vmdq) {
4508 if (qp_count < hw->func_caps.num_tx_qp &&
4509 vsi_count < hw->func_caps.num_vsis) {
4510 pf->max_nb_vmdq_vsi = (hw->func_caps.num_tx_qp -
4511 qp_count) / pf->vmdq_nb_qp_max;
4512
4513 /* Limit the maximum number of VMDq vsi to the maximum
4514 * ethdev can support
4515 */
4516 pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4517 hw->func_caps.num_vsis - vsi_count);
4518 pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
4519 ETH_64_POOLS);
4520 if (pf->max_nb_vmdq_vsi) {
4521 pf->flags |= I40E_FLAG_VMDQ;
4522 pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
4523 PMD_DRV_LOG(DEBUG,
4524 "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
4525 pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
4526 pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
4527 } else {
4528 PMD_DRV_LOG(INFO,
4529 "No enough queues left for VMDq");
4530 }
4531 } else {
4532 PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
4533 }
4534 }
4535 qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
4536 vsi_count += pf->max_nb_vmdq_vsi;
4537
4538 if (hw->func_caps.dcb)
4539 pf->flags |= I40E_FLAG_DCB;
4540
4541 if (qp_count > hw->func_caps.num_tx_qp) {
4542 PMD_DRV_LOG(ERR,
4543 "Failed to allocate %u queues, which exceeds the hardware maximum %u",
4544 qp_count, hw->func_caps.num_tx_qp);
4545 return -EINVAL;
4546 }
4547 if (vsi_count > hw->func_caps.num_vsis) {
4548 PMD_DRV_LOG(ERR,
4549 "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
4550 vsi_count, hw->func_caps.num_vsis);
4551 return -EINVAL;
4552 }
4553
4554 return 0;
4555 }
4556
4557 static int
4558 i40e_pf_get_switch_config(struct i40e_pf *pf)
4559 {
4560 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4561 struct i40e_aqc_get_switch_config_resp *switch_config;
4562 struct i40e_aqc_switch_config_element_resp *element;
4563 uint16_t start_seid = 0, num_reported;
4564 int ret;
4565
4566 switch_config = (struct i40e_aqc_get_switch_config_resp *)\
4567 rte_zmalloc("i40e", I40E_AQ_LARGE_BUF, 0);
4568 if (!switch_config) {
4569 PMD_DRV_LOG(ERR, "Failed to allocated memory");
4570 return -ENOMEM;
4571 }
4572
4573 /* Get the switch configurations */
4574 ret = i40e_aq_get_switch_config(hw, switch_config,
4575 I40E_AQ_LARGE_BUF, &start_seid, NULL);
4576 if (ret != I40E_SUCCESS) {
4577 PMD_DRV_LOG(ERR, "Failed to get switch configurations");
4578 goto fail;
4579 }
4580 num_reported = rte_le_to_cpu_16(switch_config->header.num_reported);
4581 if (num_reported != 1) { /* The number should be 1 */
4582 PMD_DRV_LOG(ERR, "Wrong number of switch config reported");
4583 goto fail;
4584 }
4585
4586 /* Parse the switch configuration elements */
4587 element = &(switch_config->element[0]);
4588 if (element->element_type == I40E_SWITCH_ELEMENT_TYPE_VSI) {
4589 pf->mac_seid = rte_le_to_cpu_16(element->uplink_seid);
4590 pf->main_vsi_seid = rte_le_to_cpu_16(element->seid);
4591 } else
4592 PMD_DRV_LOG(INFO, "Unknown element type");
4593
4594 fail:
4595 rte_free(switch_config);
4596
4597 return ret;
4598 }
4599
4600 static int
4601 i40e_res_pool_init (struct i40e_res_pool_info *pool, uint32_t base,
4602 uint32_t num)
4603 {
4604 struct pool_entry *entry;
4605
4606 if (pool == NULL || num == 0)
4607 return -EINVAL;
4608
4609 entry = rte_zmalloc("i40e", sizeof(*entry), 0);
4610 if (entry == NULL) {
4611 PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool");
4612 return -ENOMEM;
4613 }
4614
4615 /* Initialize the queue heap */
4616 pool->num_free = num;
4617 pool->num_alloc = 0;
4618 pool->base = base;
4619 LIST_INIT(&pool->alloc_list);
4620 LIST_INIT(&pool->free_list);
4621
4622 /* Initialize element */
4623 entry->base = 0;
4624 entry->len = num;
4625
4626 LIST_INSERT_HEAD(&pool->free_list, entry, next);
4627 return 0;
4628 }
4629
4630 static void
4631 i40e_res_pool_destroy(struct i40e_res_pool_info *pool)
4632 {
4633 struct pool_entry *entry, *next_entry;
4634
4635 if (pool == NULL)
4636 return;
4637
4638 for (entry = LIST_FIRST(&pool->alloc_list);
4639 entry && (next_entry = LIST_NEXT(entry, next), 1);
4640 entry = next_entry) {
4641 LIST_REMOVE(entry, next);
4642 rte_free(entry);
4643 }
4644
4645 for (entry = LIST_FIRST(&pool->free_list);
4646 entry && (next_entry = LIST_NEXT(entry, next), 1);
4647 entry = next_entry) {
4648 LIST_REMOVE(entry, next);
4649 rte_free(entry);
4650 }
4651
4652 pool->num_free = 0;
4653 pool->num_alloc = 0;
4654 pool->base = 0;
4655 LIST_INIT(&pool->alloc_list);
4656 LIST_INIT(&pool->free_list);
4657 }
4658
4659 static int
4660 i40e_res_pool_free(struct i40e_res_pool_info *pool,
4661 uint32_t base)
4662 {
4663 struct pool_entry *entry, *next, *prev, *valid_entry = NULL;
4664 uint32_t pool_offset;
4665 int insert;
4666
4667 if (pool == NULL) {
4668 PMD_DRV_LOG(ERR, "Invalid parameter");
4669 return -EINVAL;
4670 }
4671
4672 pool_offset = base - pool->base;
4673 /* Lookup in alloc list */
4674 LIST_FOREACH(entry, &pool->alloc_list, next) {
4675 if (entry->base == pool_offset) {
4676 valid_entry = entry;
4677 LIST_REMOVE(entry, next);
4678 break;
4679 }
4680 }
4681
4682 /* Not found, return */
4683 if (valid_entry == NULL) {
4684 PMD_DRV_LOG(ERR, "Failed to find entry");
4685 return -EINVAL;
4686 }
4687
4688 /**
4689 * Found it; move it to the free list and try to merge.
4690 * To make merging easier, the free list is kept sorted by queue base.
4691 * Find the adjacent prev and next entries.
4692 */
4693 prev = next = NULL;
4694 LIST_FOREACH(entry, &pool->free_list, next) {
4695 if (entry->base > valid_entry->base) {
4696 next = entry;
4697 break;
4698 }
4699 prev = entry;
4700 }
4701
4702 insert = 0;
4703 /* Try to merge with the next one */
4704 if (next != NULL) {
4705 /* Merge with next one */
4706 if (valid_entry->base + valid_entry->len == next->base) {
4707 next->base = valid_entry->base;
4708 next->len += valid_entry->len;
4709 rte_free(valid_entry);
4710 valid_entry = next;
4711 insert = 1;
4712 }
4713 }
4714
4715 if (prev != NULL) {
4716 /* Merge with previous one */
4717 if (prev->base + prev->len == valid_entry->base) {
4718 prev->len += valid_entry->len;
4719 /* If it was already merged with the next one, remove that node */
4720 if (insert == 1) {
4721 LIST_REMOVE(valid_entry, next);
4722 rte_free(valid_entry);
4723 } else {
4724 rte_free(valid_entry);
4725 insert = 1;
4726 }
4727 }
4728 }
4729
4730 /* No entry to merge with, insert */
4731 if (insert == 0) {
4732 if (prev != NULL)
4733 LIST_INSERT_AFTER(prev, valid_entry, next);
4734 else if (next != NULL)
4735 LIST_INSERT_BEFORE(next, valid_entry, next);
4736 else /* It's empty list, insert to head */
4737 LIST_INSERT_HEAD(&pool->free_list, valid_entry, next);
4738 }
4739
4740 pool->num_free += valid_entry->len;
4741 pool->num_alloc -= valid_entry->len;
4742
4743 return 0;
4744 }
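/*
 * Worked example (illustrative) of the merge logic in i40e_res_pool_free()
 * above.  Suppose the free list holds {base 0, len 8} and {base 24, len 8}
 * and the block {base 8, len 16} is returned:
 *
 *   1. it is adjacent to the next entry (8 + 16 == 24), so that entry
 *      becomes {base 8, len 24};
 *   2. the previous entry ends where the merged block starts (0 + 8 == 8),
 *      so it absorbs it and the free list collapses to {base 0, len 32}.
 *
 * Keeping the free list sorted by base is what makes both adjacency checks
 * trivial once the insertion point is found.
 */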
4745
4746 static int
4747 i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
4748 uint16_t num)
4749 {
4750 struct pool_entry *entry, *valid_entry;
4751
4752 if (pool == NULL || num == 0) {
4753 PMD_DRV_LOG(ERR, "Invalid parameter");
4754 return -EINVAL;
4755 }
4756
4757 if (pool->num_free < num) {
4758 PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u",
4759 num, pool->num_free);
4760 return -ENOMEM;
4761 }
4762
4763 valid_entry = NULL;
4764 /* Look up the free list and find the best-fit entry */
4765 LIST_FOREACH(entry, &pool->free_list, next) {
4766 if (entry->len >= num) {
4767 /* Find best one */
4768 if (entry->len == num) {
4769 valid_entry = entry;
4770 break;
4771 }
4772 if (valid_entry == NULL || valid_entry->len > entry->len)
4773 valid_entry = entry;
4774 }
4775 }
4776
4777 /* No entry found that satisfies the request, return */
4778 if (valid_entry == NULL) {
4779 PMD_DRV_LOG(ERR, "No valid entry found");
4780 return -ENOMEM;
4781 }
4782 /**
4783 * The entry has exactly the requested number of queues,
4784 * remove it from the free list.
4785 */
4786 if (valid_entry->len == num) {
4787 LIST_REMOVE(valid_entry, next);
4788 } else {
4789 /**
4790 * The entry has more queues than requested,
4791 * create a new entry for alloc_list and adjust the
4792 * base and length of the remaining free_list entry.
4793 */
4794 entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
4795 if (entry == NULL) {
4796 PMD_DRV_LOG(ERR,
4797 "Failed to allocate memory for resource pool");
4798 return -ENOMEM;
4799 }
4800 entry->base = valid_entry->base;
4801 entry->len = num;
4802 valid_entry->base += num;
4803 valid_entry->len -= num;
4804 valid_entry = entry;
4805 }
4806
4807 /* Insert it into alloc list, not sorted */
4808 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
4809
4810 pool->num_free -= valid_entry->len;
4811 pool->num_alloc += valid_entry->len;
4812
4813 return valid_entry->base + pool->base;
4814 }
4815
4816 /**
4817 * bitmap_is_subset - Check whether src2 is subset of src1
4818 **/
4819 static inline int
4820 bitmap_is_subset(uint8_t src1, uint8_t src2)
4821 {
4822 return !((src1 ^ src2) & src2);
4823 }
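/*
 * Worked example (illustrative): with src1 = 0x0f (TC0-TC3 supported),
 *   src2 = 0x05 -> (0x0f ^ 0x05) & 0x05 = 0x0a & 0x05 = 0, i.e. a subset;
 *   src2 = 0x11 -> (0x0f ^ 0x11) & 0x11 = 0x1e & 0x11 = 0x10, not a subset
 * (bit 4 is requested but not supported).
 */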
4824
4825 static enum i40e_status_code
4826 validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4827 {
4828 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4829
4830 /* If DCB is not supported, only default TC is supported */
4831 if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
4832 PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported");
4833 return I40E_NOT_SUPPORTED;
4834 }
4835
4836 if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
4837 PMD_DRV_LOG(ERR,
4838 "Enabled TC map 0x%x not applicable to HW support 0x%x",
4839 hw->func_caps.enabled_tcmap, enabled_tcmap);
4840 return I40E_NOT_SUPPORTED;
4841 }
4842 return I40E_SUCCESS;
4843 }
4844
4845 int
4846 i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
4847 struct i40e_vsi_vlan_pvid_info *info)
4848 {
4849 struct i40e_hw *hw;
4850 struct i40e_vsi_context ctxt;
4851 uint8_t vlan_flags = 0;
4852 int ret;
4853
4854 if (vsi == NULL || info == NULL) {
4855 PMD_DRV_LOG(ERR, "invalid parameters");
4856 return I40E_ERR_PARAM;
4857 }
4858
4859 if (info->on) {
4860 vsi->info.pvid = info->config.pvid;
4861 /**
4862 * If insert pvid is enabled, only tagged pkts are
4863 * allowed to be sent out.
4864 */
4865 vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID |
4866 I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4867 } else {
4868 vsi->info.pvid = 0;
4869 if (info->config.reject.tagged == 0)
4870 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
4871
4872 if (info->config.reject.untagged == 0)
4873 vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
4874 }
4875 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_INSERT_PVID |
4876 I40E_AQ_VSI_PVLAN_MODE_MASK);
4877 vsi->info.port_vlan_flags |= vlan_flags;
4878 vsi->info.valid_sections =
4879 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
4880 memset(&ctxt, 0, sizeof(ctxt));
4881 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4882 ctxt.seid = vsi->seid;
4883
4884 hw = I40E_VSI_TO_HW(vsi);
4885 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4886 if (ret != I40E_SUCCESS)
4887 PMD_DRV_LOG(ERR, "Failed to update VSI params");
4888
4889 return ret;
4890 }
4891
4892 static int
4893 i40e_vsi_update_tc_bandwidth(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
4894 {
4895 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
4896 int i, ret;
4897 struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
4898
4899 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4900 if (ret != I40E_SUCCESS)
4901 return ret;
4902
4903 if (!vsi->seid) {
4904 PMD_DRV_LOG(ERR, "seid not valid");
4905 return -EINVAL;
4906 }
4907
4908 memset(&tc_bw_data, 0, sizeof(tc_bw_data));
4909 tc_bw_data.tc_valid_bits = enabled_tcmap;
4910 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4911 tc_bw_data.tc_bw_credits[i] =
4912 (enabled_tcmap & (1 << i)) ? 1 : 0;
4913
4914 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
4915 if (ret != I40E_SUCCESS) {
4916 PMD_DRV_LOG(ERR, "Failed to configure TC BW");
4917 return ret;
4918 }
4919
4920 rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
4921 sizeof(vsi->info.qs_handle));
4922 return I40E_SUCCESS;
4923 }
4924
4925 static enum i40e_status_code
4926 i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
4927 struct i40e_aqc_vsi_properties_data *info,
4928 uint8_t enabled_tcmap)
4929 {
4930 enum i40e_status_code ret;
4931 int i, total_tc = 0;
4932 uint16_t qpnum_per_tc, bsf, qp_idx;
4933
4934 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
4935 if (ret != I40E_SUCCESS)
4936 return ret;
4937
4938 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4939 if (enabled_tcmap & (1 << i))
4940 total_tc++;
4941 if (total_tc == 0)
4942 total_tc = 1;
4943 vsi->enabled_tc = enabled_tcmap;
4944
4945 /* Number of queues per enabled TC */
4946 qpnum_per_tc = i40e_align_floor(vsi->nb_qps / total_tc);
4947 qpnum_per_tc = RTE_MIN(qpnum_per_tc, I40E_MAX_Q_PER_TC);
4948 bsf = rte_bsf32(qpnum_per_tc);
4949
4950 /* Adjust the queue number to actual queues that can be applied */
4951 if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
4952 vsi->nb_qps = qpnum_per_tc * total_tc;
4953
4954 /**
4955 * Configure TC and queue mapping parameters. For each enabled TC,
4956 * allocate qpnum_per_tc queues to that traffic class. Disabled TCs
4957 * are served by the default queue.
4958 */
4959 qp_idx = 0;
4960 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4961 if (vsi->enabled_tc & (1 << i)) {
4962 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
4963 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
4964 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
4965 qp_idx += qpnum_per_tc;
4966 } else
4967 info->tc_mapping[i] = 0;
4968 }
4969
4970 /* Associate queue number with VSI */
4971 if (vsi->type == I40E_VSI_SRIOV) {
4972 info->mapping_flags |=
4973 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
4974 for (i = 0; i < vsi->nb_qps; i++)
4975 info->queue_mapping[i] =
4976 rte_cpu_to_le_16(vsi->base_queue + i);
4977 } else {
4978 info->mapping_flags |=
4979 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
4980 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
4981 }
4982 info->valid_sections |=
4983 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
4984
4985 return I40E_SUCCESS;
4986 }
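/*
 * Worked example (illustrative) of the TC/queue mapping encoding above.
 * With vsi->nb_qps = 16 and enabled_tcmap = 0x3 (TC0 and TC1):
 *
 *   total_tc     = 2
 *   qpnum_per_tc = 16 / 2 = 8
 *   bsf          = rte_bsf32(8) = 3
 *
 *   tc_mapping[0] = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                   (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 *   tc_mapping[1] = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *                   (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 *
 * i.e. each enabled TC owns a run of 2^bsf = 8 queues starting at its
 * queue offset; disabled TCs keep tc_mapping[i] = 0.
 */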
4987
4988 static int
4989 i40e_veb_release(struct i40e_veb *veb)
4990 {
4991 struct i40e_vsi *vsi;
4992 struct i40e_hw *hw;
4993
4994 if (veb == NULL)
4995 return -EINVAL;
4996
4997 if (!TAILQ_EMPTY(&veb->head)) {
4998 PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove");
4999 return -EACCES;
5000 }
5001 /* associate_vsi field is NULL for floating VEB */
5002 if (veb->associate_vsi != NULL) {
5003 vsi = veb->associate_vsi;
5004 hw = I40E_VSI_TO_HW(vsi);
5005
5006 vsi->uplink_seid = veb->uplink_seid;
5007 vsi->veb = NULL;
5008 } else {
5009 veb->associate_pf->main_vsi->floating_veb = NULL;
5010 hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi);
5011 }
5012
5013 i40e_aq_delete_element(hw, veb->seid, NULL);
5014 rte_free(veb);
5015 return I40E_SUCCESS;
5016 }
5017
5018 /* Setup a veb */
5019 static struct i40e_veb *
5020 i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
5021 {
5022 struct i40e_veb *veb;
5023 int ret;
5024 struct i40e_hw *hw;
5025
5026 if (pf == NULL) {
5027 PMD_DRV_LOG(ERR,
5028 "veb setup failed, associated PF shouldn't null");
5029 return NULL;
5030 }
5031 hw = I40E_PF_TO_HW(pf);
5032
5033 veb = rte_zmalloc("i40e_veb", sizeof(struct i40e_veb), 0);
5034 if (!veb) {
5035 PMD_DRV_LOG(ERR, "Failed to allocate memory for veb");
5036 goto fail;
5037 }
5038
5039 veb->associate_vsi = vsi;
5040 veb->associate_pf = pf;
5041 TAILQ_INIT(&veb->head);
5042 veb->uplink_seid = vsi ? vsi->uplink_seid : 0;
5043
5044 /* create floating veb if vsi is NULL */
5045 if (vsi != NULL) {
5046 ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
5047 I40E_DEFAULT_TCMAP, false,
5048 &veb->seid, false, NULL);
5049 } else {
5050 ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
5051 true, &veb->seid, false, NULL);
5052 }
5053
5054 if (ret != I40E_SUCCESS) {
5055 PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
5056 hw->aq.asq_last_status);
5057 goto fail;
5058 }
5059 veb->enabled_tc = I40E_DEFAULT_TCMAP;
5060
5061 /* get statistics index */
5062 ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
5063 &veb->stats_idx, NULL, NULL, NULL);
5064 if (ret != I40E_SUCCESS) {
5065 PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
5066 hw->aq.asq_last_status);
5067 goto fail;
5068 }
5069 /* Get VEB bandwidth, to be implemented */
5070 /* The associated VSI is now bound to the VEB, set its uplink to this VEB */
5071 if (vsi)
5072 vsi->uplink_seid = veb->seid;
5073
5074 return veb;
5075 fail:
5076 rte_free(veb);
5077 return NULL;
5078 }
5079
5080 int
5081 i40e_vsi_release(struct i40e_vsi *vsi)
5082 {
5083 struct i40e_pf *pf;
5084 struct i40e_hw *hw;
5085 struct i40e_vsi_list *vsi_list;
5086 void *temp;
5087 int ret;
5088 struct i40e_mac_filter *f;
5089 uint16_t user_param;
5090
5091 if (!vsi)
5092 return I40E_SUCCESS;
5093
5094 if (!vsi->adapter)
5095 return -EFAULT;
5096
5097 user_param = vsi->user_param;
5098
5099 pf = I40E_VSI_TO_PF(vsi);
5100 hw = I40E_VSI_TO_HW(vsi);
5101
5102 /* VSI has children attached, release the children first */
5103 if (vsi->veb) {
5104 TAILQ_FOREACH_SAFE(vsi_list, &vsi->veb->head, list, temp) {
5105 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5106 return -1;
5107 }
5108 i40e_veb_release(vsi->veb);
5109 }
5110
5111 if (vsi->floating_veb) {
5112 TAILQ_FOREACH_SAFE(vsi_list, &vsi->floating_veb->head, list, temp) {
5113 if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS)
5114 return -1;
5115 }
5116 }
5117
5118 /* Remove all macvlan filters of the VSI */
5119 i40e_vsi_remove_all_macvlan_filter(vsi);
5120 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
5121 rte_free(f);
5122
5123 if (vsi->type != I40E_VSI_MAIN &&
5124 ((vsi->type != I40E_VSI_SRIOV) ||
5125 !pf->floating_veb_list[user_param])) {
5126 /* Remove vsi from parent's sibling list */
5127 if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) {
5128 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5129 return I40E_ERR_PARAM;
5130 }
5131 TAILQ_REMOVE(&vsi->parent_vsi->veb->head,
5132 &vsi->sib_vsi_list, list);
5133
5134 /* Remove all switch element of the VSI */
5135 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5136 if (ret != I40E_SUCCESS)
5137 PMD_DRV_LOG(ERR, "Failed to delete element");
5138 }
5139
5140 if ((vsi->type == I40E_VSI_SRIOV) &&
5141 pf->floating_veb_list[user_param]) {
5142 /* Remove vsi from parent's sibling list */
5143 if (vsi->parent_vsi == NULL ||
5144 vsi->parent_vsi->floating_veb == NULL) {
5145 PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL");
5146 return I40E_ERR_PARAM;
5147 }
5148 TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head,
5149 &vsi->sib_vsi_list, list);
5150
5151 /* Remove all switch element of the VSI */
5152 ret = i40e_aq_delete_element(hw, vsi->seid, NULL);
5153 if (ret != I40E_SUCCESS)
5154 PMD_DRV_LOG(ERR, "Failed to delete element");
5155 }
5156
5157 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5158
5159 if (vsi->type != I40E_VSI_SRIOV)
5160 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5161 rte_free(vsi);
5162
5163 return I40E_SUCCESS;
5164 }
5165
5166 static int
5167 i40e_update_default_filter_setting(struct i40e_vsi *vsi)
5168 {
5169 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5170 struct i40e_aqc_remove_macvlan_element_data def_filter;
5171 struct i40e_mac_filter_info filter;
5172 int ret;
5173
5174 if (vsi->type != I40E_VSI_MAIN)
5175 return I40E_ERR_CONFIG;
5176 memset(&def_filter, 0, sizeof(def_filter));
5177 rte_memcpy(def_filter.mac_addr, hw->mac.perm_addr,
5178 ETH_ADDR_LEN);
5179 def_filter.vlan_tag = 0;
5180 def_filter.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
5181 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
5182 ret = i40e_aq_remove_macvlan(hw, vsi->seid, &def_filter, 1, NULL);
5183 if (ret != I40E_SUCCESS) {
5184 struct i40e_mac_filter *f;
5185 struct ether_addr *mac;
5186
5187 PMD_DRV_LOG(DEBUG,
5188 "Cannot remove the default macvlan filter");
5189 /* The permanent MAC still needs to be added to the MAC list */
5190 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
5191 if (f == NULL) {
5192 PMD_DRV_LOG(ERR, "failed to allocate memory");
5193 return I40E_ERR_NO_MEMORY;
5194 }
5195 mac = &f->mac_info.mac_addr;
5196 rte_memcpy(&mac->addr_bytes, hw->mac.perm_addr,
5197 ETH_ADDR_LEN);
5198 f->mac_info.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5199 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
5200 vsi->mac_num++;
5201
5202 return ret;
5203 }
5204 rte_memcpy(&filter.mac_addr,
5205 (struct ether_addr *)(hw->mac.perm_addr), ETH_ADDR_LEN);
5206 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5207 return i40e_vsi_add_mac(vsi, &filter);
5208 }
5209
5210 /*
5211 * i40e_vsi_get_bw_config - Query VSI BW Information
5212 * @vsi: the VSI to be queried
5213 *
5214 * Returns 0 on success, negative value on failure
5215 */
5216 static enum i40e_status_code
5217 i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
5218 {
5219 struct i40e_aqc_query_vsi_bw_config_resp bw_config;
5220 struct i40e_aqc_query_vsi_ets_sla_config_resp ets_sla_config;
5221 struct i40e_hw *hw = &vsi->adapter->hw;
5222 i40e_status ret;
5223 int i;
5224 uint32_t bw_max;
5225
5226 memset(&bw_config, 0, sizeof(bw_config));
5227 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5228 if (ret != I40E_SUCCESS) {
5229 PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u",
5230 hw->aq.asq_last_status);
5231 return ret;
5232 }
5233
5234 memset(&ets_sla_config, 0, sizeof(ets_sla_config));
5235 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
5236 &ets_sla_config, NULL);
5237 if (ret != I40E_SUCCESS) {
5238 PMD_DRV_LOG(ERR,
5239 "VSI failed to get TC bandwdith configuration %u",
5240 hw->aq.asq_last_status);
5241 return ret;
5242 }
5243
5244 /* store and print out BW info */
5245 vsi->bw_info.bw_limit = rte_le_to_cpu_16(bw_config.port_bw_limit);
5246 vsi->bw_info.bw_max = bw_config.max_bw;
5247 PMD_DRV_LOG(DEBUG, "VSI bw limit:%u", vsi->bw_info.bw_limit);
5248 PMD_DRV_LOG(DEBUG, "VSI max_bw:%u", vsi->bw_info.bw_max);
5249 bw_max = rte_le_to_cpu_16(ets_sla_config.tc_bw_max[0]) |
5250 (rte_le_to_cpu_16(ets_sla_config.tc_bw_max[1]) <<
5251 I40E_16_BIT_WIDTH);
5252 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5253 vsi->bw_info.bw_ets_share_credits[i] =
5254 ets_sla_config.share_credits[i];
5255 vsi->bw_info.bw_ets_credits[i] =
5256 rte_le_to_cpu_16(ets_sla_config.credits[i]);
5257 /* 4 bits per TC, 4th bit is reserved */
5258 vsi->bw_info.bw_ets_max[i] =
5259 (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
5260 RTE_LEN2MASK(3, uint8_t));
5261 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
5262 vsi->bw_info.bw_ets_share_credits[i]);
5263 PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
5264 vsi->bw_info.bw_ets_credits[i]);
5265 PMD_DRV_LOG(DEBUG, "\tVSI TC%u: max credits: %u", i,
5266 vsi->bw_info.bw_ets_max[i]);
5267 }
5268
5269 return I40E_SUCCESS;
5270 }
5271
5272 /* i40e_enable_pf_lb
5273 * @pf: pointer to the pf structure
5274 *
5275 * allow loopback on pf
5276 */
5277 static inline void
5278 i40e_enable_pf_lb(struct i40e_pf *pf)
5279 {
5280 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5281 struct i40e_vsi_context ctxt;
5282 int ret;
5283
5284 /* Use the FW API if FW >= v5.0 */
5285 if (hw->aq.fw_maj_ver < 5) {
5286 PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
5287 return;
5288 }
5289
5290 memset(&ctxt, 0, sizeof(ctxt));
5291 ctxt.seid = pf->main_vsi_seid;
5292 ctxt.pf_num = hw->pf_id;
5293 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5294 if (ret) {
5295 PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
5296 ret, hw->aq.asq_last_status);
5297 return;
5298 }
5299 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5300 ctxt.info.valid_sections =
5301 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5302 ctxt.info.switch_id |=
5303 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5304
5305 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5306 if (ret)
5307 PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
5308 hw->aq.asq_last_status);
5309 }
5310
5311 /* Setup a VSI */
5312 struct i40e_vsi *
5313 i40e_vsi_setup(struct i40e_pf *pf,
5314 enum i40e_vsi_type type,
5315 struct i40e_vsi *uplink_vsi,
5316 uint16_t user_param)
5317 {
5318 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5319 struct i40e_vsi *vsi;
5320 struct i40e_mac_filter_info filter;
5321 int ret;
5322 struct i40e_vsi_context ctxt;
5323 struct ether_addr broadcast =
5324 {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
5325
5326 if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
5327 uplink_vsi == NULL) {
5328 PMD_DRV_LOG(ERR,
5329 "VSI setup failed, VSI link shouldn't be NULL");
5330 return NULL;
5331 }
5332
5333 if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
5334 PMD_DRV_LOG(ERR,
5335 "VSI setup failed, MAIN VSI uplink VSI should be NULL");
5336 return NULL;
5337 }
5338
5339 /* Two situations:
5340 * 1. type is not MAIN and uplink VSI is not NULL:
5341 * if the uplink VSI hasn't set up a VEB yet, create one first under its veb field.
5342 * 2. type is SRIOV and uplink VSI is NULL:
5343 * if the floating VEB is NULL, create one under the floating_veb field.
5344 */
5345
5346 if (type != I40E_VSI_MAIN && uplink_vsi != NULL &&
5347 uplink_vsi->veb == NULL) {
5348 uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi);
5349
5350 if (uplink_vsi->veb == NULL) {
5351 PMD_DRV_LOG(ERR, "VEB setup failed");
5352 return NULL;
5353 }
5354 /* set ALLOWLOOPBACK on pf when the VEB is created */
5355 i40e_enable_pf_lb(pf);
5356 }
5357
5358 if (type == I40E_VSI_SRIOV && uplink_vsi == NULL &&
5359 pf->main_vsi->floating_veb == NULL) {
5360 pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi);
5361
5362 if (pf->main_vsi->floating_veb == NULL) {
5363 PMD_DRV_LOG(ERR, "VEB setup failed");
5364 return NULL;
5365 }
5366 }
5367
5368 vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
5369 if (!vsi) {
5370 PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi");
5371 return NULL;
5372 }
5373 TAILQ_INIT(&vsi->mac_list);
5374 vsi->type = type;
5375 vsi->adapter = I40E_PF_TO_ADAPTER(pf);
5376 vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
5377 vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
5378 vsi->user_param = user_param;
5379 vsi->vlan_anti_spoof_on = 0;
5380 vsi->vlan_filter_on = 0;
5381 /* Allocate queues */
5382 switch (vsi->type) {
5383 case I40E_VSI_MAIN :
5384 vsi->nb_qps = pf->lan_nb_qps;
5385 break;
5386 case I40E_VSI_SRIOV :
5387 vsi->nb_qps = pf->vf_nb_qps;
5388 break;
5389 case I40E_VSI_VMDQ2:
5390 vsi->nb_qps = pf->vmdq_nb_qps;
5391 break;
5392 case I40E_VSI_FDIR:
5393 vsi->nb_qps = pf->fdir_nb_qps;
5394 break;
5395 default:
5396 goto fail_mem;
5397 }
5398 /*
5399 * The filter status descriptor is reported in rx queue 0,
5400 * while the tx queue for fdir filter programming has no
5401 * such constraint and may use a non-zero queue.
5402 * To simplify things, the FDIR VSI uses queue pair 0.
5403 * To make sure it uses queue pair 0, queue allocation
5404 * must be done before this function is called.
5405 */
5406 if (type != I40E_VSI_FDIR) {
5407 ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
5408 if (ret < 0) {
5409 PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
5410 vsi->seid, ret);
5411 goto fail_mem;
5412 }
5413 vsi->base_queue = ret;
5414 } else
5415 vsi->base_queue = I40E_FDIR_QUEUE_ID;
5416
5417 /* VF has MSIX interrupt in VF range, don't allocate here */
5418 if (type == I40E_VSI_MAIN) {
5419 if (pf->support_multi_driver) {
5420 /* If multi-driver support is enabled, INT0 must be used instead
5421 * of allocating from the msix pool. The msix pool is initialized
5422 * from INT1, so it's OK to just set msix_intr to 0 and nb_msix
5423 * to 1 without calling i40e_res_pool_alloc.
5424 */
5425 vsi->msix_intr = 0;
5426 vsi->nb_msix = 1;
5427 } else {
5428 ret = i40e_res_pool_alloc(&pf->msix_pool,
5429 RTE_MIN(vsi->nb_qps,
5430 RTE_MAX_RXTX_INTR_VEC_ID));
5431 if (ret < 0) {
5432 PMD_DRV_LOG(ERR,
5433 "VSI MAIN %d get heap failed %d",
5434 vsi->seid, ret);
5435 goto fail_queue_alloc;
5436 }
5437 vsi->msix_intr = ret;
5438 vsi->nb_msix = RTE_MIN(vsi->nb_qps,
5439 RTE_MAX_RXTX_INTR_VEC_ID);
5440 }
5441 } else if (type != I40E_VSI_SRIOV) {
5442 ret = i40e_res_pool_alloc(&pf->msix_pool, 1);
5443 if (ret < 0) {
5444 PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", vsi->seid, ret);
5445 goto fail_queue_alloc;
5446 }
5447 vsi->msix_intr = ret;
5448 vsi->nb_msix = 1;
5449 } else {
5450 vsi->msix_intr = 0;
5451 vsi->nb_msix = 0;
5452 }
5453
5454 /* Add VSI */
5455 if (type == I40E_VSI_MAIN) {
5456 /* For main VSI, no need to add since it's default one */
5457 vsi->uplink_seid = pf->mac_seid;
5458 vsi->seid = pf->main_vsi_seid;
5459 /* Bind queues with specific MSIX interrupt */
5460 /**
5461 * At least 2 interrupts are needed: one for the misc cause, which
5462 * is enabled from the OS side, and another for binding the queues'
5463 * interrupts from the device side only.
5464 */
5465
5466 /* Get default VSI parameters from hardware */
5467 memset(&ctxt, 0, sizeof(ctxt));
5468 ctxt.seid = vsi->seid;
5469 ctxt.pf_num = hw->pf_id;
5470 ctxt.uplink_seid = vsi->uplink_seid;
5471 ctxt.vf_num = 0;
5472 ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
5473 if (ret != I40E_SUCCESS) {
5474 PMD_DRV_LOG(ERR, "Failed to get VSI params");
5475 goto fail_msix_alloc;
5476 }
5477 rte_memcpy(&vsi->info, &ctxt.info,
5478 sizeof(struct i40e_aqc_vsi_properties_data));
5479 vsi->vsi_id = ctxt.vsi_number;
5480 vsi->info.valid_sections = 0;
5481
5482 /* Configure tc, enabled TC0 only */
5483 if (i40e_vsi_update_tc_bandwidth(vsi, I40E_DEFAULT_TCMAP) !=
5484 I40E_SUCCESS) {
5485 PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
5486 goto fail_msix_alloc;
5487 }
5488
5489 /* TC, queue mapping */
5490 memset(&ctxt, 0, sizeof(ctxt));
5491 vsi->info.valid_sections |=
5492 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5493 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
5494 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5495 rte_memcpy(&ctxt.info, &vsi->info,
5496 sizeof(struct i40e_aqc_vsi_properties_data));
5497 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5498 I40E_DEFAULT_TCMAP);
5499 if (ret != I40E_SUCCESS) {
5500 PMD_DRV_LOG(ERR,
5501 "Failed to configure TC queue mapping");
5502 goto fail_msix_alloc;
5503 }
5504 ctxt.seid = vsi->seid;
5505 ctxt.pf_num = hw->pf_id;
5506 ctxt.uplink_seid = vsi->uplink_seid;
5507 ctxt.vf_num = 0;
5508
5509 /* Update VSI parameters */
5510 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5511 if (ret != I40E_SUCCESS) {
5512 PMD_DRV_LOG(ERR, "Failed to update VSI params");
5513 goto fail_msix_alloc;
5514 }
5515
5516 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
5517 sizeof(vsi->info.tc_mapping));
5518 rte_memcpy(&vsi->info.queue_mapping,
5519 &ctxt.info.queue_mapping,
5520 sizeof(vsi->info.queue_mapping));
5521 vsi->info.mapping_flags = ctxt.info.mapping_flags;
5522 vsi->info.valid_sections = 0;
5523
5524 rte_memcpy(pf->dev_addr.addr_bytes, hw->mac.perm_addr,
5525 ETH_ADDR_LEN);
5526
5527 /**
5528 * Updating the default filter settings is necessary to prevent
5529 * reception of tagged packets.
5530 * Some old firmware configurations load a default macvlan
5531 * filter which accepts both tagged and untagged packets.
5532 * The update replaces it with a normal filter where needed.
5533 * For NVM 4.2.2 or later the update is not needed anymore:
5534 * firmware with correct configurations loads the expected default
5535 * macvlan filter, which cannot be removed.
5536 */
5537 i40e_update_default_filter_setting(vsi);
5538 i40e_config_qinq(hw, vsi);
5539 } else if (type == I40E_VSI_SRIOV) {
5540 memset(&ctxt, 0, sizeof(ctxt));
5541 /**
5542 * For other VSIs, the uplink_seid equals the uplink VSI's
5543 * uplink_seid since they share the same VEB.
5544 */
5545 if (uplink_vsi == NULL)
5546 vsi->uplink_seid = pf->main_vsi->floating_veb->seid;
5547 else
5548 vsi->uplink_seid = uplink_vsi->uplink_seid;
5549 ctxt.pf_num = hw->pf_id;
5550 ctxt.vf_num = hw->func_caps.vf_base_id + user_param;
5551 ctxt.uplink_seid = vsi->uplink_seid;
5552 ctxt.connection_type = 0x1;
5553 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
5554
5555 /* Use the VEB configuration if FW >= v5.0 */
5556 if (hw->aq.fw_maj_ver >= 5) {
5557 /* Configure switch ID */
5558 ctxt.info.valid_sections |=
5559 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5560 ctxt.info.switch_id =
5561 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5562 }
5563
5564 /* Configure port/vlan */
5565 ctxt.info.valid_sections |=
5566 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5567 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5568 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5569 hw->func_caps.enabled_tcmap);
5570 if (ret != I40E_SUCCESS) {
5571 PMD_DRV_LOG(ERR,
5572 "Failed to configure TC queue mapping");
5573 goto fail_msix_alloc;
5574 }
5575
5576 ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
5577 ctxt.info.valid_sections |=
5578 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5579 /**
5580 * Since the VSI is not created yet, only configure its parameters
5581 * here; the VSI itself is added below.
5582 */
5583
5584 i40e_config_qinq(hw, vsi);
5585 } else if (type == I40E_VSI_VMDQ2) {
5586 memset(&ctxt, 0, sizeof(ctxt));
5587 /*
5588 * For other VSIs, the uplink_seid equals the uplink VSI's
5589 * uplink_seid since they share the same VEB.
5590 */
5591 vsi->uplink_seid = uplink_vsi->uplink_seid;
5592 ctxt.pf_num = hw->pf_id;
5593 ctxt.vf_num = 0;
5594 ctxt.uplink_seid = vsi->uplink_seid;
5595 ctxt.connection_type = 0x1;
5596 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5597
5598 ctxt.info.valid_sections |=
5599 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5600 /* user_param carries the flag to enable loopback */
5601 if (user_param) {
5602 ctxt.info.switch_id =
5603 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
5604 ctxt.info.switch_id |=
5605 rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5606 }
5607
5608 /* Configure port/vlan */
5609 ctxt.info.valid_sections |=
5610 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5611 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
5612 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5613 I40E_DEFAULT_TCMAP);
5614 if (ret != I40E_SUCCESS) {
5615 PMD_DRV_LOG(ERR,
5616 "Failed to configure TC queue mapping");
5617 goto fail_msix_alloc;
5618 }
5619 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5620 ctxt.info.valid_sections |=
5621 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5622 } else if (type == I40E_VSI_FDIR) {
5623 memset(&ctxt, 0, sizeof(ctxt));
5624 vsi->uplink_seid = uplink_vsi->uplink_seid;
5625 ctxt.pf_num = hw->pf_id;
5626 ctxt.vf_num = 0;
5627 ctxt.uplink_seid = vsi->uplink_seid;
5628 ctxt.connection_type = 0x1; /* regular data port */
5629 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
5630 ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
5631 I40E_DEFAULT_TCMAP);
5632 if (ret != I40E_SUCCESS) {
5633 PMD_DRV_LOG(ERR,
5634 "Failed to configure TC queue mapping.");
5635 goto fail_msix_alloc;
5636 }
5637 ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
5638 ctxt.info.valid_sections |=
5639 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
5640 } else {
5641 PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
5642 goto fail_msix_alloc;
5643 }
5644
5645 if (vsi->type != I40E_VSI_MAIN) {
5646 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5647 if (ret != I40E_SUCCESS) {
5648 PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d",
5649 hw->aq.asq_last_status);
5650 goto fail_msix_alloc;
5651 }
5652 memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
5653 vsi->info.valid_sections = 0;
5654 vsi->seid = ctxt.seid;
5655 vsi->vsi_id = ctxt.vsi_number;
5656 vsi->sib_vsi_list.vsi = vsi;
5657 if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) {
5658 TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head,
5659 &vsi->sib_vsi_list, list);
5660 } else {
5661 TAILQ_INSERT_TAIL(&uplink_vsi->veb->head,
5662 &vsi->sib_vsi_list, list);
5663 }
5664 }
5665
5666 /* MAC/VLAN configuration */
5667 rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
5668 filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
5669
5670 ret = i40e_vsi_add_mac(vsi, &filter);
5671 if (ret != I40E_SUCCESS) {
5672 PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
5673 goto fail_msix_alloc;
5674 }
5675
5676 /* Get VSI BW information */
5677 i40e_vsi_get_bw_config(vsi);
5678 return vsi;
5679 fail_msix_alloc:
5680 i40e_res_pool_free(&pf->msix_pool, vsi->msix_intr);
5681 fail_queue_alloc:
5682 i40e_res_pool_free(&pf->qp_pool, vsi->base_queue);
5683 fail_mem:
5684 rte_free(vsi);
5685 return NULL;
5686 }
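
/*
 * A minimal usage sketch (mirrors the callers below): the main VSI is
 * created first with no uplink, and child VSIs hang off it, e.g.
 *
 *	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
 *	if (vsi != NULL)
 *		vmdq_vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi,
 *					  vmdq_conf->enable_loop_back);
 *
 * On failure NULL is returned and any queue/MSIX resources already
 * taken from the PF pools have been freed.
 */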
5687
5688 /* Configure vlan filter on or off */
5689 int
5690 i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
5691 {
5692 int i, num;
5693 struct i40e_mac_filter *f;
5694 void *temp;
5695 struct i40e_mac_filter_info *mac_filter;
5696 enum rte_mac_filter_type desired_filter;
5697 int ret = I40E_SUCCESS;
5698
5699 if (on) {
5700 /* Filter to match MAC and VLAN */
5701 desired_filter = RTE_MACVLAN_PERFECT_MATCH;
5702 } else {
5703 /* Filter to match only MAC */
5704 desired_filter = RTE_MAC_PERFECT_MATCH;
5705 }
5706
5707 num = vsi->mac_num;
5708
5709 mac_filter = rte_zmalloc("mac_filter_info_data",
5710 num * sizeof(*mac_filter), 0);
5711 if (mac_filter == NULL) {
5712 PMD_DRV_LOG(ERR, "failed to allocate memory");
5713 return I40E_ERR_NO_MEMORY;
5714 }
5715
5716 i = 0;
5717
5718 /* Remove all existing mac */
5719 TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
5720 mac_filter[i] = f->mac_info;
5721 ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
5722 if (ret) {
5723 PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5724 on ? "enable" : "disable");
5725 goto DONE;
5726 }
5727 i++;
5728 }
5729
5730 /* Override with new filter */
5731 for (i = 0; i < num; i++) {
5732 mac_filter[i].filter_type = desired_filter;
5733 ret = i40e_vsi_add_mac(vsi, &mac_filter[i]);
5734 if (ret) {
5735 PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
5736 on ? "enable" : "disable");
5737 goto DONE;
5738 }
5739 }
5740
5741 DONE:
5742 rte_free(mac_filter);
5743 return ret;
5744 }
5745
5746 /* Configure vlan stripping on or off */
5747 int
5748 i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
5749 {
5750 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5751 struct i40e_vsi_context ctxt;
5752 uint8_t vlan_flags;
5753 int ret = I40E_SUCCESS;
5754
5755 /* Check if it has been already on or off */
5756 if (vsi->info.valid_sections &
5757 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID)) {
5758 if (on) {
5759 if ((vsi->info.port_vlan_flags &
5760 I40E_AQ_VSI_PVLAN_EMOD_MASK) == 0)
5761 return 0; /* already on */
5762 } else {
5763 if ((vsi->info.port_vlan_flags &
5764 I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
5765 I40E_AQ_VSI_PVLAN_EMOD_MASK)
5766 return 0; /* already off */
5767 }
5768 }
5769
5770 if (on)
5771 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
5772 else
5773 vlan_flags = I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
5774 vsi->info.valid_sections =
5775 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
5776 vsi->info.port_vlan_flags &= ~(I40E_AQ_VSI_PVLAN_EMOD_MASK);
5777 vsi->info.port_vlan_flags |= vlan_flags;
5778 ctxt.seid = vsi->seid;
5779 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
5780 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5781 if (ret)
5782 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
5783 on ? "enable" : "disable");
5784
5785 return ret;
5786 }
5787
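/* Apply the VLAN offload (strip/filter/extend) and port VLAN (PVID)
 * settings from the device configuration to the hardware.
 */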
5788 static int
5789 i40e_dev_init_vlan(struct rte_eth_dev *dev)
5790 {
5791 struct rte_eth_dev_data *data = dev->data;
5792 int ret;
5793 int mask = 0;
5794
5795 /* Apply vlan offload setting */
5796 mask = ETH_VLAN_STRIP_MASK |
5797 ETH_VLAN_FILTER_MASK |
5798 ETH_VLAN_EXTEND_MASK;
5799 ret = i40e_vlan_offload_set(dev, mask);
5800 if (ret) {
5801 PMD_DRV_LOG(INFO, "Failed to update vlan offload");
5802 return ret;
5803 }
5804
5805 /* Apply pvid setting */
5806 ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
5807 data->dev_conf.txmode.hw_vlan_insert_pvid);
5808 if (ret)
5809 PMD_DRV_LOG(INFO, "Failed to update VSI params");
5810
5811 return ret;
5812 }
5813
5814 static int
5815 i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on)
5816 {
5817 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
5818
5819 return i40e_aq_set_port_parameters(hw, vsi->seid, 0, 1, on, NULL);
5820 }
5821
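/*
 * Derive the flow control mode (none/rx/tx/full) from the link
 * auto-negotiation result and program the TX/RX pause enable bits.
 * If the link status cannot be read or auto-negotiation has not
 * completed, flow control is disabled.
 */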
5822 static int
5823 i40e_update_flow_control(struct i40e_hw *hw)
5824 {
5825 #define I40E_LINK_PAUSE_RXTX (I40E_AQ_LINK_PAUSE_RX | I40E_AQ_LINK_PAUSE_TX)
5826 struct i40e_link_status link_status;
5827 uint32_t rxfc = 0, txfc = 0, reg;
5828 uint8_t an_info;
5829 int ret;
5830
5831 memset(&link_status, 0, sizeof(link_status));
5832 ret = i40e_aq_get_link_info(hw, FALSE, &link_status, NULL);
5833 if (ret != I40E_SUCCESS) {
5834 PMD_DRV_LOG(ERR, "Failed to get link status information");
5835 goto write_reg; /* Disable flow control */
5836 }
5837
5838 an_info = hw->phy.link_info.an_info;
5839 if (!(an_info & I40E_AQ_AN_COMPLETED)) {
5840 PMD_DRV_LOG(INFO, "Link auto negotiation not completed");
5841 ret = I40E_ERR_NOT_READY;
5842 goto write_reg; /* Disable flow control */
5843 }
5844 /**
5845 * If link auto negotiation is enabled, flow control needs to
5846 * be configured according to it
5847 */
5848 switch (an_info & I40E_LINK_PAUSE_RXTX) {
5849 case I40E_LINK_PAUSE_RXTX:
5850 rxfc = 1;
5851 txfc = 1;
5852 hw->fc.current_mode = I40E_FC_FULL;
5853 break;
5854 case I40E_AQ_LINK_PAUSE_RX:
5855 rxfc = 1;
5856 hw->fc.current_mode = I40E_FC_RX_PAUSE;
5857 break;
5858 case I40E_AQ_LINK_PAUSE_TX:
5859 txfc = 1;
5860 hw->fc.current_mode = I40E_FC_TX_PAUSE;
5861 break;
5862 default:
5863 hw->fc.current_mode = I40E_FC_NONE;
5864 break;
5865 }
5866
5867 write_reg:
5868 I40E_WRITE_REG(hw, I40E_PRTDCB_FCCFG,
5869 txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
5870 reg = I40E_READ_REG(hw, I40E_PRTDCB_MFLCN);
5871 reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
5872 reg |= rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT;
5873 I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, reg);
5874
5875 return ret;
5876 }
5877
5878 /* PF setup */
5879 static int
5880 i40e_pf_setup(struct i40e_pf *pf)
5881 {
5882 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5883 struct i40e_filter_control_settings settings;
5884 struct i40e_vsi *vsi;
5885 int ret;
5886
5887 /* Clear all stats counters */
5888 pf->offset_loaded = FALSE;
5889 memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
5890 memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
5891 memset(&pf->internal_stats, 0, sizeof(struct i40e_eth_stats));
5892 memset(&pf->internal_stats_offset, 0, sizeof(struct i40e_eth_stats));
5893
5894 ret = i40e_pf_get_switch_config(pf);
5895 if (ret != I40E_SUCCESS) {
5896 PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
5897 return ret;
5898 }
5899
5900 ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
5901 if (ret)
5902 PMD_INIT_LOG(WARNING,
5903 "failed to allocate switch domain for device %d", ret);
5904
5905 if (pf->flags & I40E_FLAG_FDIR) {
5906 /* make sure queues are allocated first, so FDIR uses queue pair 0 */
5907 ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
5908 if (ret != I40E_FDIR_QUEUE_ID) {
5909 PMD_DRV_LOG(ERR,
5910 "queue allocation fails for FDIR: ret =%d",
5911 ret);
5912 pf->flags &= ~I40E_FLAG_FDIR;
5913 }
5914 }
5915 /* main VSI setup */
5916 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
5917 if (!vsi) {
5918 PMD_DRV_LOG(ERR, "Setup of main vsi failed");
5919 return I40E_ERR_NOT_READY;
5920 }
5921 pf->main_vsi = vsi;
5922
5923 /* Configure filter control */
5924 memset(&settings, 0, sizeof(settings));
5925 if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
5926 settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
5927 else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
5928 settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
5929 else {
5930 PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
5931 hw->func_caps.rss_table_size);
5932 return I40E_ERR_PARAM;
5933 }
5934 PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
5935 hw->func_caps.rss_table_size);
5936 pf->hash_lut_size = hw->func_caps.rss_table_size;
5937
5938 /* Enable ethtype and macvlan filters */
5939 settings.enable_ethtype = TRUE;
5940 settings.enable_macvlan = TRUE;
5941 ret = i40e_set_filter_control(hw, &settings);
5942 if (ret)
5943 PMD_INIT_LOG(WARNING, "setup_pf_filter_control failed: %d",
5944 ret);
5945
5946 /* Update flow control according to the auto negotiation */
5947 i40e_update_flow_control(hw);
5948
5949 return I40E_SUCCESS;
5950 }
5951
5952 int
5953 i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
5954 {
5955 uint32_t reg;
5956 uint16_t j;
5957
5958 /**
5959 * Set or clear TX Queue Disable flags,
5960 * which is required by hardware.
5961 */
5962 i40e_pre_tx_queue_cfg(hw, q_idx, on);
5963 rte_delay_us(I40E_PRE_TX_Q_CFG_WAIT_US);
5964
5965 /* Wait until the request is finished */
5966 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5967 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5968 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5969 if (!(((reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 0x1) ^
5970 ((reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)
5971 & 0x1))) {
5972 break;
5973 }
5974 }
5975 if (on) {
5976 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
5977 return I40E_SUCCESS; /* already on, skip next steps */
5978
5979 I40E_WRITE_REG(hw, I40E_QTX_HEAD(q_idx), 0);
5980 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
5981 } else {
5982 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5983 return I40E_SUCCESS; /* already off, skip next steps */
5984 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
5985 }
5986 /* Write the register */
5987 I40E_WRITE_REG(hw, I40E_QTX_ENA(q_idx), reg);
5988 /* Check the result */
5989 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
5990 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
5991 reg = I40E_READ_REG(hw, I40E_QTX_ENA(q_idx));
5992 if (on) {
5993 if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5994 (reg & I40E_QTX_ENA_QENA_STAT_MASK))
5995 break;
5996 } else {
5997 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK) &&
5998 !(reg & I40E_QTX_ENA_QENA_STAT_MASK))
5999 break;
6000 }
6001 }
6002 /* Check if it is timeout */
6003 if (j >= I40E_CHK_Q_ENA_COUNT) {
6004 PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]",
6005 (on ? "enable" : "disable"), q_idx);
6006 return I40E_ERR_TIMEOUT;
6007 }
6008
6009 return I40E_SUCCESS;
6010 }
6011
6012 /* Switch on or off the tx queues */
6013 static int
6014 i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
6015 {
6016 struct rte_eth_dev_data *dev_data = pf->dev_data;
6017 struct i40e_tx_queue *txq;
6018 struct rte_eth_dev *dev = pf->adapter->eth_dev;
6019 uint16_t i;
6020 int ret;
6021
6022 for (i = 0; i < dev_data->nb_tx_queues; i++) {
6023 txq = dev_data->tx_queues[i];
6024 /* Don't operate the queue if not configured or
6025 * if starting only per queue */
6026 if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
6027 continue;
6028 if (on)
6029 ret = i40e_dev_tx_queue_start(dev, i);
6030 else
6031 ret = i40e_dev_tx_queue_stop(dev, i);
6032 if (ret != I40E_SUCCESS)
6033 return ret;
6034 }
6035
6036 return I40E_SUCCESS;
6037 }
6038
6039 int
6040 i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
6041 {
6042 uint32_t reg;
6043 uint16_t j;
6044
6045 /* Wait until the request is finished */
6046 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6047 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6048 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6049 if (!((reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 0x1) ^
6050 ((reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 0x1))
6051 break;
6052 }
6053
6054 if (on) {
6055 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
6056 return I40E_SUCCESS; /* Already on, skip next steps */
6057 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
6058 } else {
6059 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6060 return I40E_SUCCESS; /* Already off, skip next steps */
6061 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
6062 }
6063
6064 /* Write the register */
6065 I40E_WRITE_REG(hw, I40E_QRX_ENA(q_idx), reg);
6066 /* Check the result */
6067 for (j = 0; j < I40E_CHK_Q_ENA_COUNT; j++) {
6068 rte_delay_us(I40E_CHK_Q_ENA_INTERVAL_US);
6069 reg = I40E_READ_REG(hw, I40E_QRX_ENA(q_idx));
6070 if (on) {
6071 if ((reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6072 (reg & I40E_QRX_ENA_QENA_STAT_MASK))
6073 break;
6074 } else {
6075 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK) &&
6076 !(reg & I40E_QRX_ENA_QENA_STAT_MASK))
6077 break;
6078 }
6079 }
6080
6081 /* Check if it is timeout */
6082 if (j >= I40E_CHK_Q_ENA_COUNT) {
6083 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
6084 (on ? "enable" : "disable"), q_idx);
6085 return I40E_ERR_TIMEOUT;
6086 }
6087
6088 return I40E_SUCCESS;
6089 }
6090 /* Switch on or off the rx queues */
6091 static int
6092 i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
6093 {
6094 struct rte_eth_dev_data *dev_data = pf->dev_data;
6095 struct i40e_rx_queue *rxq;
6096 struct rte_eth_dev *dev = pf->adapter->eth_dev;
6097 uint16_t i;
6098 int ret;
6099
6100 for (i = 0; i < dev_data->nb_rx_queues; i++) {
6101 rxq = dev_data->rx_queues[i];
6102 /* Don't operate the queue if not configured or
6103 * if starting only per queue */
6104 if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
6105 continue;
6106 if (on)
6107 ret = i40e_dev_rx_queue_start(dev, i);
6108 else
6109 ret = i40e_dev_rx_queue_stop(dev, i);
6110 if (ret != I40E_SUCCESS)
6111 return ret;
6112 }
6113
6114 return I40E_SUCCESS;
6115 }
6116
6117 /* Switch on or off all the rx/tx queues */
6118 int
6119 i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
6120 {
6121 int ret;
6122
6123 if (on) {
6124 /* enable rx queues before enabling tx queues */
6125 ret = i40e_dev_switch_rx_queues(pf, on);
6126 if (ret) {
6127 PMD_DRV_LOG(ERR, "Failed to switch rx queues");
6128 return ret;
6129 }
6130 ret = i40e_dev_switch_tx_queues(pf, on);
6131 } else {
6132 /* Stop tx queues before stopping rx queues */
6133 ret = i40e_dev_switch_tx_queues(pf, on);
6134 if (ret) {
6135 PMD_DRV_LOG(ERR, "Failed to switch tx queues");
6136 return ret;
6137 }
6138 ret = i40e_dev_switch_rx_queues(pf, on);
6139 }
6140
6141 return ret;
6142 }
6143
6144 /* Initialize VSI for TX */
6145 static int
6146 i40e_dev_tx_init(struct i40e_pf *pf)
6147 {
6148 struct rte_eth_dev_data *data = pf->dev_data;
6149 uint16_t i;
6150 uint32_t ret = I40E_SUCCESS;
6151 struct i40e_tx_queue *txq;
6152
6153 for (i = 0; i < data->nb_tx_queues; i++) {
6154 txq = data->tx_queues[i];
6155 if (!txq || !txq->q_set)
6156 continue;
6157 ret = i40e_tx_queue_init(txq);
6158 if (ret != I40E_SUCCESS)
6159 break;
6160 }
6161 if (ret == I40E_SUCCESS)
6162 i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
6163 ->eth_dev);
6164
6165 return ret;
6166 }
6167
6168 /* Initialize VSI for RX */
6169 static int
6170 i40e_dev_rx_init(struct i40e_pf *pf)
6171 {
6172 struct rte_eth_dev_data *data = pf->dev_data;
6173 int ret = I40E_SUCCESS;
6174 uint16_t i;
6175 struct i40e_rx_queue *rxq;
6176
6177 i40e_pf_config_mq_rx(pf);
6178 for (i = 0; i < data->nb_rx_queues; i++) {
6179 rxq = data->rx_queues[i];
6180 if (!rxq || !rxq->q_set)
6181 continue;
6182
6183 ret = i40e_rx_queue_init(rxq);
6184 if (ret != I40E_SUCCESS) {
6185 PMD_DRV_LOG(ERR,
6186 "Failed to do RX queue initialization");
6187 break;
6188 }
6189 }
6190 if (ret == I40E_SUCCESS)
6191 i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
6192 ->eth_dev);
6193
6194 return ret;
6195 }
6196
6197 static int
6198 i40e_dev_rxtx_init(struct i40e_pf *pf)
6199 {
6200 int err;
6201
6202 err = i40e_dev_tx_init(pf);
6203 if (err) {
6204 PMD_DRV_LOG(ERR, "Failed to do TX initialization");
6205 return err;
6206 }
6207 err = i40e_dev_rx_init(pf);
6208 if (err) {
6209 PMD_DRV_LOG(ERR, "Failed to do RX initialization");
6210 return err;
6211 }
6212
6213 return err;
6214 }
6215
6216 static int
6217 i40e_vmdq_setup(struct rte_eth_dev *dev)
6218 {
6219 struct rte_eth_conf *conf = &dev->data->dev_conf;
6220 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6221 int i, err, conf_vsis, j, loop;
6222 struct i40e_vsi *vsi;
6223 struct i40e_vmdq_info *vmdq_info;
6224 struct rte_eth_vmdq_rx_conf *vmdq_conf;
6225 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
6226
6227 /*
6228 * Disable interrupts to avoid messages from VFs. This also avoids
6229 * a race condition during VSI creation/destruction.
6230 */
6231 i40e_pf_disable_irq0(hw);
6232
6233 if ((pf->flags & I40E_FLAG_VMDQ) == 0) {
6234 PMD_INIT_LOG(ERR, "FW doesn't support VMDQ");
6235 return -ENOTSUP;
6236 }
6237
6238 conf_vsis = conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools;
6239 if (conf_vsis > pf->max_nb_vmdq_vsi) {
6240 PMD_INIT_LOG(ERR, "VMDQ config: %u, max support:%u",
6241 conf->rx_adv_conf.vmdq_rx_conf.nb_queue_pools,
6242 pf->max_nb_vmdq_vsi);
6243 return -ENOTSUP;
6244 }
6245
6246 if (pf->vmdq != NULL) {
6247 PMD_INIT_LOG(INFO, "VMDQ already configured");
6248 return 0;
6249 }
6250
6251 pf->vmdq = rte_zmalloc("vmdq_info_struct",
6252 sizeof(*vmdq_info) * conf_vsis, 0);
6253
6254 if (pf->vmdq == NULL) {
6255 PMD_INIT_LOG(ERR, "Failed to allocate memory");
6256 return -ENOMEM;
6257 }
6258
6259 vmdq_conf = &conf->rx_adv_conf.vmdq_rx_conf;
6260
6261 /* Create VMDQ VSI */
6262 for (i = 0; i < conf_vsis; i++) {
6263 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, pf->main_vsi,
6264 vmdq_conf->enable_loop_back);
6265 if (vsi == NULL) {
6266 PMD_INIT_LOG(ERR, "Failed to create VMDQ VSI");
6267 err = -1;
6268 goto err_vsi_setup;
6269 }
6270 vmdq_info = &pf->vmdq[i];
6271 vmdq_info->pf = pf;
6272 vmdq_info->vsi = vsi;
6273 }
6274 pf->nb_cfg_vmdq_vsi = conf_vsis;
6275
6276 /* Configure Vlan */
6277 loop = sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT;
6278 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
6279 for (j = 0; j < loop && j < pf->nb_cfg_vmdq_vsi; j++) {
6280 if (vmdq_conf->pool_map[i].pools & (1UL << j)) {
6281 PMD_INIT_LOG(INFO, "Add vlan %u to vmdq pool %u",
6282 vmdq_conf->pool_map[i].vlan_id, j);
6283
6284 err = i40e_vsi_add_vlan(pf->vmdq[j].vsi,
6285 vmdq_conf->pool_map[i].vlan_id);
6286 if (err) {
6287 PMD_INIT_LOG(ERR, "Failed to add vlan");
6288 err = -1;
6289 goto err_vsi_setup;
6290 }
6291 }
6292 }
6293 }
6294
6295 i40e_pf_enable_irq0(hw);
6296
6297 return 0;
6298
6299 err_vsi_setup:
6300 for (i = 0; i < conf_vsis; i++)
6301 if (pf->vmdq[i].vsi == NULL)
6302 break;
6303 else
6304 i40e_vsi_release(pf->vmdq[i].vsi);
6305
6306 rte_free(pf->vmdq);
6307 pf->vmdq = NULL;
6308 i40e_pf_enable_irq0(hw);
6309 return err;
6310 }
6311
6312 static void
6313 i40e_stat_update_32(struct i40e_hw *hw,
6314 uint32_t reg,
6315 bool offset_loaded,
6316 uint64_t *offset,
6317 uint64_t *stat)
6318 {
6319 uint64_t new_data;
6320
6321 new_data = (uint64_t)I40E_READ_REG(hw, reg);
6322 if (!offset_loaded)
6323 *offset = new_data;
6324
6325 if (new_data >= *offset)
6326 *stat = (uint64_t)(new_data - *offset);
6327 else
6328 *stat = (uint64_t)((new_data +
6329 ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
6330 }
6331
6332 static void
6333 i40e_stat_update_48(struct i40e_hw *hw,
6334 uint32_t hireg,
6335 uint32_t loreg,
6336 bool offset_loaded,
6337 uint64_t *offset,
6338 uint64_t *stat)
6339 {
6340 uint64_t new_data;
6341
6342 new_data = (uint64_t)I40E_READ_REG(hw, loreg);
6343 new_data |= ((uint64_t)(I40E_READ_REG(hw, hireg) &
6344 I40E_16_BIT_MASK)) << I40E_32_BIT_WIDTH;
6345
6346 if (!offset_loaded)
6347 *offset = new_data;
6348
6349 if (new_data >= *offset)
6350 *stat = new_data - *offset;
6351 else
6352 *stat = (uint64_t)((new_data +
6353 ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
6354
6355 *stat &= I40E_48_BIT_MASK;
6356 }
6357
6358 /* Disable IRQ0 */
6359 void
6360 i40e_pf_disable_irq0(struct i40e_hw *hw)
6361 {
6362 /* Disable all interrupt types */
6363 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6364 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6365 I40E_WRITE_FLUSH(hw);
6366 }
6367
6368 /* Enable IRQ0 */
6369 void
6370 i40e_pf_enable_irq0(struct i40e_hw *hw)
6371 {
6372 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
6373 I40E_PFINT_DYN_CTL0_INTENA_MASK |
6374 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
6375 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
6376 I40E_WRITE_FLUSH(hw);
6377 }
6378
6379 static void
6380 i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue)
6381 {
6382 /* read pending request and disable first */
6383 i40e_pf_disable_irq0(hw);
6384 I40E_WRITE_REG(hw, I40E_PFINT_ICR0_ENA, I40E_PFINT_ICR0_ENA_MASK);
6385 I40E_WRITE_REG(hw, I40E_PFINT_STAT_CTL0,
6386 I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK);
6387
6388 if (no_queue)
6389 /* Do not link any queue with irq0 */
6390 I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
6391 I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK);
6392 }
6393
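/* Scan the global VFLR status registers for VFs that triggered a reset,
 * clear the pending bit and notify each affected VF without issuing
 * another SW reset.
 */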
6394 static void
6395 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
6396 {
6397 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6398 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6399 int i;
6400 uint16_t abs_vf_id;
6401 uint32_t index, offset, val;
6402
6403 if (!pf->vfs)
6404 return;
6405 /**
6406 * Try to find which VF triggered a reset; use the absolute VF id
6407 * for access since the register is a global register.
6408 */
6409 for (i = 0; i < pf->vf_num; i++) {
6410 abs_vf_id = hw->func_caps.vf_base_id + i;
6411 index = abs_vf_id / I40E_UINT32_BIT_SIZE;
6412 offset = abs_vf_id % I40E_UINT32_BIT_SIZE;
6413 val = I40E_READ_REG(hw, I40E_GLGEN_VFLRSTAT(index));
6414 /* VFR event occurred */
6415 if (val & (0x1 << offset)) {
6416 int ret;
6417
6418 /* Clear the event first */
6419 I40E_WRITE_REG(hw, I40E_GLGEN_VFLRSTAT(index),
6420 (0x1 << offset));
6421 PMD_DRV_LOG(INFO, "VF %u reset occurred", abs_vf_id);
6422 /**
6423 * Only notify that a VF reset event occurred;
6424 * don't trigger another SW reset.
6425 */
6426 ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
6427 if (ret != I40E_SUCCESS)
6428 PMD_DRV_LOG(ERR, "Failed to do VF reset");
6429 }
6430 }
6431 }
6432
6433 static void
6434 i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
6435 {
6436 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
6437 int i;
6438
6439 for (i = 0; i < pf->vf_num; i++)
6440 i40e_notify_vf_link_status(dev, &pf->vfs[i]);
6441 }
6442
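/* Drain the admin receive queue and dispatch each event: VF mailbox
 * messages go to the PF host handler, link status changes trigger a link
 * update plus an LSC callback, and anything else is only logged.
 */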
6443 static void
6444 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
6445 {
6446 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6447 struct i40e_arq_event_info info;
6448 uint16_t pending, opcode;
6449 int ret;
6450
6451 info.buf_len = I40E_AQ_BUF_SZ;
6452 info.msg_buf = rte_zmalloc("msg_buffer", info.buf_len, 0);
6453 if (!info.msg_buf) {
6454 PMD_DRV_LOG(ERR, "Failed to allocate mem");
6455 return;
6456 }
6457
6458 pending = 1;
6459 while (pending) {
6460 ret = i40e_clean_arq_element(hw, &info, &pending);
6461
6462 if (ret != I40E_SUCCESS) {
6463 PMD_DRV_LOG(INFO,
6464 "Failed to read msg from AdminQ, aq_err: %u",
6465 hw->aq.asq_last_status);
6466 break;
6467 }
6468 opcode = rte_le_to_cpu_16(info.desc.opcode);
6469
6470 switch (opcode) {
6471 case i40e_aqc_opc_send_msg_to_pf:
6472 /* Refer to i40e_aq_send_msg_to_pf() for argument layout */
6473 i40e_pf_host_handle_vf_msg(dev,
6474 rte_le_to_cpu_16(info.desc.retval),
6475 rte_le_to_cpu_32(info.desc.cookie_high),
6476 rte_le_to_cpu_32(info.desc.cookie_low),
6477 info.msg_buf,
6478 info.msg_len);
6479 break;
6480 case i40e_aqc_opc_get_link_status:
6481 ret = i40e_dev_link_update(dev, 0);
6482 if (!ret)
6483 _rte_eth_dev_callback_process(dev,
6484 RTE_ETH_EVENT_INTR_LSC, NULL);
6485 break;
6486 default:
6487 PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
6488 opcode);
6489 break;
6490 }
6491 }
6492 rte_free(info.msg_buf);
6493 }
6494
6495 /**
6496 * Interrupt handler triggered by NIC for handling
6497 * specific interrupt.
6498 *
6499 * @param handle
6500 * Pointer to interrupt handle.
6501 * @param param
6502 * The address of the parameter (struct rte_eth_dev *) registered before.
6503 *
6504 * @return
6505 * void
6506 */
6507 static void
6508 i40e_dev_interrupt_handler(void *param)
6509 {
6510 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
6511 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
6512 uint32_t icr0;
6513
6514 /* Disable interrupt */
6515 i40e_pf_disable_irq0(hw);
6516
6517 /* read out interrupt causes */
6518 icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
6519
6520 /* No interrupt event indicated */
6521 if (!(icr0 & I40E_PFINT_ICR0_INTEVENT_MASK)) {
6522 PMD_DRV_LOG(INFO, "No interrupt event");
6523 goto done;
6524 }
6525 if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
6526 PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
6527 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
6528 PMD_DRV_LOG(ERR, "ICR0: malicious programming detected");
6529 if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
6530 PMD_DRV_LOG(INFO, "ICR0: global reset requested");
6531 if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
6532 PMD_DRV_LOG(INFO, "ICR0: PCI exception activated");
6533 if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
6534 PMD_DRV_LOG(INFO, "ICR0: a change in the storm control state");
6535 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
6536 PMD_DRV_LOG(ERR, "ICR0: HMC error");
6537 if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
6538 PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
6539
6540 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
6541 PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
6542 i40e_dev_handle_vfr_event(dev);
6543 }
6544 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
6545 PMD_DRV_LOG(INFO, "ICR0: adminq event");
6546 i40e_dev_handle_aq_msg(dev);
6547 }
6548
6549 done:
6550 /* Enable interrupt */
6551 i40e_pf_enable_irq0(hw);
6552 rte_intr_enable(dev->intr_handle);
6553 }
6554
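/* Program 'total' macvlan filters on the VSI, batching the admin queue
 * commands so that each request list fits into the ASQ buffer.
 */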
6555 int
6556 i40e_add_macvlan_filters(struct i40e_vsi *vsi,
6557 struct i40e_macvlan_filter *filter,
6558 int total)
6559 {
6560 int ele_num, ele_buff_size;
6561 int num, actual_num, i;
6562 uint16_t flags;
6563 int ret = I40E_SUCCESS;
6564 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6565 struct i40e_aqc_add_macvlan_element_data *req_list;
6566
6567 if (filter == NULL || total == 0)
6568 return I40E_ERR_PARAM;
6569 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6570 ele_buff_size = hw->aq.asq_buf_size;
6571
6572 req_list = rte_zmalloc("macvlan_add", ele_buff_size, 0);
6573 if (req_list == NULL) {
6574 PMD_DRV_LOG(ERR, "Fail to allocate memory");
6575 return I40E_ERR_NO_MEMORY;
6576 }
6577
6578 num = 0;
6579 do {
6580 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6581 memset(req_list, 0, ele_buff_size);
6582
6583 for (i = 0; i < actual_num; i++) {
6584 rte_memcpy(req_list[i].mac_addr,
6585 &filter[num + i].macaddr, ETH_ADDR_LEN);
6586 req_list[i].vlan_tag =
6587 rte_cpu_to_le_16(filter[num + i].vlan_id);
6588
6589 switch (filter[num + i].filter_type) {
6590 case RTE_MAC_PERFECT_MATCH:
6591 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
6592 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6593 break;
6594 case RTE_MACVLAN_PERFECT_MATCH:
6595 flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
6596 break;
6597 case RTE_MAC_HASH_MATCH:
6598 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH |
6599 I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
6600 break;
6601 case RTE_MACVLAN_HASH_MATCH:
6602 flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
6603 break;
6604 default:
6605 PMD_DRV_LOG(ERR, "Invalid MAC match type");
6606 ret = I40E_ERR_PARAM;
6607 goto DONE;
6608 }
6609
6610 req_list[i].queue_number = 0;
6611
6612 req_list[i].flags = rte_cpu_to_le_16(flags);
6613 }
6614
6615 ret = i40e_aq_add_macvlan(hw, vsi->seid, req_list,
6616 actual_num, NULL);
6617 if (ret != I40E_SUCCESS) {
6618 PMD_DRV_LOG(ERR, "Failed to add macvlan filter");
6619 goto DONE;
6620 }
6621 num += actual_num;
6622 } while (num < total);
6623
6624 DONE:
6625 rte_free(req_list);
6626 return ret;
6627 }
6628
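/* Remove 'total' macvlan filters from the VSI, batched the same way as
 * i40e_add_macvlan_filters().
 */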
6629 int
6630 i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
6631 struct i40e_macvlan_filter *filter,
6632 int total)
6633 {
6634 int ele_num, ele_buff_size;
6635 int num, actual_num, i;
6636 uint16_t flags;
6637 int ret = I40E_SUCCESS;
6638 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6639 struct i40e_aqc_remove_macvlan_element_data *req_list;
6640
6641 if (filter == NULL || total == 0)
6642 return I40E_ERR_PARAM;
6643
6644 ele_num = hw->aq.asq_buf_size / sizeof(*req_list);
6645 ele_buff_size = hw->aq.asq_buf_size;
6646
6647 req_list = rte_zmalloc("macvlan_remove", ele_buff_size, 0);
6648 if (req_list == NULL) {
6649 PMD_DRV_LOG(ERR, "Fail to allocate memory");
6650 return I40E_ERR_NO_MEMORY;
6651 }
6652
6653 num = 0;
6654 do {
6655 actual_num = (num + ele_num > total) ? (total - num) : ele_num;
6656 memset(req_list, 0, ele_buff_size);
6657
6658 for (i = 0; i < actual_num; i++) {
6659 rte_memcpy(req_list[i].mac_addr,
6660 &filter[num + i].macaddr, ETH_ADDR_LEN);
6661 req_list[i].vlan_tag =
6662 rte_cpu_to_le_16(filter[num + i].vlan_id);
6663
6664 switch (filter[num + i].filter_type) {
6665 case RTE_MAC_PERFECT_MATCH:
6666 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
6667 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6668 break;
6669 case RTE_MACVLAN_PERFECT_MATCH:
6670 flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
6671 break;
6672 case RTE_MAC_HASH_MATCH:
6673 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH |
6674 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
6675 break;
6676 case RTE_MACVLAN_HASH_MATCH:
6677 flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
6678 break;
6679 default:
6680 PMD_DRV_LOG(ERR, "Invalid MAC filter type");
6681 ret = I40E_ERR_PARAM;
6682 goto DONE;
6683 }
6684 req_list[i].flags = rte_cpu_to_le_16(flags);
6685 }
6686
6687 ret = i40e_aq_remove_macvlan(hw, vsi->seid, req_list,
6688 actual_num, NULL);
6689 if (ret != I40E_SUCCESS) {
6690 PMD_DRV_LOG(ERR, "Failed to remove macvlan filter");
6691 goto DONE;
6692 }
6693 num += actual_num;
6694 } while (num < total);
6695
6696 DONE:
6697 rte_free(req_list);
6698 return ret;
6699 }
6700
6701 /* Find out specific MAC filter */
6702 static struct i40e_mac_filter *
6703 i40e_find_mac_filter(struct i40e_vsi *vsi,
6704 struct ether_addr *macaddr)
6705 {
6706 struct i40e_mac_filter *f;
6707
6708 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6709 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
6710 return f;
6711 }
6712
6713 return NULL;
6714 }
6715
6716 static bool
6717 i40e_find_vlan_filter(struct i40e_vsi *vsi,
6718 uint16_t vlan_id)
6719 {
6720 uint32_t vid_idx, vid_bit;
6721
6722 if (vlan_id > ETH_VLAN_ID_MAX)
6723 return 0;
6724
6725 vid_idx = I40E_VFTA_IDX(vlan_id);
6726 vid_bit = I40E_VFTA_BIT(vlan_id);
6727
6728 if (vsi->vfta[vid_idx] & vid_bit)
6729 return 1;
6730 else
6731 return 0;
6732 }
6733
6734 static void
6735 i40e_store_vlan_filter(struct i40e_vsi *vsi,
6736 uint16_t vlan_id, bool on)
6737 {
6738 uint32_t vid_idx, vid_bit;
6739
6740 vid_idx = I40E_VFTA_IDX(vlan_id);
6741 vid_bit = I40E_VFTA_BIT(vlan_id);
6742
6743 if (on)
6744 vsi->vfta[vid_idx] |= vid_bit;
6745 else
6746 vsi->vfta[vid_idx] &= ~vid_bit;
6747 }
6748
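/* Update the software VFTA shadow for 'vlan_id' and, when VLAN
 * anti-spoofing or VLAN filtering is enabled, add or remove the
 * corresponding hardware VLAN filter (VLAN 0 is tracked in software only).
 */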
6749 void
6750 i40e_set_vlan_filter(struct i40e_vsi *vsi,
6751 uint16_t vlan_id, bool on)
6752 {
6753 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
6754 struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
6755 int ret;
6756
6757 if (vlan_id > ETH_VLAN_ID_MAX)
6758 return;
6759
6760 i40e_store_vlan_filter(vsi, vlan_id, on);
6761
6762 if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
6763 return;
6764
6765 vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
6766
6767 if (on) {
6768 ret = i40e_aq_add_vlan(hw, vsi->seid,
6769 &vlan_data, 1, NULL);
6770 if (ret != I40E_SUCCESS)
6771 PMD_DRV_LOG(ERR, "Failed to add vlan filter");
6772 } else {
6773 ret = i40e_aq_remove_vlan(hw, vsi->seid,
6774 &vlan_data, 1, NULL);
6775 if (ret != I40E_SUCCESS)
6776 PMD_DRV_LOG(ERR,
6777 "Failed to remove vlan filter");
6778 }
6779 }
6780
6781 /**
6782 * Find all vlan options for a specific mac addr and
6783 * return the actual vlans found.
6784 */
6785 int
6786 i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
6787 struct i40e_macvlan_filter *mv_f,
6788 int num, struct ether_addr *addr)
6789 {
6790 int i;
6791 uint32_t j, k;
6792
6793 /**
6794 * i40e_find_vlan_filter is deliberately not used here to reduce
6795 * the loop time, even though the code looks more complex.
6796 */
6797 if (num < vsi->vlan_num)
6798 return I40E_ERR_PARAM;
6799
6800 i = 0;
6801 for (j = 0; j < I40E_VFTA_SIZE; j++) {
6802 if (vsi->vfta[j]) {
6803 for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
6804 if (vsi->vfta[j] & (1 << k)) {
6805 if (i > num - 1) {
6806 PMD_DRV_LOG(ERR,
6807 "vlan number doesn't match");
6808 return I40E_ERR_PARAM;
6809 }
6810 rte_memcpy(&mv_f[i].macaddr,
6811 addr, ETH_ADDR_LEN);
6812 mv_f[i].vlan_id =
6813 j * I40E_UINT32_BIT_SIZE + k;
6814 i++;
6815 }
6816 }
6817 }
6818 }
6819 return I40E_SUCCESS;
6820 }
6821
6822 static inline int
6823 i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
6824 struct i40e_macvlan_filter *mv_f,
6825 int num,
6826 uint16_t vlan)
6827 {
6828 int i = 0;
6829 struct i40e_mac_filter *f;
6830
6831 if (num < vsi->mac_num)
6832 return I40E_ERR_PARAM;
6833
6834 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6835 if (i > num - 1) {
6836 PMD_DRV_LOG(ERR, "buffer number not match");
6837 return I40E_ERR_PARAM;
6838 }
6839 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
6840 ETH_ADDR_LEN);
6841 mv_f[i].vlan_id = vlan;
6842 mv_f[i].filter_type = f->mac_info.filter_type;
6843 i++;
6844 }
6845
6846 return I40E_SUCCESS;
6847 }
6848
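/* Remove every MAC/VLAN filter combination currently stored on the VSI,
 * expanding each MAC address over all configured VLANs (or VLAN 0 when
 * no VLAN is configured).
 */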
6849 static int
6850 i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
6851 {
6852 int i, j, num;
6853 struct i40e_mac_filter *f;
6854 struct i40e_macvlan_filter *mv_f;
6855 int ret = I40E_SUCCESS;
6856
6857 if (vsi == NULL || vsi->mac_num == 0)
6858 return I40E_ERR_PARAM;
6859
6860 /* Case that no vlan is set */
6861 if (vsi->vlan_num == 0)
6862 num = vsi->mac_num;
6863 else
6864 num = vsi->mac_num * vsi->vlan_num;
6865
6866 mv_f = rte_zmalloc("macvlan_data", num * sizeof(*mv_f), 0);
6867 if (mv_f == NULL) {
6868 PMD_DRV_LOG(ERR, "failed to allocate memory");
6869 return I40E_ERR_NO_MEMORY;
6870 }
6871
6872 i = 0;
6873 if (vsi->vlan_num == 0) {
6874 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6875 rte_memcpy(&mv_f[i].macaddr,
6876 &f->mac_info.mac_addr, ETH_ADDR_LEN);
6877 mv_f[i].filter_type = f->mac_info.filter_type;
6878 mv_f[i].vlan_id = 0;
6879 i++;
6880 }
6881 } else {
6882 TAILQ_FOREACH(f, &vsi->mac_list, next) {
6883 ret = i40e_find_all_vlan_for_mac(vsi, &mv_f[i],
6884 vsi->vlan_num, &f->mac_info.mac_addr);
6885 if (ret != I40E_SUCCESS)
6886 goto DONE;
6887 for (j = i; j < i + vsi->vlan_num; j++)
6888 mv_f[j].filter_type = f->mac_info.filter_type;
6889 i += vsi->vlan_num;
6890 }
6891 }
6892
6893 ret = i40e_remove_macvlan_filters(vsi, mv_f, num);
6894 DONE:
6895 rte_free(mv_f);
6896
6897 return ret;
6898 }
6899
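/* Add a VLAN to the VSI. The new VLAN is expanded over every MAC address
 * already configured on the VSI; for example, with MACs {A, B} and VLAN
 * 100 the request list carries the pairs (A, 100) and (B, 100).
 */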
6900 int
6901 i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6902 {
6903 struct i40e_macvlan_filter *mv_f;
6904 int mac_num;
6905 int ret = I40E_SUCCESS;
6906
6907 if (!vsi || vlan > ETHER_MAX_VLAN_ID)
6908 return I40E_ERR_PARAM;
6909
6910 /* If it's already set, just return */
6911 if (i40e_find_vlan_filter(vsi, vlan))
6912 return I40E_SUCCESS;
6913
6914 mac_num = vsi->mac_num;
6915
6916 if (mac_num == 0) {
6917 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6918 return I40E_ERR_PARAM;
6919 }
6920
6921 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6922
6923 if (mv_f == NULL) {
6924 PMD_DRV_LOG(ERR, "failed to allocate memory");
6925 return I40E_ERR_NO_MEMORY;
6926 }
6927
6928 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6929
6930 if (ret != I40E_SUCCESS)
6931 goto DONE;
6932
6933 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6934
6935 if (ret != I40E_SUCCESS)
6936 goto DONE;
6937
6938 i40e_set_vlan_filter(vsi, vlan, 1);
6939
6940 vsi->vlan_num++;
6941 ret = I40E_SUCCESS;
6942 DONE:
6943 rte_free(mv_f);
6944 return ret;
6945 }
6946
6947 int
6948 i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan)
6949 {
6950 struct i40e_macvlan_filter *mv_f;
6951 int mac_num;
6952 int ret = I40E_SUCCESS;
6953
6954 /**
6955 * Vlan 0 is the generic filter for untagged packets
6956 * and can't be removed.
6957 */
6958 if (!vsi || vlan == 0 || vlan > ETHER_MAX_VLAN_ID)
6959 return I40E_ERR_PARAM;
6960
6961 /* If can't find it, just return */
6962 if (!i40e_find_vlan_filter(vsi, vlan))
6963 return I40E_ERR_PARAM;
6964
6965 mac_num = vsi->mac_num;
6966
6967 if (mac_num == 0) {
6968 PMD_DRV_LOG(ERR, "Error! VSI doesn't have a mac addr");
6969 return I40E_ERR_PARAM;
6970 }
6971
6972 mv_f = rte_zmalloc("macvlan_data", mac_num * sizeof(*mv_f), 0);
6973
6974 if (mv_f == NULL) {
6975 PMD_DRV_LOG(ERR, "failed to allocate memory");
6976 return I40E_ERR_NO_MEMORY;
6977 }
6978
6979 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, vlan);
6980
6981 if (ret != I40E_SUCCESS)
6982 goto DONE;
6983
6984 ret = i40e_remove_macvlan_filters(vsi, mv_f, mac_num);
6985
6986 if (ret != I40E_SUCCESS)
6987 goto DONE;
6988
6989 /* This is last vlan to remove, replace all mac filter with vlan 0 */
6990 if (vsi->vlan_num == 1) {
6991 ret = i40e_find_all_mac_for_vlan(vsi, mv_f, mac_num, 0);
6992 if (ret != I40E_SUCCESS)
6993 goto DONE;
6994
6995 ret = i40e_add_macvlan_filters(vsi, mv_f, mac_num);
6996 if (ret != I40E_SUCCESS)
6997 goto DONE;
6998 }
6999
7000 i40e_set_vlan_filter(vsi, vlan, 0);
7001
7002 vsi->vlan_num--;
7003 ret = I40E_SUCCESS;
7004 DONE:
7005 rte_free(mv_f);
7006 return ret;
7007 }
7008
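/* Add a MAC filter to the VSI. For MACVLAN filter types the new address
 * is expanded over every configured VLAN (VLAN 0 is added first if none
 * is configured); for MAC-only types a single entry is programmed.
 */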
7009 int
7010 i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *mac_filter)
7011 {
7012 struct i40e_mac_filter *f;
7013 struct i40e_macvlan_filter *mv_f;
7014 int i, vlan_num = 0;
7015 int ret = I40E_SUCCESS;
7016
7017 /* If the MAC filter has already been configured, just return */
7018 f = i40e_find_mac_filter(vsi, &mac_filter->mac_addr);
7019 if (f != NULL)
7020 return I40E_SUCCESS;
7021 if ((mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
7022 (mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH)) {
7023
7024 /**
7025 * If vlan_num is 0, this is the first MAC being added,
7026 * so set the mask for vlan_id 0.
7027 */
7028 if (vsi->vlan_num == 0) {
7029 i40e_set_vlan_filter(vsi, 0, 1);
7030 vsi->vlan_num = 1;
7031 }
7032 vlan_num = vsi->vlan_num;
7033 } else if ((mac_filter->filter_type == RTE_MAC_PERFECT_MATCH) ||
7034 (mac_filter->filter_type == RTE_MAC_HASH_MATCH))
7035 vlan_num = 1;
7036
7037 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7038 if (mv_f == NULL) {
7039 PMD_DRV_LOG(ERR, "failed to allocate memory");
7040 return I40E_ERR_NO_MEMORY;
7041 }
7042
7043 for (i = 0; i < vlan_num; i++) {
7044 mv_f[i].filter_type = mac_filter->filter_type;
7045 rte_memcpy(&mv_f[i].macaddr, &mac_filter->mac_addr,
7046 ETH_ADDR_LEN);
7047 }
7048
7049 if (mac_filter->filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7050 mac_filter->filter_type == RTE_MACVLAN_HASH_MATCH) {
7051 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
7052 &mac_filter->mac_addr);
7053 if (ret != I40E_SUCCESS)
7054 goto DONE;
7055 }
7056
7057 ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
7058 if (ret != I40E_SUCCESS)
7059 goto DONE;
7060
7061 /* Add the mac addr into mac list */
7062 f = rte_zmalloc("macv_filter", sizeof(*f), 0);
7063 if (f == NULL) {
7064 PMD_DRV_LOG(ERR, "failed to allocate memory");
7065 ret = I40E_ERR_NO_MEMORY;
7066 goto DONE;
7067 }
7068 rte_memcpy(&f->mac_info.mac_addr, &mac_filter->mac_addr,
7069 ETH_ADDR_LEN);
7070 f->mac_info.filter_type = mac_filter->filter_type;
7071 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
7072 vsi->mac_num++;
7073
7074 ret = I40E_SUCCESS;
7075 DONE:
7076 rte_free(mv_f);
7077
7078 return ret;
7079 }
7080
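/* Remove a MAC filter from the VSI, deleting every MAC/VLAN combination
 * programmed for it, and drop the entry from the software mac list.
 */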
7081 int
7082 i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
7083 {
7084 struct i40e_mac_filter *f;
7085 struct i40e_macvlan_filter *mv_f;
7086 int i, vlan_num;
7087 enum rte_mac_filter_type filter_type;
7088 int ret = I40E_SUCCESS;
7089
7090 /* Can't find it, return an error */
7091 f = i40e_find_mac_filter(vsi, addr);
7092 if (f == NULL)
7093 return I40E_ERR_PARAM;
7094
7095 vlan_num = vsi->vlan_num;
7096 filter_type = f->mac_info.filter_type;
7097 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7098 filter_type == RTE_MACVLAN_HASH_MATCH) {
7099 if (vlan_num == 0) {
7100 PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
7101 return I40E_ERR_PARAM;
7102 }
7103 } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
7104 filter_type == RTE_MAC_HASH_MATCH)
7105 vlan_num = 1;
7106
7107 mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
7108 if (mv_f == NULL) {
7109 PMD_DRV_LOG(ERR, "failed to allocate memory");
7110 return I40E_ERR_NO_MEMORY;
7111 }
7112
7113 for (i = 0; i < vlan_num; i++) {
7114 mv_f[i].filter_type = filter_type;
7115 rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr,
7116 ETH_ADDR_LEN);
7117 }
7118 if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
7119 filter_type == RTE_MACVLAN_HASH_MATCH) {
7120 ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num, addr);
7121 if (ret != I40E_SUCCESS)
7122 goto DONE;
7123 }
7124
7125 ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
7126 if (ret != I40E_SUCCESS)
7127 goto DONE;
7128
7129 /* Remove the mac addr from the mac list */
7130 TAILQ_REMOVE(&vsi->mac_list, f, next);
7131 rte_free(f);
7132 vsi->mac_num--;
7133
7134 ret = I40E_SUCCESS;
7135 DONE:
7136 rte_free(mv_f);
7137 return ret;
7138 }
7139
7140 /* Configure hash enable flags for RSS */
7141 uint64_t
7142 i40e_config_hena(const struct i40e_adapter *adapter, uint64_t flags)
7143 {
7144 uint64_t hena = 0;
7145 int i;
7146
7147 if (!flags)
7148 return hena;
7149
7150 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7151 if (flags & (1ULL << i))
7152 hena |= adapter->pctypes_tbl[i];
7153 }
7154
7155 return hena;
7156 }
7157
7158 /* Parse the hash enable flags */
7159 uint64_t
7160 i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags)
7161 {
7162 uint64_t rss_hf = 0;
7163 int i;
7164 
7165 if (!flags)
7166 return rss_hf;
7167 
7168 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < I40E_FLOW_TYPE_MAX; i++) {
7169 if (flags & adapter->pctypes_tbl[i])
7170 rss_hf |= (1ULL << i);
7171 }
7172 return rss_hf;
7173 }
7174
7175 /* Disable RSS */
7176 static void
7177 i40e_pf_disable_rss(struct i40e_pf *pf)
7178 {
7179 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7180
7181 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
7182 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
7183 I40E_WRITE_FLUSH(hw);
7184 }
7185
7186 int
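/* Set the RSS hash key for a VSI, via the admin queue when supported,
 * otherwise by writing the HKEY registers directly.
 */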
7187 i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
7188 {
7189 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7190 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7191 uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ?
7192 I40E_VFQF_HKEY_MAX_INDEX :
7193 I40E_PFQF_HKEY_MAX_INDEX;
7194 int ret = 0;
7195
7196 if (!key || key_len == 0) {
7197 PMD_DRV_LOG(DEBUG, "No key to be configured");
7198 return 0;
7199 } else if (key_len != (key_idx + 1) *
7200 sizeof(uint32_t)) {
7201 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
7202 return -EINVAL;
7203 }
7204
7205 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7206 struct i40e_aqc_get_set_rss_key_data *key_dw =
7207 (struct i40e_aqc_get_set_rss_key_data *)key;
7208
7209 ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
7210 if (ret)
7211 PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
7212 } else {
7213 uint32_t *hash_key = (uint32_t *)key;
7214 uint16_t i;
7215
7216 if (vsi->type == I40E_VSI_SRIOV) {
7217 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
7218 I40E_WRITE_REG(
7219 hw,
7220 I40E_VFQF_HKEY1(i, vsi->user_param),
7221 hash_key[i]);
7222
7223 } else {
7224 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7225 I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i),
7226 hash_key[i]);
7227 }
7228 I40E_WRITE_FLUSH(hw);
7229 }
7230
7231 return ret;
7232 }
7233
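/* Read back the RSS hash key and report its length for a VSI */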
7234 static int
7235 i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
7236 {
7237 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
7238 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
7239 uint32_t reg;
7240 int ret;
7241
7242 if (!key || !key_len)
7243 return -EINVAL;
7244
7245 if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
7246 ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
7247 (struct i40e_aqc_get_set_rss_key_data *)key);
7248 if (ret) {
7249 PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
7250 return ret;
7251 }
7252 } else {
7253 uint32_t *key_dw = (uint32_t *)key;
7254 uint16_t i;
7255
7256 if (vsi->type == I40E_VSI_SRIOV) {
7257 for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) {
7258 reg = I40E_VFQF_HKEY1(i, vsi->user_param);
7259 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7260 }
7261 *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
7262 sizeof(uint32_t);
7263 } else {
7264 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
7265 reg = I40E_PFQF_HKEY(i);
7266 key_dw[i] = i40e_read_rx_ctl(hw, reg);
7267 }
7268 *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
7269 sizeof(uint32_t);
7270 }
7271 }
7272 return 0;
7273 }
7274
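/* Program the RSS key and the hash enable (HENA) registers for the PF */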
7275 static int
7276 i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
7277 {
7278 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7279 uint64_t hena;
7280 int ret;
7281
7282 ret = i40e_set_rss_key(pf->main_vsi, rss_conf->rss_key,
7283 rss_conf->rss_key_len);
7284 if (ret)
7285 return ret;
7286
7287 hena = i40e_config_hena(pf->adapter, rss_conf->rss_hf);
7288 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
7289 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
7290 I40E_WRITE_FLUSH(hw);
7291
7292 return 0;
7293 }
7294
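/* Update the RSS hash configuration. Enabling or disabling RSS itself
 * at this point is rejected; only the enabled flow types may change.
 */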
7295 static int
7296 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
7297 struct rte_eth_rss_conf *rss_conf)
7298 {
7299 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7300 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7301 uint64_t rss_hf = rss_conf->rss_hf & pf->adapter->flow_types_mask;
7302 uint64_t hena;
7303
7304 hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7305 hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7306
7307 if (!(hena & pf->adapter->pctypes_mask)) { /* RSS disabled */
7308 if (rss_hf != 0) /* Enable RSS */
7309 return -EINVAL;
7310 return 0; /* Nothing to do */
7311 }
7312 /* RSS enabled */
7313 if (rss_hf == 0) /* Disable RSS */
7314 return -EINVAL;
7315
7316 return i40e_hw_rss_hash_set(pf, rss_conf);
7317 }
7318
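/* Report the current RSS key and the enabled hash flow types */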
7319 static int
7320 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
7321 struct rte_eth_rss_conf *rss_conf)
7322 {
7323 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
7324 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7325 uint64_t hena;
7326
7327 i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key,
7328 &rss_conf->rss_key_len);
7329
7330 hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
7331 hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
7332 rss_conf->rss_hf = i40e_parse_hena(pf->adapter, hena);
7333
7334 return 0;
7335 }
7336
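/* Map an ethdev tunnel filter type to the corresponding AQ cloud filter flag */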
7337 static int
7338 i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
7339 {
7340 switch (filter_type) {
7341 case RTE_TUNNEL_FILTER_IMAC_IVLAN:
7342 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7343 break;
7344 case RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID:
7345 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7346 break;
7347 case RTE_TUNNEL_FILTER_IMAC_TENID:
7348 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID;
7349 break;
7350 case RTE_TUNNEL_FILTER_OMAC_TENID_IMAC:
7351 *flag = I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC;
7352 break;
7353 case ETH_TUNNEL_FILTER_IMAC:
7354 *flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7355 break;
7356 case ETH_TUNNEL_FILTER_OIP:
7357 *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
7358 break;
7359 case ETH_TUNNEL_FILTER_IIP:
7360 *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7361 break;
7362 default:
7363 PMD_DRV_LOG(ERR, "invalid tunnel filter type");
7364 return -EINVAL;
7365 }
7366
7367 return 0;
7368 }
7369
7370 /* Convert tunnel filter structure */
7371 static int
7372 i40e_tunnel_filter_convert(
7373 struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
7374 struct i40e_tunnel_filter *tunnel_filter)
7375 {
7376 ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
7377 (struct ether_addr *)&tunnel_filter->input.outer_mac);
7378 ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
7379 (struct ether_addr *)&tunnel_filter->input.inner_mac);
7380 tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
7381 if ((rte_le_to_cpu_16(cld_filter->element.flags) &
7382 I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
7383 I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
7384 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
7385 else
7386 tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
7387 tunnel_filter->input.flags = cld_filter->element.flags;
7388 tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
7389 tunnel_filter->queue = cld_filter->element.queue_number;
7390 rte_memcpy(tunnel_filter->input.general_fields,
7391 cld_filter->general_fields,
7392 sizeof(cld_filter->general_fields));
7393
7394 return 0;
7395 }
7396
7397 /* Check if the tunnel filter already exists */
7398 struct i40e_tunnel_filter *
7399 i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
7400 const struct i40e_tunnel_filter_input *input)
7401 {
7402 int ret;
7403
7404 ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
7405 if (ret < 0)
7406 return NULL;
7407
7408 return tunnel_rule->hash_map[ret];
7409 }
7410
7411 /* Add a tunnel filter into the SW list */
7412 static int
7413 i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
7414 struct i40e_tunnel_filter *tunnel_filter)
7415 {
7416 struct i40e_tunnel_rule *rule = &pf->tunnel;
7417 int ret;
7418
7419 ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
7420 if (ret < 0) {
7421 PMD_DRV_LOG(ERR,
7422 "Failed to insert tunnel filter to hash table %d!",
7423 ret);
7424 return ret;
7425 }
7426 rule->hash_map[ret] = tunnel_filter;
7427
7428 TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
7429
7430 return 0;
7431 }
7432
7433 /* Delete a tunnel filter from the SW list */
7434 int
7435 i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
7436 struct i40e_tunnel_filter_input *input)
7437 {
7438 struct i40e_tunnel_rule *rule = &pf->tunnel;
7439 struct i40e_tunnel_filter *tunnel_filter;
7440 int ret;
7441
7442 ret = rte_hash_del_key(rule->hash_table, input);
7443 if (ret < 0) {
7444 PMD_DRV_LOG(ERR,
7445 "Failed to delete tunnel filter to hash table %d!",
7446 ret);
7447 return ret;
7448 }
7449 tunnel_filter = rule->hash_map[ret];
7450 rule->hash_map[ret] = NULL;
7451
7452 TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
7453 rte_free(tunnel_filter);
7454
7455 return 0;
7456 }
7457
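/* Add or remove a tunnel (cloud) filter on the main VSI and keep the
 * software tunnel filter list in sync.
 */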
7458 int
7459 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
7460 struct rte_eth_tunnel_filter_conf *tunnel_filter,
7461 uint8_t add)
7462 {
7463 uint16_t ip_type;
7464 uint32_t ipv4_addr, ipv4_addr_le;
7465 uint8_t i, tun_type = 0;
7466 /* internal variable to convert ipv6 byte order */
7467 uint32_t convert_ipv6[4];
7468 int val, ret = 0;
7469 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7470 struct i40e_vsi *vsi = pf->main_vsi;
7471 struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7472 struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7473 struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7474 struct i40e_tunnel_filter *tunnel, *node;
7475 struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7476
7477 cld_filter = rte_zmalloc("tunnel_filter",
7478 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7479 0);
7480
7481 if (cld_filter == NULL) {
7482 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7483 return -ENOMEM;
7484 }
7485 pfilter = cld_filter;
7486
7487 ether_addr_copy(&tunnel_filter->outer_mac,
7488 (struct ether_addr *)&pfilter->element.outer_mac);
7489 ether_addr_copy(&tunnel_filter->inner_mac,
7490 (struct ether_addr *)&pfilter->element.inner_mac);
7491
7492 pfilter->element.inner_vlan =
7493 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7494 if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
7495 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7496 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7497 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7498 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7499 &ipv4_addr_le,
7500 sizeof(pfilter->element.ipaddr.v4.data));
7501 } else {
7502 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7503 for (i = 0; i < 4; i++) {
7504 convert_ipv6[i] =
7505 rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
7506 }
7507 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7508 &convert_ipv6,
7509 sizeof(pfilter->element.ipaddr.v6.data));
7510 }
7511
7512 /* Check the tunnel type */
7513 switch (tunnel_filter->tunnel_type) {
7514 case RTE_TUNNEL_TYPE_VXLAN:
7515 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7516 break;
7517 case RTE_TUNNEL_TYPE_NVGRE:
7518 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7519 break;
7520 case RTE_TUNNEL_TYPE_IP_IN_GRE:
7521 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7522 break;
7523 default:
7524 /* Other tunnel types are not supported. */
7525 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
7526 rte_free(cld_filter);
7527 return -EINVAL;
7528 }
7529
7530 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
7531 &pfilter->element.flags);
7532 if (val < 0) {
7533 rte_free(cld_filter);
7534 return -EINVAL;
7535 }
7536
7537 pfilter->element.flags |= rte_cpu_to_le_16(
7538 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
7539 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
7540 pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7541 pfilter->element.queue_number =
7542 rte_cpu_to_le_16(tunnel_filter->queue_id);
7543
7544 /* Check if the filter already exists in the SW list */
7545 memset(&check_filter, 0, sizeof(check_filter));
7546 i40e_tunnel_filter_convert(cld_filter, &check_filter);
7547 node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
7548 if (add && node) {
7549 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
7550 rte_free(cld_filter);
7551 return -EINVAL;
7552 }
7553
7554 if (!add && !node) {
7555 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
7556 rte_free(cld_filter);
7557 return -EINVAL;
7558 }
7559
7560 if (add) {
7561 ret = i40e_aq_add_cloud_filters(hw,
7562 vsi->seid, &cld_filter->element, 1);
7563 if (ret < 0) {
7564 PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
7565 rte_free(cld_filter);
7566 return -ENOTSUP;
7567 }
7568 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
7569 if (tunnel == NULL) {
7570 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7571 rte_free(cld_filter);
7572 return -ENOMEM;
7573 }
7574
7575 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
7576 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
7577 if (ret < 0)
7578 rte_free(tunnel);
7579 } else {
7580 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
7581 &cld_filter->element, 1);
7582 if (ret < 0) {
7583 PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
7584 rte_free(cld_filter);
7585 return -ENOTSUP;
7586 }
7587 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
7588 }
7589
7590 rte_free(cld_filter);
7591 return ret;
7592 }
7593
7594 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
7595 #define I40E_TR_VXLAN_GRE_KEY_MASK 0x4
7596 #define I40E_TR_GENEVE_KEY_MASK 0x8
7597 #define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40
7598 #define I40E_TR_GRE_KEY_MASK 0x400
7599 #define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800
7600 #define I40E_TR_GRE_NO_KEY_MASK 0x8000
7601
7602 static enum i40e_status_code
7603 i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
7604 {
7605 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
7606 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
7607 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7608 struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
7609 enum i40e_status_code status = I40E_SUCCESS;
7610
7611 if (pf->support_multi_driver) {
7612 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7613 return I40E_NOT_SUPPORTED;
7614 }
7615
7616 memset(&filter_replace, 0,
7617 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7618 memset(&filter_replace_buf, 0,
7619 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7620
7621 /* create L1 filter */
7622 filter_replace.old_filter_type =
7623 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7624 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X11;
7625 filter_replace.tr_bit = 0;
7626
7627 /* Prepare the buffer, 3 entries */
7628 filter_replace_buf.data[0] =
7629 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7630 filter_replace_buf.data[0] |=
7631 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7632 filter_replace_buf.data[2] = 0xFF;
7633 filter_replace_buf.data[3] = 0xFF;
7634 filter_replace_buf.data[4] =
7635 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7636 filter_replace_buf.data[4] |=
7637 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7638 filter_replace_buf.data[7] = 0xF0;
7639 filter_replace_buf.data[8]
7640 = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
7641 filter_replace_buf.data[8] |=
7642 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7643 filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
7644 I40E_TR_GENEVE_KEY_MASK |
7645 I40E_TR_GENERIC_UDP_TUNNEL_MASK;
7646 filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
7647 I40E_TR_GRE_KEY_WITH_XSUM_MASK |
7648 I40E_TR_GRE_NO_KEY_MASK) >> 8;
7649
7650 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7651 &filter_replace_buf);
7652 if (!status && (filter_replace.old_filter_type !=
7653 filter_replace.new_filter_type))
7654 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
7655 " original: 0x%x, new: 0x%x",
7656 dev->device->name,
7657 filter_replace.old_filter_type,
7658 filter_replace.new_filter_type);
7659
7660 return status;
7661 }
7662
7663 static enum i40e_status_code
7664 i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
7665 {
7666 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
7667 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
7668 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7669 struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
7670 enum i40e_status_code status = I40E_SUCCESS;
7671
7672 if (pf->support_multi_driver) {
7673 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7674 return I40E_NOT_SUPPORTED;
7675 }
7676
7677 /* For MPLSoUDP */
7678 memset(&filter_replace, 0,
7679 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7680 memset(&filter_replace_buf, 0,
7681 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7682 filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7683 I40E_AQC_MIRROR_CLOUD_FILTER;
7684 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
7685 filter_replace.new_filter_type =
7686 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7687 /* Prepare the buffer, 2 entries */
7688 filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7689 filter_replace_buf.data[0] |=
7690 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7691 filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7692 filter_replace_buf.data[4] |=
7693 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7694 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7695 &filter_replace_buf);
7696 if (status < 0)
7697 return status;
7698 if (filter_replace.old_filter_type !=
7699 filter_replace.new_filter_type)
7700 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7701 " original: 0x%x, new: 0x%x",
7702 dev->device->name,
7703 filter_replace.old_filter_type,
7704 filter_replace.new_filter_type);
7705
7706 /* For MPLSoGRE */
7707 memset(&filter_replace, 0,
7708 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7709 memset(&filter_replace_buf, 0,
7710 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7711
7712 filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
7713 I40E_AQC_MIRROR_CLOUD_FILTER;
7714 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
7715 filter_replace.new_filter_type =
7716 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7717 /* Prepare the buffer, 2 entries */
7718 filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7719 filter_replace_buf.data[0] |=
7720 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7721 filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_0X11;
7722 filter_replace_buf.data[4] |=
7723 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7724
7725 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7726 &filter_replace_buf);
7727 if (!status && (filter_replace.old_filter_type !=
7728 filter_replace.new_filter_type))
7729 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7730 " original: 0x%x, new: 0x%x",
7731 dev->device->name,
7732 filter_replace.old_filter_type,
7733 filter_replace.new_filter_type);
7734
7735 return status;
7736 }
7737
7738 static enum i40e_status_code
7739 i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
7740 {
7741 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
7742 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
7743 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7744 struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
7745 enum i40e_status_code status = I40E_SUCCESS;
7746
7747 if (pf->support_multi_driver) {
7748 PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
7749 return I40E_NOT_SUPPORTED;
7750 }
7751
7752 /* For GTP-C */
7753 memset(&filter_replace, 0,
7754 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7755 memset(&filter_replace_buf, 0,
7756 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7757 /* create L1 filter */
7758 filter_replace.old_filter_type =
7759 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
7760 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X12;
7761 filter_replace.tr_bit = I40E_AQC_NEW_TR_22 |
7762 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7763 /* Prepare the buffer, 2 entries */
7764 filter_replace_buf.data[0] =
7765 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7766 filter_replace_buf.data[0] |=
7767 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7768 filter_replace_buf.data[2] = 0xFF;
7769 filter_replace_buf.data[3] = 0xFF;
7770 filter_replace_buf.data[4] =
7771 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7772 filter_replace_buf.data[4] |=
7773 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7774 filter_replace_buf.data[6] = 0xFF;
7775 filter_replace_buf.data[7] = 0xFF;
7776 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7777 &filter_replace_buf);
7778 if (status < 0)
7779 return status;
7780 if (filter_replace.old_filter_type !=
7781 filter_replace.new_filter_type)
7782 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
7783 " original: 0x%x, new: 0x%x",
7784 dev->device->name,
7785 filter_replace.old_filter_type,
7786 filter_replace.new_filter_type);
7787
7788 /* for GTP-U */
7789 memset(&filter_replace, 0,
7790 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7791 memset(&filter_replace_buf, 0,
7792 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7793 /* create L1 filter */
7794 filter_replace.old_filter_type =
7795 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
7796 filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_0X13;
7797 filter_replace.tr_bit = I40E_AQC_NEW_TR_21 |
7798 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7799 /* Prepare the buffer, 2 entries */
7800 filter_replace_buf.data[0] =
7801 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
7802 filter_replace_buf.data[0] |=
7803 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7804 filter_replace_buf.data[2] = 0xFF;
7805 filter_replace_buf.data[3] = 0xFF;
7806 filter_replace_buf.data[4] =
7807 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
7808 filter_replace_buf.data[4] |=
7809 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7810 filter_replace_buf.data[6] = 0xFF;
7811 filter_replace_buf.data[7] = 0xFF;
7812
7813 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7814 &filter_replace_buf);
7815 if (!status && (filter_replace.old_filter_type !=
7816 filter_replace.new_filter_type))
7817 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
7818 " original: 0x%x, new: 0x%x",
7819 dev->device->name,
7820 filter_replace.old_filter_type,
7821 filter_replace.new_filter_type);
7822
7823 return status;
7824 }
7825
7826 static enum i40e_status_code
7827 i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
7828 {
7829 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
7830 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
7831 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7832 struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
7833 enum i40e_status_code status = I40E_SUCCESS;
7834
7835 if (pf->support_multi_driver) {
7836 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
7837 return I40E_NOT_SUPPORTED;
7838 }
7839
7840 /* for GTP-C */
7841 memset(&filter_replace, 0,
7842 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7843 memset(&filter_replace_buf, 0,
7844 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7845 filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7846 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN;
7847 filter_replace.new_filter_type =
7848 I40E_AQC_ADD_CLOUD_FILTER_0X11;
7849 /* Prepare the buffer, 2 entries */
7850 filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X12;
7851 filter_replace_buf.data[0] |=
7852 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7853 filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7854 filter_replace_buf.data[4] |=
7855 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7856 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7857 &filter_replace_buf);
7858 if (status < 0)
7859 return status;
7860 if (filter_replace.old_filter_type !=
7861 filter_replace.new_filter_type)
7862 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7863 " original: 0x%x, new: 0x%x",
7864 dev->device->name,
7865 filter_replace.old_filter_type,
7866 filter_replace.new_filter_type);
7867
7868 /* for GTP-U */
7869 memset(&filter_replace, 0,
7870 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
7871 memset(&filter_replace_buf, 0,
7872 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
7873 filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
7874 filter_replace.old_filter_type =
7875 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID;
7876 filter_replace.new_filter_type =
7877 I40E_AQC_ADD_CLOUD_FILTER_0X12;
7878 /* Prepare the buffer, 2 entries */
7879 filter_replace_buf.data[0] = I40E_AQC_ADD_L1_FILTER_0X13;
7880 filter_replace_buf.data[0] |=
7881 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7882 filter_replace_buf.data[4] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
7883 filter_replace_buf.data[4] |=
7884 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
7885
7886 status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
7887 &filter_replace_buf);
7888 if (!status && (filter_replace.old_filter_type !=
7889 filter_replace.new_filter_type))
7890 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
7891 " original: 0x%x, new: 0x%x",
7892 dev->device->name,
7893 filter_replace.old_filter_type,
7894 filter_replace.new_filter_type);
7895
7896 return status;
7897 }
7898
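/* Add or remove a tunnel filter on the main VSI or a VF VSI, including the
 * big-buffer tunnel types (MPLSoUDP/MPLSoGRE, GTP-C/GTP-U, QinQ).
 */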
7899 int
7900 i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
7901 struct i40e_tunnel_filter_conf *tunnel_filter,
7902 uint8_t add)
7903 {
7904 uint16_t ip_type;
7905 uint32_t ipv4_addr, ipv4_addr_le;
7906 uint8_t i, tun_type = 0;
7907 /* internal variable to convert ipv6 byte order */
7908 uint32_t convert_ipv6[4];
7909 int val, ret = 0;
7910 struct i40e_pf_vf *vf = NULL;
7911 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
7912 struct i40e_vsi *vsi;
7913 struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
7914 struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
7915 struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
7916 struct i40e_tunnel_filter *tunnel, *node;
7917 struct i40e_tunnel_filter check_filter; /* Check if filter exists */
7918 uint32_t teid_le;
7919 bool big_buffer = 0;
7920
7921 cld_filter = rte_zmalloc("tunnel_filter",
7922 sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
7923 0);
7924
7925 if (cld_filter == NULL) {
7926 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
7927 return -ENOMEM;
7928 }
7929 pfilter = cld_filter;
7930
7931 ether_addr_copy(&tunnel_filter->outer_mac,
7932 (struct ether_addr *)&pfilter->element.outer_mac);
7933 ether_addr_copy(&tunnel_filter->inner_mac,
7934 (struct ether_addr *)&pfilter->element.inner_mac);
7935
7936 pfilter->element.inner_vlan =
7937 rte_cpu_to_le_16(tunnel_filter->inner_vlan);
7938 if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
7939 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
7940 ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
7941 ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr);
7942 rte_memcpy(&pfilter->element.ipaddr.v4.data,
7943 &ipv4_addr_le,
7944 sizeof(pfilter->element.ipaddr.v4.data));
7945 } else {
7946 ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
7947 for (i = 0; i < 4; i++) {
7948 convert_ipv6[i] =
7949 rte_cpu_to_le_32(rte_be_to_cpu_32(
7950 tunnel_filter->ip_addr.ipv6_addr[i]));
7951 }
7952 rte_memcpy(&pfilter->element.ipaddr.v6.data,
7953 &convert_ipv6,
7954 sizeof(pfilter->element.ipaddr.v6.data));
7955 }
7956
7957 /* Check the tunnel type */
7958 switch (tunnel_filter->tunnel_type) {
7959 case I40E_TUNNEL_TYPE_VXLAN:
7960 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
7961 break;
7962 case I40E_TUNNEL_TYPE_NVGRE:
7963 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
7964 break;
7965 case I40E_TUNNEL_TYPE_IP_IN_GRE:
7966 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
7967 break;
7968 case I40E_TUNNEL_TYPE_MPLSoUDP:
7969 if (!pf->mpls_replace_flag) {
7970 i40e_replace_mpls_l1_filter(pf);
7971 i40e_replace_mpls_cloud_filter(pf);
7972 pf->mpls_replace_flag = 1;
7973 }
7974 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7975 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7976 teid_le >> 4;
7977 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7978 (teid_le & 0xF) << 12;
7979 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7980 0x40;
7981 big_buffer = 1;
7982 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP;
7983 break;
7984 case I40E_TUNNEL_TYPE_MPLSoGRE:
7985 if (!pf->mpls_replace_flag) {
7986 i40e_replace_mpls_l1_filter(pf);
7987 i40e_replace_mpls_cloud_filter(pf);
7988 pf->mpls_replace_flag = 1;
7989 }
7990 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
7991 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
7992 teid_le >> 4;
7993 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
7994 (teid_le & 0xF) << 12;
7995 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
7996 0x0;
7997 big_buffer = 1;
7998 tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE;
7999 break;
8000 case I40E_TUNNEL_TYPE_GTPC:
8001 if (!pf->gtp_replace_flag) {
8002 i40e_replace_gtp_l1_filter(pf);
8003 i40e_replace_gtp_cloud_filter(pf);
8004 pf->gtp_replace_flag = 1;
8005 }
8006 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8007 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0] =
8008 (teid_le >> 16) & 0xFFFF;
8009 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1] =
8010 teid_le & 0xFFFF;
8011 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2] =
8012 0x0;
8013 big_buffer = 1;
8014 break;
8015 case I40E_TUNNEL_TYPE_GTPU:
8016 if (!pf->gtp_replace_flag) {
8017 i40e_replace_gtp_l1_filter(pf);
8018 i40e_replace_gtp_cloud_filter(pf);
8019 pf->gtp_replace_flag = 1;
8020 }
8021 teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8022 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0] =
8023 (teid_le >> 16) & 0xFFFF;
8024 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1] =
8025 teid_le & 0xFFFF;
8026 pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2] =
8027 0x0;
8028 big_buffer = 1;
8029 break;
8030 case I40E_TUNNEL_TYPE_QINQ:
8031 if (!pf->qinq_replace_flag) {
8032 ret = i40e_cloud_filter_qinq_create(pf);
8033 if (ret < 0)
8034 PMD_DRV_LOG(DEBUG,
8035 "QinQ tunnel filter already created.");
8036 pf->qinq_replace_flag = 1;
8037 }
8038 /* Put the outer and inner VLAN values into the
8039 * general fields. The big buffer format must be
8040 * used for this filter type; see
8041 * i40e_aq_add_cloud_filters.
8042 */
8043 pfilter->general_fields[0] = tunnel_filter->inner_vlan;
8044 pfilter->general_fields[1] = tunnel_filter->outer_vlan;
8045 big_buffer = 1;
8046 break;
8047 default:
8048 /* Other tunnel types are not supported. */
8049 PMD_DRV_LOG(ERR, "tunnel type is not supported.");
8050 rte_free(cld_filter);
8051 return -EINVAL;
8052 }
8053
8054 if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
8055 pfilter->element.flags =
8056 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8057 else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
8058 pfilter->element.flags =
8059 I40E_AQC_ADD_CLOUD_FILTER_0X12;
8060 else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPC)
8061 pfilter->element.flags =
8062 I40E_AQC_ADD_CLOUD_FILTER_0X11;
8063 else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_GTPU)
8064 pfilter->element.flags =
8065 I40E_AQC_ADD_CLOUD_FILTER_0X12;
8066 else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
8067 pfilter->element.flags |=
8068 I40E_AQC_ADD_CLOUD_FILTER_0X10;
8069 else {
8070 val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
8071 &pfilter->element.flags);
8072 if (val < 0) {
8073 rte_free(cld_filter);
8074 return -EINVAL;
8075 }
8076 }
8077
8078 pfilter->element.flags |= rte_cpu_to_le_16(
8079 I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
8080 ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
8081 pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
8082 pfilter->element.queue_number =
8083 rte_cpu_to_le_16(tunnel_filter->queue_id);
8084
8085 if (!tunnel_filter->is_to_vf)
8086 vsi = pf->main_vsi;
8087 else {
8088 if (tunnel_filter->vf_id >= pf->vf_num) {
8089 PMD_DRV_LOG(ERR, "Invalid argument.");
8090 rte_free(cld_filter);
8091 return -EINVAL;
8092 }
8093 vf = &pf->vfs[tunnel_filter->vf_id];
8094 vsi = vf->vsi;
8095 }
8096
8097 /* Check if the filter already exists in the SW list */
8098 memset(&check_filter, 0, sizeof(check_filter));
8099 i40e_tunnel_filter_convert(cld_filter, &check_filter);
8100 check_filter.is_to_vf = tunnel_filter->is_to_vf;
8101 check_filter.vf_id = tunnel_filter->vf_id;
8102 node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
8103 if (add && node) {
8104 PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
8105 rte_free(cld_filter);
8106 return -EINVAL;
8107 }
8108
8109 if (!add && !node) {
8110 PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
8111 rte_free(cld_filter);
8112 return -EINVAL;
8113 }
8114
8115 if (add) {
8116 if (big_buffer)
8117 ret = i40e_aq_add_cloud_filters_big_buffer(hw,
8118 vsi->seid, cld_filter, 1);
8119 else
8120 ret = i40e_aq_add_cloud_filters(hw,
8121 vsi->seid, &cld_filter->element, 1);
8122 if (ret < 0) {
8123 PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
8124 rte_free(cld_filter);
8125 return -ENOTSUP;
8126 }
8127 tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
8128 if (tunnel == NULL) {
8129 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
8130 rte_free(cld_filter);
8131 return -ENOMEM;
8132 }
8133
8134 rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
8135 ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
8136 if (ret < 0)
8137 rte_free(tunnel);
8138 } else {
8139 if (big_buffer)
8140 ret = i40e_aq_remove_cloud_filters_big_buffer(
8141 hw, vsi->seid, cld_filter, 1);
8142 else
8143 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
8144 &cld_filter->element, 1);
8145 if (ret < 0) {
8146 PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
8147 rte_free(cld_filter);
8148 return -ENOTSUP;
8149 }
8150 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
8151 }
8152
8153 rte_free(cld_filter);
8154 return ret;
8155 }
8156
8157 static int
8158 i40e_get_vxlan_port_idx(struct i40e_pf *pf, uint16_t port)
8159 {
8160 uint8_t i;
8161
8162 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8163 if (pf->vxlan_ports[i] == port)
8164 return i;
8165 }
8166
8167 return -1;
8168 }
8169
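/* Register a VXLAN UDP port with the firmware and record it in the PF's port table */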
8170 static int
8171 i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
8172 {
8173 int idx, ret;
8174 uint8_t filter_idx;
8175 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8176
8177 idx = i40e_get_vxlan_port_idx(pf, port);
8178
8179 /* Check if port already exists */
8180 if (idx >= 0) {
8181 PMD_DRV_LOG(ERR, "Port %d already offloaded", port);
8182 return -EINVAL;
8183 }
8184
8185 /* Now check if there is space to add the new port */
8186 idx = i40e_get_vxlan_port_idx(pf, 0);
8187 if (idx < 0) {
8188 PMD_DRV_LOG(ERR,
8189 "Maximum number of UDP ports reached, not adding port %d",
8190 port);
8191 return -ENOSPC;
8192 }
8193
8194 ret = i40e_aq_add_udp_tunnel(hw, port, I40E_AQC_TUNNEL_TYPE_VXLAN,
8195 &filter_idx, NULL);
8196 if (ret < 0) {
8197 PMD_DRV_LOG(ERR, "Failed to add VXLAN UDP port %d", port);
8198 return -1;
8199 }
8200
8201 PMD_DRV_LOG(INFO, "Added port %d with AQ command with index %d",
8202 port, filter_idx);
8203
8204 /* New port: add it and mark its index in the bitmap */
8205 pf->vxlan_ports[idx] = port;
8206 pf->vxlan_bitmap |= (1 << idx);
8207
8208 if (!(pf->flags & I40E_FLAG_VXLAN))
8209 pf->flags |= I40E_FLAG_VXLAN;
8210
8211 return 0;
8212 }
8213
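/* Remove a VXLAN UDP port from the firmware and clear it from the PF's port table */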
8214 static int
8215 i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port)
8216 {
8217 int idx;
8218 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8219
8220 if (!(pf->flags & I40E_FLAG_VXLAN)) {
8221 PMD_DRV_LOG(ERR, "VXLAN UDP port was not configured.");
8222 return -EINVAL;
8223 }
8224
8225 idx = i40e_get_vxlan_port_idx(pf, port);
8226
8227 if (idx < 0) {
8228 PMD_DRV_LOG(ERR, "Port %d doesn't exist", port);
8229 return -EINVAL;
8230 }
8231
8232 if (i40e_aq_del_udp_tunnel(hw, idx, NULL) < 0) {
8233 PMD_DRV_LOG(ERR, "Failed to delete VXLAN UDP port %d", port);
8234 return -1;
8235 }
8236
8237 PMD_DRV_LOG(INFO, "Deleted port %d with AQ command with index %d",
8238 port, idx);
8239
8240 pf->vxlan_ports[idx] = 0;
8241 pf->vxlan_bitmap &= ~(1 << idx);
8242
8243 if (!pf->vxlan_bitmap)
8244 pf->flags &= ~I40E_FLAG_VXLAN;
8245
8246 return 0;
8247 }
8248
8249 /* Add UDP tunneling port */
8250 static int
8251 i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
8252 struct rte_eth_udp_tunnel *udp_tunnel)
8253 {
8254 int ret = 0;
8255 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8256
8257 if (udp_tunnel == NULL)
8258 return -EINVAL;
8259
8260 switch (udp_tunnel->prot_type) {
8261 case RTE_TUNNEL_TYPE_VXLAN:
8262 ret = i40e_add_vxlan_port(pf, udp_tunnel->udp_port);
8263 break;
8264
8265 case RTE_TUNNEL_TYPE_GENEVE:
8266 case RTE_TUNNEL_TYPE_TEREDO:
8267 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8268 ret = -1;
8269 break;
8270
8271 default:
8272 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8273 ret = -1;
8274 break;
8275 }
8276
8277 return ret;
8278 }
8279
8280 /* Remove UDP tunneling port */
8281 static int
8282 i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
8283 struct rte_eth_udp_tunnel *udp_tunnel)
8284 {
8285 int ret = 0;
8286 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8287
8288 if (udp_tunnel == NULL)
8289 return -EINVAL;
8290
8291 switch (udp_tunnel->prot_type) {
8292 case RTE_TUNNEL_TYPE_VXLAN:
8293 ret = i40e_del_vxlan_port(pf, udp_tunnel->udp_port);
8294 break;
8295 case RTE_TUNNEL_TYPE_GENEVE:
8296 case RTE_TUNNEL_TYPE_TEREDO:
8297 PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
8298 ret = -1;
8299 break;
8300 default:
8301 PMD_DRV_LOG(ERR, "Invalid tunnel type");
8302 ret = -1;
8303 break;
8304 }
8305
8306 return ret;
8307 }
8308
8309 /* Calculate the maximum number of contiguous PF queues that are configured */
8310 static int
8311 i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
8312 {
8313 struct rte_eth_dev_data *data = pf->dev_data;
8314 int i, num;
8315 struct i40e_rx_queue *rxq;
8316
8317 num = 0;
8318 for (i = 0; i < pf->lan_nb_qps; i++) {
8319 rxq = data->rx_queues[i];
8320 if (rxq && rxq->q_set)
8321 num++;
8322 else
8323 break;
8324 }
8325
8326 return num;
8327 }
8328
8329 /* Configure RSS */
8330 static int
8331 i40e_pf_config_rss(struct i40e_pf *pf)
8332 {
8333 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
8334 struct rte_eth_rss_conf rss_conf;
8335 uint32_t i, lut = 0;
8336 uint16_t j, num;
8337
8338 /*
8339 * If both VMDQ and RSS are enabled, not all of the PF queues are configured.
8340 * The actual number of configured PF queues must be calculated.
8341 */
8342 if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
8343 num = i40e_pf_calc_configured_queues_num(pf);
8344 else
8345 num = pf->dev_data->nb_rx_queues;
8346
8347 num = RTE_MIN(num, I40E_MAX_Q_PER_TC);
8348 PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
8349 num);
8350
8351 if (num == 0) {
8352 PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
8353 return -ENOTSUP;
8354 }
8355
8356 for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
8357 if (j == num)
8358 j = 0;
8359 lut = (lut << 8) | (j & ((0x1 <<
8360 hw->func_caps.rss_table_entry_width) - 1));
8361 if ((i & 3) == 3)
8362 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
8363 }
8364
8365 rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
8366 if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
8367 i40e_pf_disable_rss(pf);
8368 return 0;
8369 }
8370 if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
8371 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
8372 /* Random default keys */
8373 static uint32_t rss_key_default[] = {0x6b793944,
8374 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
8375 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
8376 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
8377
8378 rss_conf.rss_key = (uint8_t *)rss_key_default;
8379 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
8380 sizeof(uint32_t);
8381 }
8382
8383 return i40e_hw_rss_hash_set(pf, &rss_conf);
8384 }
8385
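/* Validate tunnel filter parameters: queue ID, inner VLAN and MAC addresses */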
8386 static int
8387 i40e_tunnel_filter_param_check(struct i40e_pf *pf,
8388 struct rte_eth_tunnel_filter_conf *filter)
8389 {
8390 if (pf == NULL || filter == NULL) {
8391 PMD_DRV_LOG(ERR, "Invalid parameter");
8392 return -EINVAL;
8393 }
8394
8395 if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
8396 PMD_DRV_LOG(ERR, "Invalid queue ID");
8397 return -EINVAL;
8398 }
8399
8400 if (filter->inner_vlan > ETHER_MAX_VLAN_ID) {
8401 PMD_DRV_LOG(ERR, "Invalid inner VLAN ID");
8402 return -EINVAL;
8403 }
8404
8405 if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) &&
8406 (is_zero_ether_addr(&filter->outer_mac))) {
8407 PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address");
8408 return -EINVAL;
8409 }
8410
8411 if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) &&
8412 (is_zero_ether_addr(&filter->inner_mac))) {
8413 PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address");
8414 return -EINVAL;
8415 }
8416
8417 return 0;
8418 }
8419
8420 #define I40E_GL_PRS_FVBM_MSK_ENA 0x80000000
8421 #define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4))
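/* Set the GRE key length (3 or 4) via the global GL_PRS_FVBM register */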
8422 static int
8423 i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
8424 {
8425 struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8426 uint32_t val, reg;
8427 int ret = -EINVAL;
8428
8429 if (pf->support_multi_driver) {
8430 PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported");
8431 return -ENOTSUP;
8432 }
8433
8434 val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
8435 PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
8436
8437 if (len == 3) {
8438 reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
8439 } else if (len == 4) {
8440 reg = val & ~I40E_GL_PRS_FVBM_MSK_ENA;
8441 } else {
8442 PMD_DRV_LOG(ERR, "Unsupported GRE key length of %u", len);
8443 return ret;
8444 }
8445
8446 if (reg != val) {
8447 ret = i40e_aq_debug_write_global_register(hw,
8448 I40E_GL_PRS_FVBM(2),
8449 reg, NULL);
8450 if (ret != 0)
8451 return ret;
8452 PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
8453 "with value 0x%08x",
8454 I40E_GL_PRS_FVBM(2), reg);
8455 } else {
8456 ret = 0;
8457 }
8458 PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
8459 I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
8460
8461 return ret;
8462 }
8463
8464 static int
8465 i40e_dev_global_config_set(struct i40e_hw *hw, struct rte_eth_global_cfg *cfg)
8466 {
8467 int ret = -EINVAL;
8468
8469 if (!hw || !cfg)
8470 return -EINVAL;
8471
8472 switch (cfg->cfg_type) {
8473 case RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN:
8474 ret = i40e_dev_set_gre_key_len(hw, cfg->cfg.gre_key_len);
8475 break;
8476 default:
8477 PMD_DRV_LOG(ERR, "Unknown config type %u", cfg->cfg_type);
8478 break;
8479 }
8480
8481 return ret;
8482 }
8483
8484 static int
8485 i40e_filter_ctrl_global_config(struct rte_eth_dev *dev,
8486 enum rte_filter_op filter_op,
8487 void *arg)
8488 {
8489 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
8490 int ret = I40E_ERR_PARAM;
8491
8492 switch (filter_op) {
8493 case RTE_ETH_FILTER_SET:
8494 ret = i40e_dev_global_config_set(hw,
8495 (struct rte_eth_global_cfg *)arg);
8496 break;
8497 default:
8498 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8499 break;
8500 }
8501
8502 return ret;
8503 }
8504
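/* Handle ethdev tunnel filter operations by dispatching add/delete requests
 * to i40e_dev_tunnel_filter_set().
 */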
8505 static int
8506 i40e_tunnel_filter_handle(struct rte_eth_dev *dev,
8507 enum rte_filter_op filter_op,
8508 void *arg)
8509 {
8510 struct rte_eth_tunnel_filter_conf *filter;
8511 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
8512 int ret = I40E_SUCCESS;
8513
8514 filter = (struct rte_eth_tunnel_filter_conf *)(arg);
8515
8516 if (i40e_tunnel_filter_param_check(pf, filter) < 0)
8517 return I40E_ERR_PARAM;
8518
8519 switch (filter_op) {
8520 case RTE_ETH_FILTER_NOP:
8521 if (!(pf->flags & I40E_FLAG_VXLAN))
8522 ret = I40E_NOT_SUPPORTED;
8523 break;
8524 case RTE_ETH_FILTER_ADD:
8525 ret = i40e_dev_tunnel_filter_set(pf, filter, 1);
8526 break;
8527 case RTE_ETH_FILTER_DELETE:
8528 ret = i40e_dev_tunnel_filter_set(pf, filter, 0);
8529 break;
8530 default:
8531 PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
8532 ret = I40E_ERR_PARAM;
8533 break;
8534 }
8535
8536 return ret;
8537 }
8538
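/* Configure multi-queue RX: enable RSS when requested by the RX mq_mode,
 * otherwise disable it.
 */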
8539 static int
8540 i40e_pf_config_mq_rx(struct i40e_pf *pf)
8541 {
8542 int ret = 0;
8543 enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
8544
8545 /* RSS setup */
8546 if (mq_mode & ETH_MQ_RX_RSS_FLAG)
8547 ret = i40e_pf_config_rss(pf);
8548 else
8549 i40e_pf_disable_rss(pf);
8550
8551 return ret;
8552 }
8553
8554 /* Get the symmetric hash enable configurations per port */
8555 static void
8556 i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable)
8557 {
8558 uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8559
8560 *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 1 : 0;
8561 }
8562
8563 /* Set the symmetric hash enable configurations per port */
8564 static void
8565 i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
8566 {
8567 uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0);
8568
8569 if (enable > 0) {
8570 if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
8571 PMD_DRV_LOG(INFO,
8572 "Symmetric hash has already been enabled");
8573 return;
8574 }
8575 reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8576 } else {
8577 if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
8578 PMD_DRV_LOG(INFO,
8579 "Symmetric hash has already been disabled");
8580 return;
8581 }
8582 reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
8583 }
8584 i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg);
8585 I40E_WRITE_FLUSH(hw);
8586 }
8587
8588 /*
8589 * Get global configurations of hash function type and symmetric hash enable
8590 * per flow type (pctype). Note that the global configuration affects all
8591 * the ports on the same NIC.
8592 */
8593 static int
8594 i40e_get_hash_filter_global_config(struct i40e_hw *hw,
8595 struct rte_eth_hash_global_conf *g_cfg)
8596 {
8597 struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8598 uint32_t reg;
8599 uint16_t i, j;
8600
8601 memset(g_cfg, 0, sizeof(*g_cfg));
8602 reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8603 if (reg & I40E_GLQF_CTL_HTOEP_MASK)
8604 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
8605 else
8606 g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
8607 PMD_DRV_LOG(DEBUG, "Hash function is %s",
8608 (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR");
8609
8610 /*
8611 * As i40e supports fewer than 64 flow types, only the first 64 bits need to
8612 * be checked.
8613 */
8614 for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8615 g_cfg->valid_bit_mask[i] = 0ULL;
8616 g_cfg->sym_hash_enable_mask[i] = 0ULL;
8617 }
8618
8619 g_cfg->valid_bit_mask[0] = adapter->flow_types_mask;
8620
8621 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) {
8622 if (!adapter->pctypes_tbl[i])
8623 continue;
8624 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8625 j < I40E_FILTER_PCTYPE_MAX; j++) {
8626 if (adapter->pctypes_tbl[i] & (1ULL << j)) {
8627 reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j));
8628 if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) {
8629 g_cfg->sym_hash_enable_mask[0] |=
8630 (1ULL << i);
8631 }
8632 }
8633 }
8634 }
8635
8636 return 0;
8637 }
8638
8639 static int
8640 i40e_hash_global_config_check(const struct i40e_adapter *adapter,
8641 const struct rte_eth_hash_global_conf *g_cfg)
8642 {
8643 uint32_t i;
8644 uint64_t mask0, i40e_mask = adapter->flow_types_mask;
8645
8646 if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ &&
8647 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
8648 g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_DEFAULT) {
8649 PMD_DRV_LOG(ERR, "Unsupported hash function type %d",
8650 g_cfg->hash_func);
8651 return -EINVAL;
8652 }
8653
8654 /*
8655 * As i40e supports fewer than 64 flow types, only the first 64 bits need to
8656 * be checked.
8657 */
8658 mask0 = g_cfg->valid_bit_mask[0];
8659 for (i = 0; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) {
8660 if (i == 0) {
8661 /* Check if any unsupported flow type is configured */
8662 if ((mask0 | i40e_mask) ^ i40e_mask)
8663 goto mask_err;
8664 } else {
8665 if (g_cfg->valid_bit_mask[i])
8666 goto mask_err;
8667 }
8668 }
8669
8670 return 0;
8671
8672 mask_err:
8673 PMD_DRV_LOG(ERR, "i40e unsupported flow type bit(s) configured");
8674
8675 return -EINVAL;
8676 }
8677
8678 /*
8679 * Set global configurations of hash function type and symmetric hash enable
8680 * per flow type (pctype). Note that modifying the global configuration affects
8681 * all the ports on the same NIC.
8682 */
8683 static int
8684 i40e_set_hash_filter_global_config(struct i40e_hw *hw,
8685 struct rte_eth_hash_global_conf *g_cfg)
8686 {
8687 struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back;
8688 struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
8689 int ret;
8690 uint16_t i, j;
8691 uint32_t reg;
8692 uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask;
8693
8694 if (pf->support_multi_driver) {
8695 PMD_DRV_LOG(ERR, "Hash global configuration is not supported.");
8696 return -ENOTSUP;
8697 }
8698
8699 /* Check the input parameters */
8700 ret = i40e_hash_global_config_check(adapter, g_cfg);
8701 if (ret < 0)
8702 return ret;
8703
8704 /*
8705 * As i40e supports fewer than 64 flow types, only the first 64 bits need to
8706 * be configured.
8707 */
8708 for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) {
8709 if (mask0 & (1UL << i)) {
8710 reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ?
8711 I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
8712
8713 for (j = I40E_FILTER_PCTYPE_INVALID + 1;
8714 j < I40E_FILTER_PCTYPE_MAX; j++) {
8715 if (adapter->pctypes_tbl[i] & (1ULL << j))
8716 i40e_write_global_rx_ctl(hw,
8717 I40E_GLQF_HSYM(j),
8718 reg);
8719 }
8720 }
8721 }
8722
8723 reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
8724 if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
8725 /* Toeplitz */
8726 if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
8727 PMD_DRV_LOG(DEBUG,
8728 "Hash function already set to Toeplitz");
8729 goto out;
8730 }
8731 reg |= I40E_GLQF_CTL_HTOEP_MASK;
8732 } else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
8733 /* Simple XOR */
8734 if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
8735 PMD_DRV_LOG(DEBUG,
8736 "Hash function already set to Simple XOR");
8737 goto out;
8738 }
8739 reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
8740 } else
8741 /* Use the default, and keep it as it is */
8742 goto out;
8743
8744 i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
8745
8746 out:
8747 I40E_WRITE_FLUSH(hw);
8748
8749 return 0;
8750 }
8751
8752 /**
8753 * Valid input sets for hash and flow director filters per PCTYPE
8754 */
8755 static uint64_t
8756 i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
8757 enum rte_filter_type filter)
8758 {
8759 uint64_t valid;
8760
8761 static const uint64_t valid_hash_inset_table[] = {
8762 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8763 I40E_INSET_DMAC | I40E_INSET_SMAC |
8764 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8765 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_SRC |
8766 I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
8767 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8768 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8769 I40E_INSET_FLEX_PAYLOAD,
8770 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8771 I40E_INSET_DMAC | I40E_INSET_SMAC |
8772 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8773 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8774 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8775 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8776 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8777 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8778 I40E_INSET_FLEX_PAYLOAD,
8779 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8780 I40E_INSET_DMAC | I40E_INSET_SMAC |
8781 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8782 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8783 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8784 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8785 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8786 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8787 I40E_INSET_FLEX_PAYLOAD,
8788 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8789 I40E_INSET_DMAC | I40E_INSET_SMAC |
8790 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8791 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8792 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8793 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8794 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8795 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8796 I40E_INSET_FLEX_PAYLOAD,
8797 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8798 I40E_INSET_DMAC | I40E_INSET_SMAC |
8799 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8800 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8801 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8802 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8803 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8804 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8805 I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8806 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8807 I40E_INSET_DMAC | I40E_INSET_SMAC |
8808 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8809 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8810 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8811 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8812 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8813 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8814 I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
8815 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8816 I40E_INSET_DMAC | I40E_INSET_SMAC |
8817 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8818 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8819 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8820 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8821 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8822 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8823 I40E_INSET_SCTP_VT | I40E_INSET_FLEX_PAYLOAD,
8824 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8825 I40E_INSET_DMAC | I40E_INSET_SMAC |
8826 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8827 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
8828 I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
8829 I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
8830 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8831 I40E_INSET_FLEX_PAYLOAD,
8832 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8833 I40E_INSET_DMAC | I40E_INSET_SMAC |
8834 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8835 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8836 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8837 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_TUNNEL_DMAC |
8838 I40E_INSET_TUNNEL_ID | I40E_INSET_IPV6_SRC |
8839 I40E_INSET_IPV6_DST | I40E_INSET_FLEX_PAYLOAD,
8840 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8841 I40E_INSET_DMAC | I40E_INSET_SMAC |
8842 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8843 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8844 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8845 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8846 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8847 I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
8848 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8849 I40E_INSET_DMAC | I40E_INSET_SMAC |
8850 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8851 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8852 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8853 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8854 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8855 I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8856 I40E_INSET_FLEX_PAYLOAD,
8857 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8858 I40E_INSET_DMAC | I40E_INSET_SMAC |
8859 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8860 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8861 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8862 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8863 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8864 I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8865 I40E_INSET_FLEX_PAYLOAD,
8866 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8867 I40E_INSET_DMAC | I40E_INSET_SMAC |
8868 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8869 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8870 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8871 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8872 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8873 I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8874 I40E_INSET_FLEX_PAYLOAD,
8875 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8876 I40E_INSET_DMAC | I40E_INSET_SMAC |
8877 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8878 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8879 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8880 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8881 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8882 I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
8883 I40E_INSET_FLEX_PAYLOAD,
8884 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8885 I40E_INSET_DMAC | I40E_INSET_SMAC |
8886 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8887 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8888 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8889 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8890 I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
8891 I40E_INSET_DST_PORT | I40E_INSET_SCTP_VT |
8892 I40E_INSET_FLEX_PAYLOAD,
8893 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8894 I40E_INSET_DMAC | I40E_INSET_SMAC |
8895 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8896 I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
8897 I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
8898 I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
8899 I40E_INSET_IPV6_DST | I40E_INSET_TUNNEL_ID |
8900 I40E_INSET_FLEX_PAYLOAD,
8901 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8902 I40E_INSET_DMAC | I40E_INSET_SMAC |
8903 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8904 I40E_INSET_VLAN_TUNNEL | I40E_INSET_LAST_ETHER_TYPE |
8905 I40E_INSET_FLEX_PAYLOAD,
8906 };
8907
8908 /**
8909 * Flow director supports only fields defined in
8910 * union rte_eth_fdir_flow.
8911 */
8912 static const uint64_t valid_fdir_inset_table[] = {
8913 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
8914 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8915 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8916 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8917 I40E_INSET_IPV4_TTL,
8918 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
8919 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8920 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8921 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8922 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8923 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
8924 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8925 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8926 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8927 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8928 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
8929 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8930 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8931 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8932 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8933 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
8934 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8935 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8936 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8937 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8938 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
8939 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8940 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8941 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8942 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8943 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
8944 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8945 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8946 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
8947 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8948 I40E_INSET_SCTP_VT,
8949 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
8950 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8951 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
8952 I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_PROTO |
8953 I40E_INSET_IPV4_TTL,
8954 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
8955 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8956 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8957 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8958 I40E_INSET_IPV6_HOP_LIMIT,
8959 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
8960 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8961 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8962 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8963 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8964 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
8965 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8966 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8967 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8968 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8969 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
8970 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8971 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8972 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8973 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8974 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
8975 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8976 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8977 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8978 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8979 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
8980 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8981 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8982 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8983 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
8984 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
8985 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8986 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8987 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
8988 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
8989 I40E_INSET_SCTP_VT,
8990 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
8991 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8992 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
8993 I40E_INSET_IPV6_TC | I40E_INSET_IPV6_NEXT_HDR |
8994 I40E_INSET_IPV6_HOP_LIMIT,
8995 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
8996 I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
8997 I40E_INSET_LAST_ETHER_TYPE,
8998 };
8999
9000 if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9001 return 0;
9002 if (filter == RTE_ETH_FILTER_HASH)
9003 valid = valid_hash_inset_table[pctype];
9004 else
9005 valid = valid_fdir_inset_table[pctype];
9006
9007 return valid;
9008 }
9009
9010 /**
9011 * Validate if the input set is allowed for a specific PCTYPE
9012 */
9013 int
9014 i40e_validate_input_set(enum i40e_filter_pctype pctype,
9015 enum rte_filter_type filter, uint64_t inset)
9016 {
9017 uint64_t valid;
9018
9019 valid = i40e_get_valid_input_set(pctype, filter);
9020 if (inset & (~valid))
9021 return -EINVAL;
9022
9023 return 0;
9024 }
9025
9026 /* default input set fields combination per pctype */
9027 uint64_t
9028 i40e_get_default_input_set(uint16_t pctype)
9029 {
9030 static const uint64_t default_inset_table[] = {
9031 [I40E_FILTER_PCTYPE_FRAG_IPV4] =
9032 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9033 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
9034 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9035 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9036 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
9037 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9038 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9039 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
9040 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9041 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9042 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
9043 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9044 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9045 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
9046 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9047 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9048 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
9049 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
9050 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9051 I40E_INSET_SCTP_VT,
9052 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
9053 I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST,
9054 [I40E_FILTER_PCTYPE_FRAG_IPV6] =
9055 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9056 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
9057 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9058 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9059 [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
9060 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9061 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9062 [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
9063 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9064 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9065 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
9066 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9067 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9068 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
9069 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9070 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
9071 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
9072 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
9073 I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
9074 I40E_INSET_SCTP_VT,
9075 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
9076 I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST,
9077 [I40E_FILTER_PCTYPE_L2_PAYLOAD] =
9078 I40E_INSET_LAST_ETHER_TYPE,
9079 };
9080
9081 if (pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD)
9082 return 0;
9083
9084 return default_inset_table[pctype];
9085 }
9086
9087 /**
9088 * Parse the input set from index to logical bit masks
9089 */
9090 static int
9091 i40e_parse_input_set(uint64_t *inset,
9092 enum i40e_filter_pctype pctype,
9093 enum rte_eth_input_set_field *field,
9094 uint16_t size)
9095 {
9096 uint16_t i, j;
9097 int ret = -EINVAL;
9098
9099 static const struct {
9100 enum rte_eth_input_set_field field;
9101 uint64_t inset;
9102 } inset_convert_table[] = {
9103 {RTE_ETH_INPUT_SET_NONE, I40E_INSET_NONE},
9104 {RTE_ETH_INPUT_SET_L2_SRC_MAC, I40E_INSET_SMAC},
9105 {RTE_ETH_INPUT_SET_L2_DST_MAC, I40E_INSET_DMAC},
9106 {RTE_ETH_INPUT_SET_L2_OUTER_VLAN, I40E_INSET_VLAN_OUTER},
9107 {RTE_ETH_INPUT_SET_L2_INNER_VLAN, I40E_INSET_VLAN_INNER},
9108 {RTE_ETH_INPUT_SET_L2_ETHERTYPE, I40E_INSET_LAST_ETHER_TYPE},
9109 {RTE_ETH_INPUT_SET_L3_SRC_IP4, I40E_INSET_IPV4_SRC},
9110 {RTE_ETH_INPUT_SET_L3_DST_IP4, I40E_INSET_IPV4_DST},
9111 {RTE_ETH_INPUT_SET_L3_IP4_TOS, I40E_INSET_IPV4_TOS},
9112 {RTE_ETH_INPUT_SET_L3_IP4_PROTO, I40E_INSET_IPV4_PROTO},
9113 {RTE_ETH_INPUT_SET_L3_IP4_TTL, I40E_INSET_IPV4_TTL},
9114 {RTE_ETH_INPUT_SET_L3_SRC_IP6, I40E_INSET_IPV6_SRC},
9115 {RTE_ETH_INPUT_SET_L3_DST_IP6, I40E_INSET_IPV6_DST},
9116 {RTE_ETH_INPUT_SET_L3_IP6_TC, I40E_INSET_IPV6_TC},
9117 {RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER,
9118 I40E_INSET_IPV6_NEXT_HDR},
9119 {RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS,
9120 I40E_INSET_IPV6_HOP_LIMIT},
9121 {RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT, I40E_INSET_SRC_PORT},
9122 {RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT, I40E_INSET_SRC_PORT},
9123 {RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT, I40E_INSET_SRC_PORT},
9124 {RTE_ETH_INPUT_SET_L4_UDP_DST_PORT, I40E_INSET_DST_PORT},
9125 {RTE_ETH_INPUT_SET_L4_TCP_DST_PORT, I40E_INSET_DST_PORT},
9126 {RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT, I40E_INSET_DST_PORT},
9127 {RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG,
9128 I40E_INSET_SCTP_VT},
9129 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_DST_MAC,
9130 I40E_INSET_TUNNEL_DMAC},
9131 {RTE_ETH_INPUT_SET_TUNNEL_L2_INNER_VLAN,
9132 I40E_INSET_VLAN_TUNNEL},
9133 {RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY,
9134 I40E_INSET_TUNNEL_ID},
9135 {RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY, I40E_INSET_TUNNEL_ID},
9136 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD,
9137 I40E_INSET_FLEX_PAYLOAD_W1},
9138 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD,
9139 I40E_INSET_FLEX_PAYLOAD_W2},
9140 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD,
9141 I40E_INSET_FLEX_PAYLOAD_W3},
9142 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD,
9143 I40E_INSET_FLEX_PAYLOAD_W4},
9144 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD,
9145 I40E_INSET_FLEX_PAYLOAD_W5},
9146 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD,
9147 I40E_INSET_FLEX_PAYLOAD_W6},
9148 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD,
9149 I40E_INSET_FLEX_PAYLOAD_W7},
9150 {RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD,
9151 I40E_INSET_FLEX_PAYLOAD_W8},
9152 };
9153
9154 if (!inset || !field || size > RTE_ETH_INSET_SIZE_MAX)
9155 return ret;
9156
9157 	/* Only one item is allowed when selecting DEFAULT or NONE */
9158 if (size == 1) {
9159 if (field[0] == RTE_ETH_INPUT_SET_DEFAULT) {
9160 *inset = i40e_get_default_input_set(pctype);
9161 return 0;
9162 } else if (field[0] == RTE_ETH_INPUT_SET_NONE) {
9163 *inset = I40E_INSET_NONE;
9164 return 0;
9165 }
9166 }
9167
9168 for (i = 0, *inset = 0; i < size; i++) {
9169 for (j = 0; j < RTE_DIM(inset_convert_table); j++) {
9170 if (field[i] == inset_convert_table[j].field) {
9171 *inset |= inset_convert_table[j].inset;
9172 break;
9173 }
9174 }
9175
9176 		/* Return immediately if an unsupported input set field is found */
9177 if (j == RTE_DIM(inset_convert_table))
9178 return ret;
9179 }
9180
9181 return 0;
9182 }
9183
9184 /**
9185 * Translate the input set from bit masks to register aware bit masks
9186 * and vice versa
9187 */
9188 uint64_t
9189 i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
9190 {
9191 uint64_t val = 0;
9192 uint16_t i;
9193
9194 struct inset_map {
9195 uint64_t inset;
9196 uint64_t inset_reg;
9197 };
9198
9199 static const struct inset_map inset_map_common[] = {
9200 {I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
9201 {I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
9202 {I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
9203 {I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
9204 {I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
9205 {I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
9206 {I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
9207 {I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
9208 {I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
9209 {I40E_INSET_IPV6_NEXT_HDR, I40E_REG_INSET_L3_IP6_NEXT_HDR},
9210 {I40E_INSET_IPV6_HOP_LIMIT, I40E_REG_INSET_L3_IP6_HOP_LIMIT},
9211 {I40E_INSET_SRC_PORT, I40E_REG_INSET_L4_SRC_PORT},
9212 {I40E_INSET_DST_PORT, I40E_REG_INSET_L4_DST_PORT},
9213 {I40E_INSET_SCTP_VT, I40E_REG_INSET_L4_SCTP_VERIFICATION_TAG},
9214 {I40E_INSET_TUNNEL_ID, I40E_REG_INSET_TUNNEL_ID},
9215 {I40E_INSET_TUNNEL_DMAC,
9216 I40E_REG_INSET_TUNNEL_L2_INNER_DST_MAC},
9217 {I40E_INSET_TUNNEL_IPV4_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP4},
9218 {I40E_INSET_TUNNEL_IPV6_DST, I40E_REG_INSET_TUNNEL_L3_DST_IP6},
9219 {I40E_INSET_TUNNEL_SRC_PORT,
9220 I40E_REG_INSET_TUNNEL_L4_UDP_SRC_PORT},
9221 {I40E_INSET_TUNNEL_DST_PORT,
9222 I40E_REG_INSET_TUNNEL_L4_UDP_DST_PORT},
9223 {I40E_INSET_VLAN_TUNNEL, I40E_REG_INSET_TUNNEL_VLAN},
9224 {I40E_INSET_FLEX_PAYLOAD_W1, I40E_REG_INSET_FLEX_PAYLOAD_WORD1},
9225 {I40E_INSET_FLEX_PAYLOAD_W2, I40E_REG_INSET_FLEX_PAYLOAD_WORD2},
9226 {I40E_INSET_FLEX_PAYLOAD_W3, I40E_REG_INSET_FLEX_PAYLOAD_WORD3},
9227 {I40E_INSET_FLEX_PAYLOAD_W4, I40E_REG_INSET_FLEX_PAYLOAD_WORD4},
9228 {I40E_INSET_FLEX_PAYLOAD_W5, I40E_REG_INSET_FLEX_PAYLOAD_WORD5},
9229 {I40E_INSET_FLEX_PAYLOAD_W6, I40E_REG_INSET_FLEX_PAYLOAD_WORD6},
9230 {I40E_INSET_FLEX_PAYLOAD_W7, I40E_REG_INSET_FLEX_PAYLOAD_WORD7},
9231 {I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
9232 };
9233
9234 	/* Some registers map differently on X722 */
9235 static const struct inset_map inset_map_diff_x722[] = {
9236 {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
9237 {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
9238 {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
9239 {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
9240 };
9241
9242 static const struct inset_map inset_map_diff_not_x722[] = {
9243 {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
9244 {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
9245 {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
9246 {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
9247 };
9248
9249 if (input == 0)
9250 return val;
9251
9252 /* Translate input set to register aware inset */
9253 if (type == I40E_MAC_X722) {
9254 for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
9255 if (input & inset_map_diff_x722[i].inset)
9256 val |= inset_map_diff_x722[i].inset_reg;
9257 }
9258 } else {
9259 for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
9260 if (input & inset_map_diff_not_x722[i].inset)
9261 val |= inset_map_diff_not_x722[i].inset_reg;
9262 }
9263 }
9264
9265 for (i = 0; i < RTE_DIM(inset_map_common); i++) {
9266 if (input & inset_map_common[i].inset)
9267 val |= inset_map_common[i].inset_reg;
9268 }
9269
9270 return val;
9271 }
9272
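/**
 * Generate the per-field bit-mask register values needed by the given
 * input set. Fills up to @nb_elem entries of @mask and returns the number
 * of entries used, 0 if no mask is required, or -EINVAL on overflow.
 */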
9273 int
9274 i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem)
9275 {
9276 uint8_t i, idx = 0;
9277 uint64_t inset_need_mask = inset;
9278
9279 static const struct {
9280 uint64_t inset;
9281 uint32_t mask;
9282 } inset_mask_map[] = {
9283 {I40E_INSET_IPV4_TOS, I40E_INSET_IPV4_TOS_MASK},
9284 {I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL, 0},
9285 {I40E_INSET_IPV4_PROTO, I40E_INSET_IPV4_PROTO_MASK},
9286 {I40E_INSET_IPV4_TTL, I40E_INSET_IPv4_TTL_MASK},
9287 {I40E_INSET_IPV6_TC, I40E_INSET_IPV6_TC_MASK},
9288 {I40E_INSET_IPV6_NEXT_HDR | I40E_INSET_IPV6_HOP_LIMIT, 0},
9289 {I40E_INSET_IPV6_NEXT_HDR, I40E_INSET_IPV6_NEXT_HDR_MASK},
9290 {I40E_INSET_IPV6_HOP_LIMIT, I40E_INSET_IPV6_HOP_LIMIT_MASK},
9291 };
9292
9293 if (!inset || !mask || !nb_elem)
9294 return 0;
9295
9296 for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9297 		/* Clear the inset bit if no mask register is required,
9298 		 * e.g. when proto and ttl are requested together
9299 		 */
9300 if ((inset & inset_mask_map[i].inset) ==
9301 inset_mask_map[i].inset && inset_mask_map[i].mask == 0)
9302 inset_need_mask &= ~inset_mask_map[i].inset;
9303 if (!inset_need_mask)
9304 return 0;
9305 }
9306 for (i = 0, idx = 0; i < RTE_DIM(inset_mask_map); i++) {
9307 if ((inset_need_mask & inset_mask_map[i].inset) ==
9308 inset_mask_map[i].inset) {
9309 if (idx >= nb_elem) {
9310 				PMD_DRV_LOG(ERR, "Exceeded the maximum number of bit masks");
9311 return -EINVAL;
9312 }
9313 mask[idx] = inset_mask_map[i].mask;
9314 idx++;
9315 }
9316 }
9317
9318 return idx;
9319 }
9320
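/**
 * Write an RX control register only if the new value differs from the
 * current one, logging the register content before and after.
 */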
9321 void
9322 i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9323 {
9324 uint32_t reg = i40e_read_rx_ctl(hw, addr);
9325
9326 PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
9327 if (reg != val)
9328 i40e_write_rx_ctl(hw, addr, val);
9329 PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
9330 (uint32_t)i40e_read_rx_ctl(hw, addr));
9331 }
9332
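/**
 * Write a global (device-wide) RX control register only if the new value
 * differs from the current one, and log a warning recording the change.
 */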
9333 void
9334 i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
9335 {
9336 uint32_t reg = i40e_read_rx_ctl(hw, addr);
9337 struct rte_eth_dev *dev;
9338
9339 dev = ((struct i40e_adapter *)hw->back)->eth_dev;
9340 if (reg != val) {
9341 i40e_write_rx_ctl(hw, addr, val);
9342 PMD_DRV_LOG(WARNING,
9343 "i40e device %s changed global register [0x%08x]."
9344 " original: 0x%08x, new: 0x%08x",
9345 dev->device->name, addr, reg,
9346 (uint32_t)i40e_read_rx_ctl(hw, addr));
9347 }
9348 }
9349
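/**
 * Program the default input set of every supported pctype into the FDIR
 * inset registers and, when multi-driver support is disabled, into the
 * global hash inset and mask registers; the defaults are also recorded
 * in the PF structure.
 */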
9350 static void
9351 i40e_filter_input_set_init(struct i40e_pf *pf)
9352 {
9353 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9354 enum i40e_filter_pctype pctype;
9355 uint64_t input_set, inset_reg;
9356 uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9357 int num, i;
9358 uint16_t flow_type;
9359
9360 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
9361 pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
9362 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
9363
9364 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
9365 continue;
9366
9367 input_set = i40e_get_default_input_set(pctype);
9368
9369 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9370 I40E_INSET_MASK_NUM_REG);
9371 if (num < 0)
9372 return;
9373 if (pf->support_multi_driver && num > 0) {
9374 PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9375 return;
9376 }
9377 inset_reg = i40e_translate_input_set_reg(hw->mac.type,
9378 input_set);
9379
9380 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9381 (uint32_t)(inset_reg & UINT32_MAX));
9382 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9383 (uint32_t)((inset_reg >>
9384 I40E_32_BIT_WIDTH) & UINT32_MAX));
9385 if (!pf->support_multi_driver) {
9386 i40e_check_write_global_reg(hw,
9387 I40E_GLQF_HASH_INSET(0, pctype),
9388 (uint32_t)(inset_reg & UINT32_MAX));
9389 i40e_check_write_global_reg(hw,
9390 I40E_GLQF_HASH_INSET(1, pctype),
9391 (uint32_t)((inset_reg >>
9392 I40E_32_BIT_WIDTH) & UINT32_MAX));
9393
9394 for (i = 0; i < num; i++) {
9395 i40e_check_write_global_reg(hw,
9396 I40E_GLQF_FD_MSK(i, pctype),
9397 mask_reg[i]);
9398 i40e_check_write_global_reg(hw,
9399 I40E_GLQF_HASH_MSK(i, pctype),
9400 mask_reg[i]);
9401 }
9402 			/* Clear unused mask registers of the pctype */
9403 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) {
9404 i40e_check_write_global_reg(hw,
9405 I40E_GLQF_FD_MSK(i, pctype),
9406 0);
9407 i40e_check_write_global_reg(hw,
9408 I40E_GLQF_HASH_MSK(i, pctype),
9409 0);
9410 }
9411 } else {
9412 PMD_DRV_LOG(ERR, "Input set setting is not supported.");
9413 }
9414 I40E_WRITE_FLUSH(hw);
9415
9416 /* store the default input set */
9417 if (!pf->support_multi_driver)
9418 pf->hash_input_set[pctype] = input_set;
9419 pf->fdir.input_set[pctype] = input_set;
9420 }
9421 }
9422
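/**
 * Configure the hash (RSS) input set for the flow type given in @conf:
 * parse the requested fields, optionally OR them into the current set
 * (RTE_ETH_INPUT_SET_ADD), and program the global hash inset and mask
 * registers.
 */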
9423 int
9424 i40e_hash_filter_inset_select(struct i40e_hw *hw,
9425 struct rte_eth_input_set_conf *conf)
9426 {
9427 struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf;
9428 enum i40e_filter_pctype pctype;
9429 uint64_t input_set, inset_reg = 0;
9430 uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9431 int ret, i, num;
9432
9433 if (!conf) {
9434 PMD_DRV_LOG(ERR, "Invalid pointer");
9435 return -EFAULT;
9436 }
9437 if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9438 conf->op != RTE_ETH_INPUT_SET_ADD) {
9439 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9440 return -EINVAL;
9441 }
9442
9443 if (pf->support_multi_driver) {
9444 PMD_DRV_LOG(ERR, "Hash input set setting is not supported.");
9445 return -ENOTSUP;
9446 }
9447
9448 pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9449 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9450 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9451 return -EINVAL;
9452 }
9453
9454 if (hw->mac.type == I40E_MAC_X722) {
9455 /* get translated pctype value in fd pctype register */
9456 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
9457 I40E_GLQF_FD_PCTYPES((int)pctype));
9458 }
9459
9460 ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9461 conf->inset_size);
9462 if (ret) {
9463 PMD_DRV_LOG(ERR, "Failed to parse input set");
9464 return -EINVAL;
9465 }
9466
9467 if (conf->op == RTE_ETH_INPUT_SET_ADD) {
9468 /* get inset value in register */
9469 inset_reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype));
9470 inset_reg <<= I40E_32_BIT_WIDTH;
9471 inset_reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype));
9472 input_set |= pf->hash_input_set[pctype];
9473 }
9474 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9475 I40E_INSET_MASK_NUM_REG);
9476 if (num < 0)
9477 return -EINVAL;
9478
9479 inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9480
9481 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
9482 (uint32_t)(inset_reg & UINT32_MAX));
9483 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
9484 (uint32_t)((inset_reg >>
9485 I40E_32_BIT_WIDTH) & UINT32_MAX));
9486
9487 for (i = 0; i < num; i++)
9488 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9489 mask_reg[i]);
9490 	/* Clear unused mask registers of the pctype */
9491 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9492 i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
9493 0);
9494 I40E_WRITE_FLUSH(hw);
9495
9496 pf->hash_input_set[pctype] = input_set;
9497 return 0;
9498 }
9499
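/**
 * Configure the flow director input set for the flow type given in @conf
 * and program the per-port FDIR inset registers; the global FDIR mask
 * registers are only touched when multi-driver support is disabled.
 */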
9500 int
9501 i40e_fdir_filter_inset_select(struct i40e_pf *pf,
9502 struct rte_eth_input_set_conf *conf)
9503 {
9504 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9505 enum i40e_filter_pctype pctype;
9506 uint64_t input_set, inset_reg = 0;
9507 uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
9508 int ret, i, num;
9509
9510 if (!hw || !conf) {
9511 PMD_DRV_LOG(ERR, "Invalid pointer");
9512 return -EFAULT;
9513 }
9514 if (conf->op != RTE_ETH_INPUT_SET_SELECT &&
9515 conf->op != RTE_ETH_INPUT_SET_ADD) {
9516 PMD_DRV_LOG(ERR, "Unsupported input set operation");
9517 return -EINVAL;
9518 }
9519
9520 pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type);
9521
9522 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
9523 PMD_DRV_LOG(ERR, "invalid flow_type input.");
9524 return -EINVAL;
9525 }
9526
9527 ret = i40e_parse_input_set(&input_set, pctype, conf->field,
9528 conf->inset_size);
9529 if (ret) {
9530 PMD_DRV_LOG(ERR, "Failed to parse input set");
9531 return -EINVAL;
9532 }
9533
9534 /* get inset value in register */
9535 inset_reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1));
9536 inset_reg <<= I40E_32_BIT_WIDTH;
9537 inset_reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0));
9538
9539 	/* The inset register for the flex payload cannot be changed for FDIR;
9540 	 * it is configured by writing I40E_PRTQF_FD_FLXINSET
9541 	 * in i40e_set_flex_mask_on_pctype.
9542 */
9543 if (conf->op == RTE_ETH_INPUT_SET_SELECT)
9544 inset_reg &= I40E_REG_INSET_FLEX_PAYLOAD_WORDS;
9545 else
9546 input_set |= pf->fdir.input_set[pctype];
9547 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
9548 I40E_INSET_MASK_NUM_REG);
9549 if (num < 0)
9550 return -EINVAL;
9551 if (pf->support_multi_driver && num > 0) {
9552 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9553 return -ENOTSUP;
9554 }
9555
9556 inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
9557
9558 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
9559 (uint32_t)(inset_reg & UINT32_MAX));
9560 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
9561 (uint32_t)((inset_reg >>
9562 I40E_32_BIT_WIDTH) & UINT32_MAX));
9563
9564 if (!pf->support_multi_driver) {
9565 for (i = 0; i < num; i++)
9566 i40e_check_write_global_reg(hw,
9567 I40E_GLQF_FD_MSK(i, pctype),
9568 mask_reg[i]);
9569 		/* Clear unused mask registers of the pctype */
9570 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
9571 i40e_check_write_global_reg(hw,
9572 I40E_GLQF_FD_MSK(i, pctype),
9573 0);
9574 } else {
9575 PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
9576 }
9577 I40E_WRITE_FLUSH(hw);
9578
9579 pf->fdir.input_set[pctype] = input_set;
9580 return 0;
9581 }
9582
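/* Get hash filter info: symmetric hash enable per port or global config */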
9583 static int
9584 i40e_hash_filter_get(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9585 {
9586 int ret = 0;
9587
9588 if (!hw || !info) {
9589 PMD_DRV_LOG(ERR, "Invalid pointer");
9590 return -EFAULT;
9591 }
9592
9593 switch (info->info_type) {
9594 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9595 i40e_get_symmetric_hash_enable_per_port(hw,
9596 &(info->info.enable));
9597 break;
9598 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9599 ret = i40e_get_hash_filter_global_config(hw,
9600 &(info->info.global_conf));
9601 break;
9602 default:
9603 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9604 info->info_type);
9605 ret = -EINVAL;
9606 break;
9607 }
9608
9609 return ret;
9610 }
9611
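/* Set hash filter info: symmetric hash enable, global config or input set */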
9612 static int
9613 i40e_hash_filter_set(struct i40e_hw *hw, struct rte_eth_hash_filter_info *info)
9614 {
9615 int ret = 0;
9616
9617 if (!hw || !info) {
9618 PMD_DRV_LOG(ERR, "Invalid pointer");
9619 return -EFAULT;
9620 }
9621
9622 switch (info->info_type) {
9623 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
9624 i40e_set_symmetric_hash_enable_per_port(hw, info->info.enable);
9625 break;
9626 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
9627 ret = i40e_set_hash_filter_global_config(hw,
9628 &(info->info.global_conf));
9629 break;
9630 case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
9631 ret = i40e_hash_filter_inset_select(hw,
9632 &(info->info.input_set_conf));
9633 break;
9634
9635 default:
9636 PMD_DRV_LOG(ERR, "Hash filter info type (%d) not supported",
9637 info->info_type);
9638 ret = -EINVAL;
9639 break;
9640 }
9641
9642 return ret;
9643 }
9644
9645 /* Operations for hash function */
9646 static int
9647 i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
9648 enum rte_filter_op filter_op,
9649 void *arg)
9650 {
9651 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9652 int ret = 0;
9653
9654 switch (filter_op) {
9655 case RTE_ETH_FILTER_NOP:
9656 break;
9657 case RTE_ETH_FILTER_GET:
9658 ret = i40e_hash_filter_get(hw,
9659 (struct rte_eth_hash_filter_info *)arg);
9660 break;
9661 case RTE_ETH_FILTER_SET:
9662 ret = i40e_hash_filter_set(hw,
9663 (struct rte_eth_hash_filter_info *)arg);
9664 break;
9665 default:
9666 PMD_DRV_LOG(WARNING, "Filter operation (%d) not supported",
9667 filter_op);
9668 ret = -ENOTSUP;
9669 break;
9670 }
9671
9672 return ret;
9673 }
9674
9675 /* Convert ethertype filter structure */
9676 static int
9677 i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
9678 struct i40e_ethertype_filter *filter)
9679 {
9680 rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
9681 filter->input.ether_type = input->ether_type;
9682 filter->flags = input->flags;
9683 filter->queue = input->queue;
9684
9685 return 0;
9686 }
9687
9688 /* Check if the ethertype filter already exists in the SW list */
9689 struct i40e_ethertype_filter *
9690 i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
9691 const struct i40e_ethertype_filter_input *input)
9692 {
9693 int ret;
9694
9695 ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
9696 if (ret < 0)
9697 return NULL;
9698
9699 return ethertype_rule->hash_map[ret];
9700 }
9701
9702 /* Add ethertype filter in SW list */
9703 static int
9704 i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
9705 struct i40e_ethertype_filter *filter)
9706 {
9707 struct i40e_ethertype_rule *rule = &pf->ethertype;
9708 int ret;
9709
9710 ret = rte_hash_add_key(rule->hash_table, &filter->input);
9711 if (ret < 0) {
9712 PMD_DRV_LOG(ERR,
9713 "Failed to insert ethertype filter"
9714 " to hash table %d!",
9715 ret);
9716 return ret;
9717 }
9718 rule->hash_map[ret] = filter;
9719
9720 TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
9721
9722 return 0;
9723 }
9724
9725 /* Delete ethertype filter in SW list */
9726 int
9727 i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
9728 struct i40e_ethertype_filter_input *input)
9729 {
9730 struct i40e_ethertype_rule *rule = &pf->ethertype;
9731 struct i40e_ethertype_filter *filter;
9732 int ret;
9733
9734 ret = rte_hash_del_key(rule->hash_table, input);
9735 if (ret < 0) {
9736 PMD_DRV_LOG(ERR,
9737 "Failed to delete ethertype filter"
9738 " to hash table %d!",
9739 ret);
9740 return ret;
9741 }
9742 filter = rule->hash_map[ret];
9743 rule->hash_map[ret] = NULL;
9744
9745 TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
9746 rte_free(filter);
9747
9748 return 0;
9749 }
9750
9751 /*
9752  * Configure an ethertype filter, which can direct packets by filtering
9753  * on MAC address and ether_type, or on ether_type alone
9754 */
9755 int
9756 i40e_ethertype_filter_set(struct i40e_pf *pf,
9757 struct rte_eth_ethertype_filter *filter,
9758 bool add)
9759 {
9760 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
9761 struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
9762 struct i40e_ethertype_filter *ethertype_filter, *node;
9763 struct i40e_ethertype_filter check_filter;
9764 struct i40e_control_filter_stats stats;
9765 uint16_t flags = 0;
9766 int ret;
9767
9768 if (filter->queue >= pf->dev_data->nb_rx_queues) {
9769 PMD_DRV_LOG(ERR, "Invalid queue ID");
9770 return -EINVAL;
9771 }
9772 if (filter->ether_type == ETHER_TYPE_IPv4 ||
9773 filter->ether_type == ETHER_TYPE_IPv6) {
9774 PMD_DRV_LOG(ERR,
9775 "unsupported ether_type(0x%04x) in control packet filter.",
9776 filter->ether_type);
9777 return -EINVAL;
9778 }
9779 if (filter->ether_type == ETHER_TYPE_VLAN)
9780 PMD_DRV_LOG(WARNING,
9781 "filter vlan ether_type in first tag is not supported.");
9782
9783 /* Check if there is the filter in SW list */
9784 memset(&check_filter, 0, sizeof(check_filter));
9785 i40e_ethertype_filter_convert(filter, &check_filter);
9786 node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
9787 &check_filter.input);
9788 if (add && node) {
9789 PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
9790 return -EINVAL;
9791 }
9792
9793 if (!add && !node) {
9794 PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
9795 return -EINVAL;
9796 }
9797
9798 if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
9799 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
9800 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
9801 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
9802 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
9803
9804 memset(&stats, 0, sizeof(stats));
9805 ret = i40e_aq_add_rem_control_packet_filter(hw,
9806 filter->mac_addr.addr_bytes,
9807 filter->ether_type, flags,
9808 pf->main_vsi->seid,
9809 filter->queue, add, &stats, NULL);
9810
9811 PMD_DRV_LOG(INFO,
9812 "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
9813 ret, stats.mac_etype_used, stats.etype_used,
9814 stats.mac_etype_free, stats.etype_free);
9815 if (ret < 0)
9816 return -ENOSYS;
9817
9818 /* Add or delete a filter in SW list */
9819 if (add) {
9820 ethertype_filter = rte_zmalloc("ethertype_filter",
9821 sizeof(*ethertype_filter), 0);
9822 if (ethertype_filter == NULL) {
9823 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
9824 return -ENOMEM;
9825 }
9826
9827 rte_memcpy(ethertype_filter, &check_filter,
9828 sizeof(check_filter));
9829 ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
9830 if (ret < 0)
9831 rte_free(ethertype_filter);
9832 } else {
9833 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
9834 }
9835
9836 return ret;
9837 }
9838
9839 /*
9840 * Handle operations for ethertype filter.
9841 */
9842 static int
9843 i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
9844 enum rte_filter_op filter_op,
9845 void *arg)
9846 {
9847 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
9848 int ret = 0;
9849
9850 if (filter_op == RTE_ETH_FILTER_NOP)
9851 return ret;
9852
9853 if (arg == NULL) {
9854 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
9855 filter_op);
9856 return -EINVAL;
9857 }
9858
9859 switch (filter_op) {
9860 case RTE_ETH_FILTER_ADD:
9861 ret = i40e_ethertype_filter_set(pf,
9862 (struct rte_eth_ethertype_filter *)arg,
9863 TRUE);
9864 break;
9865 case RTE_ETH_FILTER_DELETE:
9866 ret = i40e_ethertype_filter_set(pf,
9867 (struct rte_eth_ethertype_filter *)arg,
9868 FALSE);
9869 break;
9870 default:
9871 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
9872 ret = -ENOSYS;
9873 break;
9874 }
9875 return ret;
9876 }
9877
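/* Dispatch filter control requests to the handler of the given filter type */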
9878 static int
9879 i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
9880 enum rte_filter_type filter_type,
9881 enum rte_filter_op filter_op,
9882 void *arg)
9883 {
9884 int ret = 0;
9885
9886 if (dev == NULL)
9887 return -EINVAL;
9888
9889 switch (filter_type) {
9890 case RTE_ETH_FILTER_NONE:
9891 /* For global configuration */
9892 ret = i40e_filter_ctrl_global_config(dev, filter_op, arg);
9893 break;
9894 case RTE_ETH_FILTER_HASH:
9895 ret = i40e_hash_filter_ctrl(dev, filter_op, arg);
9896 break;
9897 case RTE_ETH_FILTER_MACVLAN:
9898 ret = i40e_mac_filter_handle(dev, filter_op, arg);
9899 break;
9900 case RTE_ETH_FILTER_ETHERTYPE:
9901 ret = i40e_ethertype_filter_handle(dev, filter_op, arg);
9902 break;
9903 case RTE_ETH_FILTER_TUNNEL:
9904 ret = i40e_tunnel_filter_handle(dev, filter_op, arg);
9905 break;
9906 case RTE_ETH_FILTER_FDIR:
9907 ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
9908 break;
9909 case RTE_ETH_FILTER_GENERIC:
9910 if (filter_op != RTE_ETH_FILTER_GET)
9911 return -EINVAL;
9912 *(const void **)arg = &i40e_flow_ops;
9913 break;
9914 default:
9915 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
9916 filter_type);
9917 ret = -EINVAL;
9918 break;
9919 }
9920
9921 return ret;
9922 }
9923
9924 /*
9925 * Check and enable Extended Tag.
9926 * Enabling Extended Tag is important for 40G performance.
9927 */
9928 static void
9929 i40e_enable_extended_tag(struct rte_eth_dev *dev)
9930 {
9931 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
9932 uint32_t buf = 0;
9933 int ret;
9934
9935 ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9936 PCI_DEV_CAP_REG);
9937 if (ret < 0) {
9938 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9939 PCI_DEV_CAP_REG);
9940 return;
9941 }
9942 if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) {
9943 PMD_DRV_LOG(ERR, "Does not support Extended Tag");
9944 return;
9945 }
9946
9947 buf = 0;
9948 ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
9949 PCI_DEV_CTRL_REG);
9950 if (ret < 0) {
9951 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
9952 PCI_DEV_CTRL_REG);
9953 return;
9954 }
9955 if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) {
9956 PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled");
9957 return;
9958 }
9959 buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
9960 ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
9961 PCI_DEV_CTRL_REG);
9962 if (ret < 0) {
9963 PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
9964 PCI_DEV_CTRL_REG);
9965 return;
9966 }
9967 }
9968
9969 /*
9970  * Some registers are not reset except by a global hardware reset, so
9971  * hardware initialization is needed to put those registers into an
9972  * expected initial state.
9973 */
9974 static void
9975 i40e_hw_init(struct rte_eth_dev *dev)
9976 {
9977 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
9978
9979 i40e_enable_extended_tag(dev);
9980
9981 /* clear the PF Queue Filter control register */
9982 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0);
9983
9984 /* Disable symmetric hash per port */
9985 i40e_set_symmetric_hash_enable_per_port(hw, 0);
9986 }
9987
9988 /*
9989  * On X722 multiple pctypes can be mapped to the same flow type;
9990  * however, this function returns only the highest pctype index,
9991  * which is not quite correct. This is a known problem of the i40e
9992  * driver and needs to be fixed later.
9993 */
9994 enum i40e_filter_pctype
9995 i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type)
9996 {
9997 int i;
9998 uint64_t pctype_mask;
9999
10000 if (flow_type < I40E_FLOW_TYPE_MAX) {
10001 pctype_mask = adapter->pctypes_tbl[flow_type];
10002 for (i = I40E_FILTER_PCTYPE_MAX - 1; i > 0; i--) {
10003 if (pctype_mask & (1ULL << i))
10004 return (enum i40e_filter_pctype)i;
10005 }
10006 }
10007 return I40E_FILTER_PCTYPE_INVALID;
10008 }
10009
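/*
 * Map a hardware pctype back to a flow type by scanning the adapter's
 * pctype table; returns RTE_ETH_FLOW_UNKNOWN if no flow type matches.
 */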
10010 uint16_t
10011 i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
10012 enum i40e_filter_pctype pctype)
10013 {
10014 uint16_t flowtype;
10015 uint64_t pctype_mask = 1ULL << pctype;
10016
10017 for (flowtype = RTE_ETH_FLOW_UNKNOWN + 1; flowtype < I40E_FLOW_TYPE_MAX;
10018 flowtype++) {
10019 if (adapter->pctypes_tbl[flowtype] & pctype_mask)
10020 return flowtype;
10021 }
10022
10023 return RTE_ETH_FLOW_UNKNOWN;
10024 }
10025
10026 /*
10027  * On X710, performance falls far short of expectations on recent firmware
10028  * versions; on XL710, performance is likewise far below expectations on
10029  * recent firmware versions if promiscuous mode is disabled, or if promiscuous
10030  * mode is enabled and the port MAC address equals the packet destination MAC
10031  * address. The fix for this issue may not be integrated in the next
10032  * firmware version, so a workaround in the software driver is needed. It
10033  * modifies the initial values of 3 internal-only registers for both X710 and
10034  * XL710. Note that the values for X710 and XL710 may differ, and the
10035  * workaround can be removed once it is fixed in firmware.
10036 */
10037
10038 /* For both X710 and XL710 */
10039 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200
10040 #define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x203F0200
10041 #define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
10042
10043 #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
10044 #define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
10045
10046 /* For X722 */
10047 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
10048 #define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
10049
10050 /* For X710 */
10051 #define I40E_GL_SWR_PM_UP_THR_EF_VALUE 0x03030303
10052 /* For XL710 */
10053 #define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
10054 #define I40E_GL_SWR_PM_UP_THR 0x269FBC
10055
10056 /*
10057 * GL_SWR_PM_UP_THR:
10058  * The value is not affected by the link speed; it is set according
10059  * to the total number of ports for a better pipe-monitor configuration.
10060 */
10061 static bool
10062 i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
10063 {
10064 #define I40E_GL_SWR_PM_EF_DEVICE(dev) \
10065 .device_id = (dev), \
10066 .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
10067
10068 #define I40E_GL_SWR_PM_SF_DEVICE(dev) \
10069 .device_id = (dev), \
10070 .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
10071
10072 static const struct {
10073 uint16_t device_id;
10074 uint32_t val;
10075 } swr_pm_table[] = {
10076 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
10077 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
10078 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
10079 { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
10080
10081 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
10082 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
10083 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
10084 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
10085 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
10086 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
10087 { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
10088 };
10089 uint32_t i;
10090
10091 if (value == NULL) {
10092 PMD_DRV_LOG(ERR, "value is NULL");
10093 return false;
10094 }
10095
10096 for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
10097 if (hw->device_id == swr_pm_table[i].device_id) {
10098 *value = swr_pm_table[i].val;
10099
10100 PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
10101 "value - 0x%08x",
10102 hw->device_id, *value);
10103 return true;
10104 }
10105 }
10106
10107 return false;
10108 }
10109
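/*
 * Wait for the firmware to report the PHY capabilities, retrying up to
 * 5 times with a 100 ms delay; returns -ENOTSUP if it keeps failing.
 */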
10110 static int
10111 i40e_dev_sync_phy_type(struct i40e_hw *hw)
10112 {
10113 enum i40e_status_code status;
10114 struct i40e_aq_get_phy_abilities_resp phy_ab;
10115 int ret = -ENOTSUP;
10116 int retries = 0;
10117
10118 status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
10119 NULL);
10120
10121 while (status) {
10122 PMD_INIT_LOG(WARNING, "Failed to sync phy type: status=%d",
10123 status);
10124 retries++;
10125 rte_delay_us(100000);
10126 if (retries < 5)
10127 status = i40e_aq_get_phy_capabilities(hw, false,
10128 true, &phy_ab, NULL);
10129 else
10130 return ret;
10131 }
10132 return 0;
10133 }
10134
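/*
 * Apply the workaround values above to GL_SWR_PRI_JOIN_MAP_0/2 and
 * GL_SWR_PM_UP_THR: each register is read through the admin queue and
 * rewritten only when it differs from the expected value.
 */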
10135 static void
10136 i40e_configure_registers(struct i40e_hw *hw)
10137 {
10138 static struct {
10139 uint32_t addr;
10140 uint64_t val;
10141 } reg_table[] = {
10142 {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
10143 {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
10144 {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
10145 };
10146 uint64_t reg;
10147 uint32_t i;
10148 int ret;
10149
10150 for (i = 0; i < RTE_DIM(reg_table); i++) {
10151 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
10152 if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10153 reg_table[i].val =
10154 I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
10155 else /* For X710/XL710/XXV710 */
10156 if (hw->aq.fw_maj_ver < 6)
10157 reg_table[i].val =
10158 I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
10159 else
10160 reg_table[i].val =
10161 I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
10162 }
10163
10164 if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
10165 if (hw->mac.type == I40E_MAC_X722) /* For X722 */
10166 reg_table[i].val =
10167 I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10168 else /* For X710/XL710/XXV710 */
10169 reg_table[i].val =
10170 I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
10171 }
10172
10173 if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
10174 uint32_t cfg_val;
10175
10176 if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
10177 PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
10178 "GL_SWR_PM_UP_THR value fixup",
10179 hw->device_id);
10180 continue;
10181 }
10182
10183 reg_table[i].val = cfg_val;
10184 }
10185
10186 ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
10187 &reg, NULL);
10188 if (ret < 0) {
10189 PMD_DRV_LOG(ERR, "Failed to read from 0x%"PRIx32,
10190 reg_table[i].addr);
10191 break;
10192 }
10193 PMD_DRV_LOG(DEBUG, "Read from 0x%"PRIx32": 0x%"PRIx64,
10194 reg_table[i].addr, reg);
10195 if (reg == reg_table[i].val)
10196 continue;
10197
10198 ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
10199 reg_table[i].val, NULL);
10200 if (ret < 0) {
10201 PMD_DRV_LOG(ERR,
10202 "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
10203 reg_table[i].val, reg_table[i].addr);
10204 break;
10205 }
10206 PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
10207 "0x%"PRIx32, reg_table[i].val, reg_table[i].addr);
10208 }
10209 }
10210
10211 #define I40E_VSI_TSR(_i) (0x00050800 + ((_i) * 4))
10212 #define I40E_VSI_TSR_QINQ_CONFIG 0xc030
10213 #define I40E_VSI_L2TAGSTXVALID(_i) (0x00042800 + ((_i) * 4))
10214 #define I40E_VSI_L2TAGSTXVALID_QINQ 0xab
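/*
 * Enable QinQ (double VLAN) on a VSI: set the RX stripping configuration
 * in VSI_TSR and the TX insertion configuration in VSI_L2TAGSTXVALID via
 * admin queue debug register writes.
 */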
10215 static int
10216 i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
10217 {
10218 uint32_t reg;
10219 int ret;
10220
10221 if (vsi->vsi_id >= I40E_MAX_NUM_VSIS) {
10222 PMD_DRV_LOG(ERR, "VSI ID exceeds the maximum");
10223 return -EINVAL;
10224 }
10225
10226 /* Configure for double VLAN RX stripping */
10227 reg = I40E_READ_REG(hw, I40E_VSI_TSR(vsi->vsi_id));
10228 if ((reg & I40E_VSI_TSR_QINQ_CONFIG) != I40E_VSI_TSR_QINQ_CONFIG) {
10229 reg |= I40E_VSI_TSR_QINQ_CONFIG;
10230 ret = i40e_aq_debug_write_register(hw,
10231 I40E_VSI_TSR(vsi->vsi_id),
10232 reg, NULL);
10233 if (ret < 0) {
10234 PMD_DRV_LOG(ERR, "Failed to update VSI_TSR[%d]",
10235 vsi->vsi_id);
10236 return I40E_ERR_CONFIG;
10237 }
10238 }
10239
10240 /* Configure for double VLAN TX insertion */
10241 reg = I40E_READ_REG(hw, I40E_VSI_L2TAGSTXVALID(vsi->vsi_id));
10242 if ((reg & 0xff) != I40E_VSI_L2TAGSTXVALID_QINQ) {
10243 reg = I40E_VSI_L2TAGSTXVALID_QINQ;
10244 ret = i40e_aq_debug_write_register(hw,
10245 I40E_VSI_L2TAGSTXVALID(
10246 vsi->vsi_id), reg, NULL);
10247 if (ret < 0) {
10248 PMD_DRV_LOG(ERR,
10249 "Failed to update VSI_L2TAGSTXVALID[%d]",
10250 vsi->vsi_id);
10251 return I40E_ERR_CONFIG;
10252 }
10253 }
10254
10255 return 0;
10256 }
10257
10258 /**
10259 * i40e_aq_add_mirror_rule
10260 * @hw: pointer to the hardware structure
10261 * @seid: VEB seid to add mirror rule to
10262 * @dst_id: destination vsi seid
10263 * @entries: Buffer which contains the entities to be mirrored
10264 * @count: number of entities contained in the buffer
10265  * @rule_id: the rule_id of the rule to be added
10266 *
10267 * Add a mirror rule for a given veb.
10268 *
10269 **/
10270 static enum i40e_status_code
10271 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
10272 uint16_t seid, uint16_t dst_id,
10273 uint16_t rule_type, uint16_t *entries,
10274 uint16_t count, uint16_t *rule_id)
10275 {
10276 struct i40e_aq_desc desc;
10277 struct i40e_aqc_add_delete_mirror_rule cmd;
10278 struct i40e_aqc_add_delete_mirror_rule_completion *resp =
10279 (struct i40e_aqc_add_delete_mirror_rule_completion *)
10280 &desc.params.raw;
10281 uint16_t buff_len;
10282 enum i40e_status_code status;
10283
10284 i40e_fill_default_direct_cmd_desc(&desc,
10285 i40e_aqc_opc_add_mirror_rule);
10286 memset(&cmd, 0, sizeof(cmd));
10287
10288 buff_len = sizeof(uint16_t) * count;
10289 desc.datalen = rte_cpu_to_le_16(buff_len);
10290 if (buff_len > 0)
10291 desc.flags |= rte_cpu_to_le_16(
10292 (uint16_t)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
10293 cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10294 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10295 cmd.num_entries = rte_cpu_to_le_16(count);
10296 cmd.seid = rte_cpu_to_le_16(seid);
10297 cmd.destination = rte_cpu_to_le_16(dst_id);
10298
10299 rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10300 status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
10301 PMD_DRV_LOG(INFO,
10302 "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
10303 hw->aq.asq_last_status, resp->rule_id,
10304 resp->mirror_rules_used, resp->mirror_rules_free);
10305 *rule_id = rte_le_to_cpu_16(resp->rule_id);
10306
10307 return status;
10308 }
10309
10310 /**
10311 * i40e_aq_del_mirror_rule
10312 * @hw: pointer to the hardware structure
10313  * @seid: VEB seid to delete the mirror rule from
10314 * @entries: Buffer which contains the entities to be mirrored
10315 * @count: number of entities contained in the buffer
10316  * @rule_id: the rule_id of the rule to be deleted
10317 *
10318 * Delete a mirror rule for a given veb.
10319 *
10320 **/
10321 static enum i40e_status_code
10322 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
10323 uint16_t seid, uint16_t rule_type, uint16_t *entries,
10324 uint16_t count, uint16_t rule_id)
10325 {
10326 struct i40e_aq_desc desc;
10327 struct i40e_aqc_add_delete_mirror_rule cmd;
10328 uint16_t buff_len = 0;
10329 enum i40e_status_code status;
10330 void *buff = NULL;
10331
10332 i40e_fill_default_direct_cmd_desc(&desc,
10333 i40e_aqc_opc_delete_mirror_rule);
10334 memset(&cmd, 0, sizeof(cmd));
10335 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
10336 desc.flags |= rte_cpu_to_le_16((uint16_t)(I40E_AQ_FLAG_BUF |
10337 I40E_AQ_FLAG_RD));
10338 cmd.num_entries = count;
10339 buff_len = sizeof(uint16_t) * count;
10340 desc.datalen = rte_cpu_to_le_16(buff_len);
10341 buff = (void *)entries;
10342 } else
10343 		/* The rule id is passed in the destination field when deleting a mirror rule */
10344 cmd.destination = rte_cpu_to_le_16(rule_id);
10345
10346 cmd.rule_type = rte_cpu_to_le_16(rule_type <<
10347 I40E_AQC_MIRROR_RULE_TYPE_SHIFT);
10348 cmd.seid = rte_cpu_to_le_16(seid);
10349
10350 rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
10351 status = i40e_asq_send_command(hw, &desc, buff, buff_len, NULL);
10352
10353 return status;
10354 }
10355
10356 /**
10357 * i40e_mirror_rule_set
10358  * @dev: pointer to the device
10359 * @mirror_conf: mirror rule info
10360 * @sw_id: mirror rule's sw_id
10361 * @on: enable/disable
10362 *
10363 * set a mirror rule.
10364 *
10365 **/
10366 static int
10367 i40e_mirror_rule_set(struct rte_eth_dev *dev,
10368 struct rte_eth_mirror_conf *mirror_conf,
10369 uint8_t sw_id, uint8_t on)
10370 {
10371 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10372 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10373 struct i40e_mirror_rule *it, *mirr_rule = NULL;
10374 struct i40e_mirror_rule *parent = NULL;
10375 uint16_t seid, dst_seid, rule_id;
10376 uint16_t i, j = 0;
10377 int ret;
10378
10379 PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
10380
10381 if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
10382 PMD_DRV_LOG(ERR,
10383 "mirror rule can not be configured without veb or vfs.");
10384 return -ENOSYS;
10385 }
10386 if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
10387 PMD_DRV_LOG(ERR, "mirror table is full.");
10388 return -ENOSPC;
10389 }
10390 if (mirror_conf->dst_pool > pf->vf_num) {
10391 PMD_DRV_LOG(ERR, "invalid destination pool %u.",
10392 mirror_conf->dst_pool);
10393 return -EINVAL;
10394 }
10395
10396 seid = pf->main_vsi->veb->seid;
10397
10398 TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10399 if (sw_id <= it->index) {
10400 mirr_rule = it;
10401 break;
10402 }
10403 parent = it;
10404 }
10405 if (mirr_rule && sw_id == mirr_rule->index) {
10406 if (on) {
10407 PMD_DRV_LOG(ERR, "mirror rule exists.");
10408 return -EEXIST;
10409 } else {
10410 ret = i40e_aq_del_mirror_rule(hw, seid,
10411 mirr_rule->rule_type,
10412 mirr_rule->entries,
10413 mirr_rule->num_entries, mirr_rule->id);
10414 if (ret < 0) {
10415 PMD_DRV_LOG(ERR,
10416 "failed to remove mirror rule: ret = %d, aq_err = %d.",
10417 ret, hw->aq.asq_last_status);
10418 return -ENOSYS;
10419 }
10420 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10421 rte_free(mirr_rule);
10422 pf->nb_mirror_rule--;
10423 return 0;
10424 }
10425 } else if (!on) {
10426 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10427 return -ENOENT;
10428 }
10429
10430 mirr_rule = rte_zmalloc("i40e_mirror_rule",
10431 sizeof(struct i40e_mirror_rule) , 0);
10432 if (!mirr_rule) {
10433 PMD_DRV_LOG(ERR, "failed to allocate memory");
10434 return I40E_ERR_NO_MEMORY;
10435 }
10436 switch (mirror_conf->rule_type) {
10437 case ETH_MIRROR_VLAN:
10438 for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) {
10439 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
10440 mirr_rule->entries[j] =
10441 mirror_conf->vlan.vlan_id[i];
10442 j++;
10443 }
10444 }
10445 if (j == 0) {
10446 PMD_DRV_LOG(ERR, "vlan is not specified.");
10447 rte_free(mirr_rule);
10448 return -EINVAL;
10449 }
10450 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN;
10451 break;
10452 case ETH_MIRROR_VIRTUAL_POOL_UP:
10453 case ETH_MIRROR_VIRTUAL_POOL_DOWN:
10454 /* check if the specified pool bit is out of range */
10455 if (mirror_conf->pool_mask > (uint64_t)(1ULL << (pf->vf_num + 1))) {
10456 PMD_DRV_LOG(ERR, "pool mask is out of range.");
10457 rte_free(mirr_rule);
10458 return -EINVAL;
10459 }
10460 for (i = 0, j = 0; i < pf->vf_num; i++) {
10461 if (mirror_conf->pool_mask & (1ULL << i)) {
10462 mirr_rule->entries[j] = pf->vfs[i].vsi->seid;
10463 j++;
10464 }
10465 }
10466 if (mirror_conf->pool_mask & (1ULL << pf->vf_num)) {
10467 /* add pf vsi to entries */
10468 mirr_rule->entries[j] = pf->main_vsi_seid;
10469 j++;
10470 }
10471 if (j == 0) {
10472 PMD_DRV_LOG(ERR, "pool is not specified.");
10473 rte_free(mirr_rule);
10474 return -EINVAL;
10475 }
10476 		/* Egress and ingress in AQ commands refer to the switch side, not the port */
10477 mirr_rule->rule_type =
10478 (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) ?
10479 I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS :
10480 I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS;
10481 break;
10482 case ETH_MIRROR_UPLINK_PORT:
10483 		/* Egress and ingress in AQ commands refer to the switch side, not the port */
10484 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS;
10485 break;
10486 case ETH_MIRROR_DOWNLINK_PORT:
10487 mirr_rule->rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
10488 break;
10489 default:
10490 PMD_DRV_LOG(ERR, "unsupported mirror type %d.",
10491 mirror_conf->rule_type);
10492 rte_free(mirr_rule);
10493 return -EINVAL;
10494 }
10495
10496 /* If the dst_pool is equal to vf_num, consider it as PF */
10497 if (mirror_conf->dst_pool == pf->vf_num)
10498 dst_seid = pf->main_vsi_seid;
10499 else
10500 dst_seid = pf->vfs[mirror_conf->dst_pool].vsi->seid;
10501
10502 ret = i40e_aq_add_mirror_rule(hw, seid, dst_seid,
10503 mirr_rule->rule_type, mirr_rule->entries,
10504 j, &rule_id);
10505 if (ret < 0) {
10506 PMD_DRV_LOG(ERR,
10507 "failed to add mirror rule: ret = %d, aq_err = %d.",
10508 ret, hw->aq.asq_last_status);
10509 rte_free(mirr_rule);
10510 return -ENOSYS;
10511 }
10512
10513 mirr_rule->index = sw_id;
10514 mirr_rule->num_entries = j;
10515 mirr_rule->id = rule_id;
10516 mirr_rule->dst_vsi_seid = dst_seid;
10517
10518 if (parent)
10519 TAILQ_INSERT_AFTER(&pf->mirror_list, parent, mirr_rule, rules);
10520 else
10521 TAILQ_INSERT_HEAD(&pf->mirror_list, mirr_rule, rules);
10522
10523 pf->nb_mirror_rule++;
10524 return 0;
10525 }
10526
10527 /**
10528 * i40e_mirror_rule_reset
10529 * @dev: pointer to the device
10530 * @sw_id: mirror rule's sw_id
10531 *
10532 * reset a mirror rule.
10533 *
10534 **/
10535 static int
10536 i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
10537 {
10538 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
10539 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10540 struct i40e_mirror_rule *it, *mirr_rule = NULL;
10541 uint16_t seid;
10542 int ret;
10543
10544 PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_reset: sw_id = %d.", sw_id);
10545
10546 seid = pf->main_vsi->veb->seid;
10547
10548 TAILQ_FOREACH(it, &pf->mirror_list, rules) {
10549 if (sw_id == it->index) {
10550 mirr_rule = it;
10551 break;
10552 }
10553 }
10554 if (mirr_rule) {
10555 ret = i40e_aq_del_mirror_rule(hw, seid,
10556 mirr_rule->rule_type,
10557 mirr_rule->entries,
10558 mirr_rule->num_entries, mirr_rule->id);
10559 if (ret < 0) {
10560 PMD_DRV_LOG(ERR,
10561 "failed to remove mirror rule: status = %d, aq_err = %d.",
10562 ret, hw->aq.asq_last_status);
10563 return -ENOSYS;
10564 }
10565 TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
10566 rte_free(mirr_rule);
10567 pf->nb_mirror_rule--;
10568 } else {
10569 PMD_DRV_LOG(ERR, "mirror rule doesn't exist.");
10570 return -ENOENT;
10571 }
10572 return 0;
10573 }
10574
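/*
 * The three helpers below read the 64-bit PTP cycle counters (system time,
 * RX timestamp and TX timestamp) from the PRTTSYN registers.
 */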
10575 static uint64_t
10576 i40e_read_systime_cyclecounter(struct rte_eth_dev *dev)
10577 {
10578 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10579 uint64_t systim_cycles;
10580
10581 systim_cycles = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_L);
10582 systim_cycles |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TIME_H)
10583 << 32;
10584
10585 return systim_cycles;
10586 }
10587
10588 static uint64_t
10589 i40e_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev, uint8_t index)
10590 {
10591 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10592 uint64_t rx_tstamp;
10593
10594 rx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_L(index));
10595 rx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(index))
10596 << 32;
10597
10598 return rx_tstamp;
10599 }
10600
10601 static uint64_t
10602 i40e_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
10603 {
10604 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10605 uint64_t tx_tstamp;
10606
10607 tx_tstamp = (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_L);
10608 tx_tstamp |= (uint64_t)I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H)
10609 << 32;
10610
10611 return tx_tstamp;
10612 }
10613
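/*
 * i40e_start_timecounters - initialize the timesync cycle counters
 * @dev: pointer to the device
 *
 * Programs the PRTTSYN_INC increment value according to the current
 * link speed and resets the system time, Rx and Tx timecounters.
 */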
10614 static void
10615 i40e_start_timecounters(struct rte_eth_dev *dev)
10616 {
10617 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10618 struct i40e_adapter *adapter =
10619 (struct i40e_adapter *)dev->data->dev_private;
10620 struct rte_eth_link link;
10621 uint32_t tsync_inc_l;
10622 uint32_t tsync_inc_h;
10623
10624 /* Get current link speed. */
10625 i40e_dev_link_update(dev, 1);
10626 rte_eth_linkstatus_get(dev, &link);
10627
10628 switch (link.link_speed) {
10629 case ETH_SPEED_NUM_40G:
10630 tsync_inc_l = I40E_PTP_40GB_INCVAL & 0xFFFFFFFF;
10631 tsync_inc_h = I40E_PTP_40GB_INCVAL >> 32;
10632 break;
10633 case ETH_SPEED_NUM_10G:
10634 tsync_inc_l = I40E_PTP_10GB_INCVAL & 0xFFFFFFFF;
10635 tsync_inc_h = I40E_PTP_10GB_INCVAL >> 32;
10636 break;
10637 case ETH_SPEED_NUM_1G:
10638 tsync_inc_l = I40E_PTP_1GB_INCVAL & 0xFFFFFFFF;
10639 tsync_inc_h = I40E_PTP_1GB_INCVAL >> 32;
10640 break;
10641 default:
10642 tsync_inc_l = 0x0;
10643 tsync_inc_h = 0x0;
10644 }
10645
10646 /* Set the timesync increment value. */
10647 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, tsync_inc_l);
10648 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, tsync_inc_h);
10649
10650 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
10651 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10652 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
10653
10654 adapter->systime_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10655 adapter->systime_tc.cc_shift = 0;
10656 adapter->systime_tc.nsec_mask = 0;
10657
10658 adapter->rx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10659 adapter->rx_tstamp_tc.cc_shift = 0;
10660 adapter->rx_tstamp_tc.nsec_mask = 0;
10661
10662 adapter->tx_tstamp_tc.cc_mask = I40E_CYCLECOUNTER_MASK;
10663 adapter->tx_tstamp_tc.cc_shift = 0;
10664 adapter->tx_tstamp_tc.nsec_mask = 0;
10665 }
10666
10667 static int
10668 i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
10669 {
10670 struct i40e_adapter *adapter =
10671 (struct i40e_adapter *)dev->data->dev_private;
10672
10673 adapter->systime_tc.nsec += delta;
10674 adapter->rx_tstamp_tc.nsec += delta;
10675 adapter->tx_tstamp_tc.nsec += delta;
10676
10677 return 0;
10678 }
10679
10680 static int
10681 i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
10682 {
10683 uint64_t ns;
10684 struct i40e_adapter *adapter =
10685 (struct i40e_adapter *)dev->data->dev_private;
10686
10687 ns = rte_timespec_to_ns(ts);
10688
10689 /* Set the timecounters to a new value. */
10690 adapter->systime_tc.nsec = ns;
10691 adapter->rx_tstamp_tc.nsec = ns;
10692 adapter->tx_tstamp_tc.nsec = ns;
10693
10694 return 0;
10695 }
10696
10697 static int
10698 i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
10699 {
10700 uint64_t ns, systime_cycles;
10701 struct i40e_adapter *adapter =
10702 (struct i40e_adapter *)dev->data->dev_private;
10703
10704 systime_cycles = i40e_read_systime_cyclecounter(dev);
10705 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
10706 *ts = rte_ns_to_timespec(ns);
10707
10708 return 0;
10709 }
10710
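/*
 * i40e_timesync_enable - enable IEEE 1588 timestamping
 * @dev: pointer to the device
 *
 * Resets the PHC system time, restarts the timecounters, drains the
 * latched timestamp registers and enables timestamping of PTP packets.
 */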
10711 static int
10712 i40e_timesync_enable(struct rte_eth_dev *dev)
10713 {
10714 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10715 uint32_t tsync_ctl_l;
10716 uint32_t tsync_ctl_h;
10717
10718 /* Stop the timesync system time. */
10719 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10720 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10721 /* Reset the timesync system time value. */
10722 I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_L, 0x0);
10723 I40E_WRITE_REG(hw, I40E_PRTTSYN_TIME_H, 0x0);
10724
10725 i40e_start_timecounters(dev);
10726
10727 /* Clear timesync registers. */
10728 I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10729 I40E_READ_REG(hw, I40E_PRTTSYN_TXTIME_H);
10730 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(0));
10731 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(1));
10732 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(2));
10733 I40E_READ_REG(hw, I40E_PRTTSYN_RXTIME_H(3));
10734
10735 /* Enable timestamping of PTP packets. */
10736 tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10737 tsync_ctl_l |= I40E_PRTTSYN_TSYNENA;
10738
10739 tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10740 tsync_ctl_h |= I40E_PRTTSYN_TSYNENA;
10741 tsync_ctl_h |= I40E_PRTTSYN_TSYNTYPE;
10742
10743 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10744 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10745
10746 return 0;
10747 }
10748
10749 static int
10750 i40e_timesync_disable(struct rte_eth_dev *dev)
10751 {
10752 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10753 uint32_t tsync_ctl_l;
10754 uint32_t tsync_ctl_h;
10755
10756 /* Disable timestamping of transmitted and received PTP packets. */
10757 tsync_ctl_l = I40E_READ_REG(hw, I40E_PRTTSYN_CTL0);
10758 tsync_ctl_l &= ~I40E_PRTTSYN_TSYNENA;
10759
10760 tsync_ctl_h = I40E_READ_REG(hw, I40E_PRTTSYN_CTL1);
10761 tsync_ctl_h &= ~I40E_PRTTSYN_TSYNENA;
10762
10763 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL0, tsync_ctl_l);
10764 I40E_WRITE_REG(hw, I40E_PRTTSYN_CTL1, tsync_ctl_h);
10765
10766 /* Reset the timesync increment value. */
10767 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_L, 0x0);
10768 I40E_WRITE_REG(hw, I40E_PRTTSYN_INC_H, 0x0);
10769
10770 return 0;
10771 }
10772
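/*
 * i40e_timesync_read_rx_timestamp - read an Rx PTP timestamp
 * @dev: pointer to the device
 * @timestamp: converted timestamp is stored here
 * @flags: index (0-3) of the RXTIME register pair to read
 *
 * Returns -EINVAL if no timestamp is latched for the given index.
 */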
10773 static int
10774 i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
10775 struct timespec *timestamp, uint32_t flags)
10776 {
10777 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10778 struct i40e_adapter *adapter =
10779 (struct i40e_adapter *)dev->data->dev_private;
10780
10781 uint32_t sync_status;
10782 uint32_t index = flags & 0x03;
10783 uint64_t rx_tstamp_cycles;
10784 uint64_t ns;
10785
10786 sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_1);
10787 if ((sync_status & (1 << index)) == 0)
10788 return -EINVAL;
10789
10790 rx_tstamp_cycles = i40e_read_rx_tstamp_cyclecounter(dev, index);
10791 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
10792 *timestamp = rte_ns_to_timespec(ns);
10793
10794 return 0;
10795 }
10796
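/*
 * i40e_timesync_read_tx_timestamp - read the Tx PTP timestamp
 * @dev: pointer to the device
 * @timestamp: converted timestamp is stored here
 *
 * Returns -EINVAL if no Tx timestamp has been latched.
 */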
10797 static int
10798 i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
10799 struct timespec *timestamp)
10800 {
10801 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
10802 struct i40e_adapter *adapter =
10803 (struct i40e_adapter *)dev->data->dev_private;
10804
10805 uint32_t sync_status;
10806 uint64_t tx_tstamp_cycles;
10807 uint64_t ns;
10808
10809 sync_status = I40E_READ_REG(hw, I40E_PRTTSYN_STAT_0);
10810 if ((sync_status & I40E_PRTTSYN_STAT_0_TXTIME_MASK) == 0)
10811 return -EINVAL;
10812
10813 tx_tstamp_cycles = i40e_read_tx_tstamp_cyclecounter(dev);
10814 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
10815 *timestamp = rte_ns_to_timespec(ns);
10816
10817 return 0;
10818 }
10819
10820 /*
10821 * i40e_parse_dcb_configure - parse dcb configure from user
10822 * @dev: the device being configured
10823 * @dcb_cfg: pointer of the result of parse
10824 * @*tc_map: bit map of enabled traffic classes
10825 *
10826 * Returns 0 on success, negative value on failure
10827 */
10828 static int
10829 i40e_parse_dcb_configure(struct rte_eth_dev *dev,
10830 struct i40e_dcbx_config *dcb_cfg,
10831 uint8_t *tc_map)
10832 {
10833 struct rte_eth_dcb_rx_conf *dcb_rx_conf;
10834 uint8_t i, tc_bw, bw_lf;
10835
10836 memset(dcb_cfg, 0, sizeof(struct i40e_dcbx_config));
10837
10838 dcb_rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
10839 if (dcb_rx_conf->nb_tcs > I40E_MAX_TRAFFIC_CLASS) {
10840 PMD_INIT_LOG(ERR, "number of tc exceeds max.");
10841 return -EINVAL;
10842 }
10843
10844 /* assume each tc has the same bw */
10845 tc_bw = I40E_MAX_PERCENT / dcb_rx_conf->nb_tcs;
10846 for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10847 dcb_cfg->etscfg.tcbwtable[i] = tc_bw;
10848 /* to ensure the sum of tcbw is equal to 100 */
10849 bw_lf = I40E_MAX_PERCENT % dcb_rx_conf->nb_tcs;
10850 for (i = 0; i < bw_lf; i++)
10851 dcb_cfg->etscfg.tcbwtable[i]++;
10852
10853 /* assume each tc has the same Transmission Selection Algorithm */
10854 for (i = 0; i < dcb_rx_conf->nb_tcs; i++)
10855 dcb_cfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS;
10856
10857 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
10858 dcb_cfg->etscfg.prioritytable[i] =
10859 dcb_rx_conf->dcb_tc[i];
10860
10861 /* FW needs one App to configure HW */
10862 dcb_cfg->numapps = I40E_DEFAULT_DCB_APP_NUM;
10863 dcb_cfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
10864 dcb_cfg->app[0].priority = I40E_DEFAULT_DCB_APP_PRIO;
10865 dcb_cfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
10866
10867 if (dcb_rx_conf->nb_tcs == 0)
10868 *tc_map = 1; /* tc0 only */
10869 else
10870 *tc_map = RTE_LEN2MASK(dcb_rx_conf->nb_tcs, uint8_t);
10871
10872 if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
10873 dcb_cfg->pfc.willing = 0;
10874 dcb_cfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
10875 dcb_cfg->pfc.pfcenable = *tc_map;
10876 }
10877 return 0;
10878 }
10879
10880
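/*
 * i40e_vsi_update_queue_mapping - update TC to queue mapping of a VSI
 * @vsi: VSI to be updated
 * @info: VSI context info to be filled in
 * @enabled_tcmap: bitmap of TCs to be enabled
 *
 * Distributes the VSI queues evenly over the enabled TCs and fills in
 * the tc_mapping/queue_mapping fields of @info accordingly.
 */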
10881 static enum i40e_status_code
10882 i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi,
10883 struct i40e_aqc_vsi_properties_data *info,
10884 uint8_t enabled_tcmap)
10885 {
10886 enum i40e_status_code ret;
10887 int i, total_tc = 0;
10888 uint16_t qpnum_per_tc, bsf, qp_idx;
10889 struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
10890 struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
10891 uint16_t used_queues;
10892
10893 ret = validate_tcmap_parameter(vsi, enabled_tcmap);
10894 if (ret != I40E_SUCCESS)
10895 return ret;
10896
10897 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10898 if (enabled_tcmap & (1 << i))
10899 total_tc++;
10900 }
10901 if (total_tc == 0)
10902 total_tc = 1;
10903 vsi->enabled_tc = enabled_tcmap;
10904
10905 /* Different VSI types have different numbers of queues assigned */
10906 if (vsi->type == I40E_VSI_MAIN)
10907 used_queues = dev_data->nb_rx_queues -
10908 pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10909 else if (vsi->type == I40E_VSI_VMDQ2)
10910 used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
10911 else {
10912 PMD_INIT_LOG(ERR, "unsupported VSI type.");
10913 return I40E_ERR_NO_AVAILABLE_VSI;
10914 }
10915
10916 qpnum_per_tc = used_queues / total_tc;
10917 /* Number of queues per enabled TC */
10918 if (qpnum_per_tc == 0) {
10919 PMD_INIT_LOG(ERR, "number of queues is less than number of TCs.");
10920 return I40E_ERR_INVALID_QP_ID;
10921 }
10922 qpnum_per_tc = RTE_MIN(i40e_align_floor(qpnum_per_tc),
10923 I40E_MAX_Q_PER_TC);
10924 bsf = rte_bsf32(qpnum_per_tc);
10925
10926 /**
10927 * Configure TC and queue mapping parameters. For each enabled TC,
10928 * allocate qpnum_per_tc queues to that traffic class; a disabled TC
10929 * is served by the default queue.
10930 */
10931 qp_idx = 0;
10932 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10933 if (vsi->enabled_tc & (1 << i)) {
10934 info->tc_mapping[i] = rte_cpu_to_le_16((qp_idx <<
10935 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
10936 (bsf << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
10937 qp_idx += qpnum_per_tc;
10938 } else
10939 info->tc_mapping[i] = 0;
10940 }
10941
10942 /* Associate queue number with VSI, keep vsi->nb_qps unchanged */
10943 if (vsi->type == I40E_VSI_SRIOV) {
10944 info->mapping_flags |=
10945 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
10946 for (i = 0; i < vsi->nb_qps; i++)
10947 info->queue_mapping[i] =
10948 rte_cpu_to_le_16(vsi->base_queue + i);
10949 } else {
10950 info->mapping_flags |=
10951 rte_cpu_to_le_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
10952 info->queue_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
10953 }
10954 info->valid_sections |=
10955 rte_cpu_to_le_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
10956
10957 return I40E_SUCCESS;
10958 }
10959
10960 /*
10961 * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map
10962 * @veb: VEB to be configured
10963 * @tc_map: enabled TC bitmap
10964 *
10965 * Returns 0 on success, negative value on failure
10966 */
10967 static enum i40e_status_code
10968 i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
10969 {
10970 struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw;
10971 struct i40e_aqc_query_switching_comp_bw_config_resp bw_query;
10972 struct i40e_aqc_query_switching_comp_ets_config_resp ets_query;
10973 struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi);
10974 enum i40e_status_code ret = I40E_SUCCESS;
10975 int i;
10976 uint32_t bw_max;
10977
10978 /* Check if enabled_tc is same as existing or new TCs */
10979 if (veb->enabled_tc == tc_map)
10980 return ret;
10981
10982 /* configure tc bandwidth */
10983 memset(&veb_bw, 0, sizeof(veb_bw));
10984 veb_bw.tc_valid_bits = tc_map;
10985 /* Enable ETS TCs with equal BW Share for now across all VSIs */
10986 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
10987 if (tc_map & BIT_ULL(i))
10988 veb_bw.tc_bw_share_credits[i] = 1;
10989 }
10990 ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
10991 &veb_bw, NULL);
10992 if (ret) {
10993 PMD_INIT_LOG(ERR,
10994 "AQ command Config switch_comp BW allocation per TC failed = %d",
10995 hw->aq.asq_last_status);
10996 return ret;
10997 }
10998
10999 memset(&ets_query, 0, sizeof(ets_query));
11000 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
11001 &ets_query, NULL);
11002 if (ret != I40E_SUCCESS) {
11003 PMD_DRV_LOG(ERR,
11004 "Failed to get switch_comp ETS configuration %u",
11005 hw->aq.asq_last_status);
11006 return ret;
11007 }
11008 memset(&bw_query, 0, sizeof(bw_query));
11009 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
11010 &bw_query, NULL);
11011 if (ret != I40E_SUCCESS) {
11012 PMD_DRV_LOG(ERR,
11013 "Failed to get switch_comp bandwidth configuration %u",
11014 hw->aq.asq_last_status);
11015 return ret;
11016 }
11017
11018 /* store and print out BW info */
11019 veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit);
11020 veb->bw_info.bw_max = ets_query.tc_bw_max;
11021 PMD_DRV_LOG(DEBUG, "switch_comp bw limit:%u", veb->bw_info.bw_limit);
11022 PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max);
11023 bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) |
11024 (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) <<
11025 I40E_16_BIT_WIDTH);
11026 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11027 veb->bw_info.bw_ets_share_credits[i] =
11028 bw_query.tc_bw_share_credits[i];
11029 veb->bw_info.bw_ets_credits[i] =
11030 rte_le_to_cpu_16(bw_query.tc_bw_limits[i]);
11031 /* 4 bits per TC, 4th bit is reserved */
11032 veb->bw_info.bw_ets_max[i] =
11033 (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
11034 RTE_LEN2MASK(3, uint8_t));
11035 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i,
11036 veb->bw_info.bw_ets_share_credits[i]);
11037 PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i,
11038 veb->bw_info.bw_ets_credits[i]);
11039 PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i,
11040 veb->bw_info.bw_ets_max[i]);
11041 }
11042
11043 veb->enabled_tc = tc_map;
11044
11045 return ret;
11046 }
11047
11048
11049 /*
11050 * i40e_vsi_config_tc - Configure VSI tc setting for given TC map
11051 * @vsi: VSI to be configured
11052 * @tc_map: enabled TC bitmap
11053 *
11054 * Returns 0 on success, negative value on failure
11055 */
11056 static enum i40e_status_code
11057 i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
11058 {
11059 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
11060 struct i40e_vsi_context ctxt;
11061 struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
11062 enum i40e_status_code ret = I40E_SUCCESS;
11063 int i;
11064
11065 /* Check if enabled_tc is same as existing or new TCs */
11066 if (vsi->enabled_tc == tc_map)
11067 return ret;
11068
11069 /* configure tc bandwidth */
11070 memset(&bw_data, 0, sizeof(bw_data));
11071 bw_data.tc_valid_bits = tc_map;
11072 /* Enable ETS TCs with equal BW Share for now across all VSIs */
11073 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11074 if (tc_map & BIT_ULL(i))
11075 bw_data.tc_bw_credits[i] = 1;
11076 }
11077 ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
11078 if (ret) {
11079 PMD_INIT_LOG(ERR,
11080 "AQ command Config VSI BW allocation per TC failed = %d",
11081 hw->aq.asq_last_status);
11082 goto out;
11083 }
11084 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
11085 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
11086
11087 /* Update Queue Pairs Mapping for currently enabled UPs */
11088 ctxt.seid = vsi->seid;
11089 ctxt.pf_num = hw->pf_id;
11090 ctxt.vf_num = 0;
11091 ctxt.uplink_seid = vsi->uplink_seid;
11092 ctxt.info = vsi->info;
11093 i40e_get_cap(hw);
11094 ret = i40e_vsi_update_queue_mapping(vsi, &ctxt.info, tc_map);
11095 if (ret)
11096 goto out;
11097
11098 /* Update the VSI after updating the VSI queue-mapping information */
11099 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
11100 if (ret) {
11101 PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
11102 hw->aq.asq_last_status);
11103 goto out;
11104 }
11105 /* update the local VSI info with updated queue map */
11106 rte_memcpy(&vsi->info.tc_mapping, &ctxt.info.tc_mapping,
11107 sizeof(vsi->info.tc_mapping));
11108 rte_memcpy(&vsi->info.queue_mapping,
11109 &ctxt.info.queue_mapping,
11110 sizeof(vsi->info.queue_mapping));
11111 vsi->info.mapping_flags = ctxt.info.mapping_flags;
11112 vsi->info.valid_sections = 0;
11113
11114 /* query and update current VSI BW information */
11115 ret = i40e_vsi_get_bw_config(vsi);
11116 if (ret) {
11117 PMD_INIT_LOG(ERR,
11118 "Failed updating vsi bw info, err %s aq_err %s",
11119 i40e_stat_str(hw, ret),
11120 i40e_aq_str(hw, hw->aq.asq_last_status));
11121 goto out;
11122 }
11123
11124 vsi->enabled_tc = tc_map;
11125
11126 out:
11127 return ret;
11128 }
11129
11130 /*
11131 * i40e_dcb_hw_configure - program the dcb setting to hw
11132 * @pf: pf the configuration is taken on
11133 * @new_cfg: new configuration
11134 * @tc_map: enabled TC bitmap
11135 *
11136 * Returns 0 on success, negative value on failure
11137 */
11138 static enum i40e_status_code
11139 i40e_dcb_hw_configure(struct i40e_pf *pf,
11140 struct i40e_dcbx_config *new_cfg,
11141 uint8_t tc_map)
11142 {
11143 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11144 struct i40e_dcbx_config *old_cfg = &hw->local_dcbx_config;
11145 struct i40e_vsi *main_vsi = pf->main_vsi;
11146 struct i40e_vsi_list *vsi_list;
11147 enum i40e_status_code ret;
11148 int i;
11149 uint32_t val;
11150
11151 /* Use the FW API only if FW >= v4.4 */
11152 if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
11153 (hw->aq.fw_maj_ver >= 5))) {
11154 PMD_INIT_LOG(ERR,
11155 "FW < v4.4, can not use FW LLDP API to configure DCB");
11156 return I40E_ERR_FIRMWARE_API_VERSION;
11157 }
11158
11159 /* Check whether reconfiguration is needed */
11160 if (!memcmp(new_cfg, old_cfg, sizeof(struct i40e_dcbx_config))) {
11161 PMD_INIT_LOG(ERR, "No Change in DCB Config required.");
11162 return I40E_SUCCESS;
11163 }
11164
11165 /* Copy the new config to the current config */
11166 *old_cfg = *new_cfg;
11167 old_cfg->etsrec = old_cfg->etscfg;
11168 ret = i40e_set_dcb_config(hw);
11169 if (ret) {
11170 PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
11171 i40e_stat_str(hw, ret),
11172 i40e_aq_str(hw, hw->aq.asq_last_status));
11173 return ret;
11174 }
11175 /* set receive Arbiter to RR mode and ETS scheme by default */
11176 for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
11177 val = I40E_READ_REG(hw, I40E_PRTDCB_RETSTCC(i));
11178 val &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
11179 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
11180 I40E_PRTDCB_RETSTCC_ETSTC_MASK);
11181 val |= ((uint32_t)old_cfg->etscfg.tcbwtable[i] <<
11182 I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
11183 I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
11184 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
11185 I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
11186 val |= ((uint32_t)1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
11187 I40E_PRTDCB_RETSTCC_ETSTC_MASK;
11188 I40E_WRITE_REG(hw, I40E_PRTDCB_RETSTCC(i), val);
11189 }
11190 /* get local mib to check whether it is configured correctly */
11191 /* IEEE mode */
11192 hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
11193 /* Get Local DCB Config */
11194 i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
11195 &hw->local_dcbx_config);
11196
11197 /* If a VEB has been created, update its TC configuration first */
11198 if (main_vsi->veb) {
11199 ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
11200 if (ret)
11201 PMD_INIT_LOG(WARNING,
11202 "Failed configuring TC for VEB seid=%d",
11203 main_vsi->veb->seid);
11204 }
11205 /* Update each VSI */
11206 i40e_vsi_config_tc(main_vsi, tc_map);
11207 if (main_vsi->veb) {
11208 TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) {
11209 /* Besides the main VSI and VMDQ VSIs, only enable the
11210 * default TC for other VSIs
11211 */
11212 if (vsi_list->vsi->type == I40E_VSI_VMDQ2)
11213 ret = i40e_vsi_config_tc(vsi_list->vsi,
11214 tc_map);
11215 else
11216 ret = i40e_vsi_config_tc(vsi_list->vsi,
11217 I40E_DEFAULT_TCMAP);
11218 if (ret)
11219 PMD_INIT_LOG(WARNING,
11220 "Failed configuring TC for VSI seid=%d",
11221 vsi_list->vsi->seid);
11222 /* continue */
11223 }
11224 }
11225 return I40E_SUCCESS;
11226 }
11227
11228 /*
11229 * i40e_dcb_init_configure - initial dcb config
11230 * @dev: device being configured
11231 * @sw_dcb: indicate whether dcb is sw configured or hw offload
11232 *
11233 * Returns 0 on success, negative value on failure
11234 */
11235 int
11236 i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
11237 {
11238 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11239 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11240 int i, ret = 0;
11241
11242 if ((pf->flags & I40E_FLAG_DCB) == 0) {
11243 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11244 return -ENOTSUP;
11245 }
11246
11247 /* DCB initialization:
11248 * Update DCB configuration from the Firmware and configure
11249 * LLDP MIB change event.
11250 */
11251 if (sw_dcb == TRUE) {
11252 ret = i40e_init_dcb(hw);
11253 /* If the LLDP agent is stopped, i40e_init_dcb is expected
11254 * to fail with adminq status I40E_AQ_RC_EPERM.
11255 * Otherwise, it should return success.
11256 */
11257 if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
11258 hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
11259 memset(&hw->local_dcbx_config, 0,
11260 sizeof(struct i40e_dcbx_config));
11261 /* set dcb default configuration */
11262 hw->local_dcbx_config.etscfg.willing = 0;
11263 hw->local_dcbx_config.etscfg.maxtcs = 0;
11264 hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
11265 hw->local_dcbx_config.etscfg.tsatable[0] =
11266 I40E_IEEE_TSA_ETS;
11267 /* all UPs mapping to TC0 */
11268 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11269 hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
11270 hw->local_dcbx_config.etsrec =
11271 hw->local_dcbx_config.etscfg;
11272 hw->local_dcbx_config.pfc.willing = 0;
11273 hw->local_dcbx_config.pfc.pfccap =
11274 I40E_MAX_TRAFFIC_CLASS;
11275 /* FW needs one App to configure HW */
11276 hw->local_dcbx_config.numapps = 1;
11277 hw->local_dcbx_config.app[0].selector =
11278 I40E_APP_SEL_ETHTYPE;
11279 hw->local_dcbx_config.app[0].priority = 3;
11280 hw->local_dcbx_config.app[0].protocolid =
11281 I40E_APP_PROTOID_FCOE;
11282 ret = i40e_set_dcb_config(hw);
11283 if (ret) {
11284 PMD_INIT_LOG(ERR,
11285 "default dcb config fails. err = %d, aq_err = %d.",
11286 ret, hw->aq.asq_last_status);
11287 return -ENOSYS;
11288 }
11289 } else {
11290 PMD_INIT_LOG(ERR,
11291 "DCB initialization in FW fails, err = %d, aq_err = %d.",
11292 ret, hw->aq.asq_last_status);
11293 return -ENOTSUP;
11294 }
11295 } else {
11296 ret = i40e_aq_start_lldp(hw, NULL);
11297 if (ret != I40E_SUCCESS)
11298 PMD_INIT_LOG(DEBUG, "Failed to start lldp");
11299
11300 ret = i40e_init_dcb(hw);
11301 if (!ret) {
11302 if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
11303 PMD_INIT_LOG(ERR,
11304 "HW doesn't support DCBX offload.");
11305 return -ENOTSUP;
11306 }
11307 } else {
11308 PMD_INIT_LOG(ERR,
11309 "DCBX configuration failed, err = %d, aq_err = %d.",
11310 ret, hw->aq.asq_last_status);
11311 return -ENOTSUP;
11312 }
11313 }
11314 return 0;
11315 }
11316
11317 /*
11318 * i40e_dcb_setup - setup dcb related config
11319 * @dev: device being configured
11320 *
11321 * Returns 0 on success, negative value on failure
11322 */
11323 static int
11324 i40e_dcb_setup(struct rte_eth_dev *dev)
11325 {
11326 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11327 struct i40e_dcbx_config dcb_cfg;
11328 uint8_t tc_map = 0;
11329 int ret = 0;
11330
11331 if ((pf->flags & I40E_FLAG_DCB) == 0) {
11332 PMD_INIT_LOG(ERR, "HW doesn't support DCB");
11333 return -ENOTSUP;
11334 }
11335
11336 if (pf->vf_num != 0)
11337 PMD_INIT_LOG(DEBUG, "DCB only works on PF and VMDq VSIs.");
11338
11339 ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map);
11340 if (ret) {
11341 PMD_INIT_LOG(ERR, "invalid dcb config");
11342 return -EINVAL;
11343 }
11344 ret = i40e_dcb_hw_configure(pf, &dcb_cfg, tc_map);
11345 if (ret) {
11346 PMD_INIT_LOG(ERR, "dcb sw configure fails");
11347 return -ENOSYS;
11348 }
11349
11350 return 0;
11351 }
11352
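/*
 * i40e_dev_get_dcb_info - retrieve the DCB configuration of the device
 * @dev: pointer to the device
 * @dcb_info: filled with TC count, priority table and queue mapping
 *
 * The queue mapping is reported per VMDq pool when VMDq is enabled,
 * otherwise only for the main VSI.
 */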
11353 static int
11354 i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
11355 struct rte_eth_dcb_info *dcb_info)
11356 {
11357 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11358 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11359 struct i40e_vsi *vsi = pf->main_vsi;
11360 struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config;
11361 uint16_t bsf, tc_mapping;
11362 int i, j = 0;
11363
11364 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
11365 dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1);
11366 else
11367 dcb_info->nb_tcs = 1;
11368 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
11369 dcb_info->prio_tc[i] = dcb_cfg->etscfg.prioritytable[i];
11370 for (i = 0; i < dcb_info->nb_tcs; i++)
11371 dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i];
11372
11373 /* get queue mapping if vmdq is disabled */
11374 if (!pf->nb_cfg_vmdq_vsi) {
11375 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11376 if (!(vsi->enabled_tc & (1 << i)))
11377 continue;
11378 tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11379 dcb_info->tc_queue.tc_rxq[j][i].base =
11380 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11381 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11382 dcb_info->tc_queue.tc_txq[j][i].base =
11383 dcb_info->tc_queue.tc_rxq[j][i].base;
11384 bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11385 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11386 dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11387 dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11388 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11389 }
11390 return 0;
11391 }
11392
11393 /* get queue mapping if vmdq is enabled */
11394 do {
11395 vsi = pf->vmdq[j].vsi;
11396 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
11397 if (!(vsi->enabled_tc & (1 << i)))
11398 continue;
11399 tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]);
11400 dcb_info->tc_queue.tc_rxq[j][i].base =
11401 (tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
11402 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT;
11403 dcb_info->tc_queue.tc_txq[j][i].base =
11404 dcb_info->tc_queue.tc_rxq[j][i].base;
11405 bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
11406 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT;
11407 dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf;
11408 dcb_info->tc_queue.tc_txq[j][i].nb_queue =
11409 dcb_info->tc_queue.tc_rxq[j][i].nb_queue;
11410 }
11411 j++;
11412 } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL));
11413 return 0;
11414 }
11415
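/*
 * i40e_dev_rx_queue_intr_enable - re-arm the Rx interrupt of a queue
 * @dev: pointer to the device
 * @queue_id: Rx queue whose interrupt is re-enabled
 *
 * Re-enables the MSI-X vector mapped to @queue_id through the dynamic
 * interrupt control register and flushes the write.
 */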
11416 static int
11417 i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
11418 {
11419 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11420 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11421 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11422 uint16_t msix_intr;
11423
11424 msix_intr = intr_handle->intr_vec[queue_id];
11425 if (msix_intr == I40E_MISC_VEC_ID)
11426 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11427 I40E_PFINT_DYN_CTL0_INTENA_MASK |
11428 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
11429 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11430 else
11431 I40E_WRITE_REG(hw,
11432 I40E_PFINT_DYN_CTLN(msix_intr -
11433 I40E_RX_VEC_START),
11434 I40E_PFINT_DYN_CTLN_INTENA_MASK |
11435 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
11436 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11437
11438 I40E_WRITE_FLUSH(hw);
11439 rte_intr_enable(&pci_dev->intr_handle);
11440
11441 return 0;
11442 }
11443
11444 static int
11445 i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
11446 {
11447 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
11448 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
11449 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11450 uint16_t msix_intr;
11451
11452 msix_intr = intr_handle->intr_vec[queue_id];
11453 if (msix_intr == I40E_MISC_VEC_ID)
11454 I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0,
11455 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK);
11456 else
11457 I40E_WRITE_REG(hw,
11458 I40E_PFINT_DYN_CTLN(msix_intr -
11459 I40E_RX_VEC_START),
11460 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
11461 I40E_WRITE_FLUSH(hw);
11462
11463 return 0;
11464 }
11465
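/*
 * i40e_get_regs - dump device registers
 * @dev: pointer to the device
 * @regs: register dump info; if regs->data is NULL only length and
 * width are reported
 *
 * Registers listed in i40e_regs_adminq are read through the Rx control
 * interface, the remaining ones directly via I40E_READ_REG.
 */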
11466 static int i40e_get_regs(struct rte_eth_dev *dev,
11467 struct rte_dev_reg_info *regs)
11468 {
11469 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11470 uint32_t *ptr_data = regs->data;
11471 uint32_t reg_idx, arr_idx, arr_idx2, reg_offset;
11472 const struct i40e_reg_info *reg_info;
11473
11474 if (ptr_data == NULL) {
11475 regs->length = I40E_GLGEN_STAT_CLEAR + 4;
11476 regs->width = sizeof(uint32_t);
11477 return 0;
11478 }
11479
11480 /* The first few registers have to be read using AQ operations */
11481 reg_idx = 0;
11482 while (i40e_regs_adminq[reg_idx].name) {
11483 reg_info = &i40e_regs_adminq[reg_idx++];
11484 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11485 for (arr_idx2 = 0;
11486 arr_idx2 <= reg_info->count2;
11487 arr_idx2++) {
11488 reg_offset = arr_idx * reg_info->stride1 +
11489 arr_idx2 * reg_info->stride2;
11490 reg_offset += reg_info->base_addr;
11491 ptr_data[reg_offset >> 2] =
11492 i40e_read_rx_ctl(hw, reg_offset);
11493 }
11494 }
11495
11496 /* The remaining registers can be read using primitives */
11497 reg_idx = 0;
11498 while (i40e_regs_others[reg_idx].name) {
11499 reg_info = &i40e_regs_others[reg_idx++];
11500 for (arr_idx = 0; arr_idx <= reg_info->count1; arr_idx++)
11501 for (arr_idx2 = 0;
11502 arr_idx2 <= reg_info->count2;
11503 arr_idx2++) {
11504 reg_offset = arr_idx * reg_info->stride1 +
11505 arr_idx2 * reg_info->stride2;
11506 reg_offset += reg_info->base_addr;
11507 ptr_data[reg_offset >> 2] =
11508 I40E_READ_REG(hw, reg_offset);
11509 }
11510 }
11511
11512 return 0;
11513 }
11514
11515 static int i40e_get_eeprom_length(struct rte_eth_dev *dev)
11516 {
11517 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11518
11519 /* Convert word count to byte count */
11520 return hw->nvm.sr_size << 1;
11521 }
11522
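/*
 * i40e_get_eeprom - read NVM (shadow RAM) contents
 * @dev: pointer to the device
 * @eeprom: byte offset, length and destination buffer
 *
 * Offset and length are converted to 16-bit words, range-checked against
 * the shadow RAM size and read with i40e_read_nvm_buffer().
 */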
11523 static int i40e_get_eeprom(struct rte_eth_dev *dev,
11524 struct rte_dev_eeprom_info *eeprom)
11525 {
11526 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11527 uint16_t *data = eeprom->data;
11528 uint16_t offset, length, cnt_words;
11529 int ret_code;
11530
11531 offset = eeprom->offset >> 1;
11532 length = eeprom->length >> 1;
11533 cnt_words = length;
11534
11535 if (offset > hw->nvm.sr_size ||
11536 offset + length > hw->nvm.sr_size) {
11537 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
11538 return -EINVAL;
11539 }
11540
11541 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
11542
11543 ret_code = i40e_read_nvm_buffer(hw, offset, &cnt_words, data);
11544 if (ret_code != I40E_SUCCESS || cnt_words != length) {
11545 PMD_DRV_LOG(ERR, "EEPROM read failed.");
11546 return -EIO;
11547 }
11548
11549 return 0;
11550 }
11551
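/*
 * i40e_get_module_info - report the plugged module EEPROM layout
 * @dev: pointer to the device
 * @modinfo: filled with the SFF type and EEPROM length of the module
 *
 * Requires firmware with PHY register access; the reported type depends
 * on the module kind (SFP/QSFP+/QSFP28) and its compliance bytes.
 */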
11552 static int i40e_get_module_info(struct rte_eth_dev *dev,
11553 struct rte_eth_dev_module_info *modinfo)
11554 {
11555 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11556 uint32_t sff8472_comp = 0;
11557 uint32_t sff8472_swap = 0;
11558 uint32_t sff8636_rev = 0;
11559 i40e_status status;
11560 uint32_t type = 0;
11561
11562 /* Check if firmware supports reading module EEPROM. */
11563 if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
11564 PMD_DRV_LOG(ERR,
11565 "Module EEPROM memory read not supported. "
11566 "Please update the NVM image.\n");
11567 return -EINVAL;
11568 }
11569
11570 status = i40e_update_link_info(hw);
11571 if (status)
11572 return -EIO;
11573
11574 if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
11575 PMD_DRV_LOG(ERR,
11576 "Cannot read module EEPROM memory. "
11577 "No module connected.\n");
11578 return -EINVAL;
11579 }
11580
11581 type = hw->phy.link_info.module_type[0];
11582
11583 switch (type) {
11584 case I40E_MODULE_TYPE_SFP:
11585 status = i40e_aq_get_phy_register(hw,
11586 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11587 I40E_I2C_EEPROM_DEV_ADDR,
11588 I40E_MODULE_SFF_8472_COMP,
11589 &sff8472_comp, NULL);
11590 if (status)
11591 return -EIO;
11592
11593 status = i40e_aq_get_phy_register(hw,
11594 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11595 I40E_I2C_EEPROM_DEV_ADDR,
11596 I40E_MODULE_SFF_8472_SWAP,
11597 &sff8472_swap, NULL);
11598 if (status)
11599 return -EIO;
11600
11601 /* Check if the module requires address swap to access
11602 * the other EEPROM memory page.
11603 */
11604 if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
11605 PMD_DRV_LOG(WARNING,
11606 "Module address swap to access "
11607 "page 0xA2 is not supported.\n");
11608 modinfo->type = RTE_ETH_MODULE_SFF_8079;
11609 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11610 } else if (sff8472_comp == 0x00) {
11611 /* Module is not SFF-8472 compliant */
11612 modinfo->type = RTE_ETH_MODULE_SFF_8079;
11613 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
11614 } else {
11615 modinfo->type = RTE_ETH_MODULE_SFF_8472;
11616 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
11617 }
11618 break;
11619 case I40E_MODULE_TYPE_QSFP_PLUS:
11620 /* Read from memory page 0. */
11621 status = i40e_aq_get_phy_register(hw,
11622 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11623 0,
11624 I40E_MODULE_REVISION_ADDR,
11625 &sff8636_rev, NULL);
11626 if (status)
11627 return -EIO;
11628 /* Determine revision compliance byte */
11629 if (sff8636_rev > 0x02) {
11630 /* Module is SFF-8636 compliant */
11631 modinfo->type = RTE_ETH_MODULE_SFF_8636;
11632 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11633 } else {
11634 modinfo->type = RTE_ETH_MODULE_SFF_8436;
11635 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11636 }
11637 break;
11638 case I40E_MODULE_TYPE_QSFP28:
11639 modinfo->type = RTE_ETH_MODULE_SFF_8636;
11640 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
11641 break;
11642 default:
11643 PMD_DRV_LOG(ERR, "Module type unrecognized\n");
11644 return -EINVAL;
11645 }
11646 return 0;
11647 }
11648
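/*
 * i40e_get_module_eeprom - read the plugged module EEPROM
 * @dev: pointer to the device
 * @info: offset, length and destination buffer
 *
 * Reads one byte at a time through the PHY register admin queue command,
 * switching I2C address or memory page when the offset crosses a page.
 */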
11649 static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
11650 struct rte_dev_eeprom_info *info)
11651 {
11652 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11653 bool is_sfp = false;
11654 i40e_status status;
11655 uint8_t *data = info->data;
11656 uint32_t value = 0;
11657 uint32_t i;
11658
11659 if (!info || !info->length || !data)
11660 return -EINVAL;
11661
11662 if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
11663 is_sfp = true;
11664
11665 for (i = 0; i < info->length; i++) {
11666 u32 offset = i + info->offset;
11667 u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
11668
11669 /* Check if we need to access the other memory page */
11670 if (is_sfp) {
11671 if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
11672 offset -= RTE_ETH_MODULE_SFF_8079_LEN;
11673 addr = I40E_I2C_EEPROM_DEV_ADDR2;
11674 }
11675 } else {
11676 while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
11677 /* Compute memory page number and offset. */
11678 offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
11679 addr++;
11680 }
11681 }
11682 status = i40e_aq_get_phy_register(hw,
11683 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
11684 addr, offset, &value, NULL);
11685 if (status)
11686 return -EIO;
11687 data[i] = (uint8_t)value;
11688 }
11689 return 0;
11690 }
11691
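/*
 * i40e_set_default_mac_addr - change the default MAC address
 * @dev: pointer to the device
 * @mac_addr: new MAC address
 *
 * Replaces the MAC filter of the current default address on the main VSI
 * and writes the new address to the port (LAA_WOL write type).
 */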
11692 static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
11693 struct ether_addr *mac_addr)
11694 {
11695 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11696 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11697 struct i40e_vsi *vsi = pf->main_vsi;
11698 struct i40e_mac_filter_info mac_filter;
11699 struct i40e_mac_filter *f;
11700 int ret;
11701
11702 if (!is_valid_assigned_ether_addr(mac_addr)) {
11703 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
11704 return -EINVAL;
11705 }
11706
11707 TAILQ_FOREACH(f, &vsi->mac_list, next) {
11708 if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
11709 break;
11710 }
11711
11712 if (f == NULL) {
11713 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
11714 return -EIO;
11715 }
11716
11717 mac_filter = f->mac_info;
11718 ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
11719 if (ret != I40E_SUCCESS) {
11720 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
11721 return -EIO;
11722 }
11723 memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
11724 ret = i40e_vsi_add_mac(vsi, &mac_filter);
11725 if (ret != I40E_SUCCESS) {
11726 PMD_DRV_LOG(ERR, "Failed to add mac filter");
11727 return -EIO;
11728 }
11729 memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
11730
11731 ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
11732 mac_addr->addr_bytes, NULL);
11733 if (ret != I40E_SUCCESS) {
11734 PMD_DRV_LOG(ERR, "Failed to change mac");
11735 return -EIO;
11736 }
11737
11738 return 0;
11739 }
11740
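/*
 * i40e_dev_mtu_set - set the device MTU
 * @dev: pointer to the device
 * @mtu: new MTU value
 *
 * Only allowed while the port is stopped; updates max_rx_pkt_len and
 * the jumbo frame Rx offload flag accordingly.
 */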
11741 static int
11742 i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
11743 {
11744 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11745 struct rte_eth_dev_data *dev_data = pf->dev_data;
11746 uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
11747 int ret = 0;
11748
11749 /* check if mtu is within the allowed range */
11750 if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
11751 return -EINVAL;
11752
11753 /* mtu setting is forbidden if port has been started */
11754 if (dev_data->dev_started) {
11755 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
11756 dev_data->port_id);
11757 return -EBUSY;
11758 }
11759
11760 if (frame_size > ETHER_MAX_LEN)
11761 dev_data->dev_conf.rxmode.offloads |=
11762 DEV_RX_OFFLOAD_JUMBO_FRAME;
11763 else
11764 dev_data->dev_conf.rxmode.offloads &=
11765 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
11766
11767 dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
11768
11769 return ret;
11770 }
11771
11772 /* Restore ethertype filter */
11773 static void
11774 i40e_ethertype_filter_restore(struct i40e_pf *pf)
11775 {
11776 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11777 struct i40e_ethertype_filter_list
11778 *ethertype_list = &pf->ethertype.ethertype_list;
11779 struct i40e_ethertype_filter *f;
11780 struct i40e_control_filter_stats stats;
11781 uint16_t flags;
11782
11783 TAILQ_FOREACH(f, ethertype_list, rules) {
11784 flags = 0;
11785 if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
11786 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
11787 if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
11788 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
11789 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
11790
11791 memset(&stats, 0, sizeof(stats));
11792 i40e_aq_add_rem_control_packet_filter(hw,
11793 f->input.mac_addr.addr_bytes,
11794 f->input.ether_type,
11795 flags, pf->main_vsi->seid,
11796 f->queue, 1, &stats, NULL);
11797 }
11798 PMD_DRV_LOG(INFO, "Ethertype filter:"
11799 " mac_etype_used = %u, etype_used = %u,"
11800 " mac_etype_free = %u, etype_free = %u",
11801 stats.mac_etype_used, stats.etype_used,
11802 stats.mac_etype_free, stats.etype_free);
11803 }
11804
11805 /* Restore tunnel filter */
11806 static void
11807 i40e_tunnel_filter_restore(struct i40e_pf *pf)
11808 {
11809 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
11810 struct i40e_vsi *vsi;
11811 struct i40e_pf_vf *vf;
11812 struct i40e_tunnel_filter_list
11813 *tunnel_list = &pf->tunnel.tunnel_list;
11814 struct i40e_tunnel_filter *f;
11815 struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
11816 bool big_buffer = 0;
11817
11818 TAILQ_FOREACH(f, tunnel_list, rules) {
11819 if (!f->is_to_vf)
11820 vsi = pf->main_vsi;
11821 else {
11822 vf = &pf->vfs[f->vf_id];
11823 vsi = vf->vsi;
11824 }
11825 memset(&cld_filter, 0, sizeof(cld_filter));
11826 ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
11827 (struct ether_addr *)&cld_filter.element.outer_mac);
11828 ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
11829 (struct ether_addr *)&cld_filter.element.inner_mac);
11830 cld_filter.element.inner_vlan = f->input.inner_vlan;
11831 cld_filter.element.flags = f->input.flags;
11832 cld_filter.element.tenant_id = f->input.tenant_id;
11833 cld_filter.element.queue_number = f->queue;
11834 rte_memcpy(cld_filter.general_fields,
11835 f->input.general_fields,
11836 sizeof(f->input.general_fields));
11837
11838 if (((f->input.flags &
11839 I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
11840 I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
11841 ((f->input.flags &
11842 I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
11843 I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
11844 ((f->input.flags &
11845 I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
11846 I40E_AQC_ADD_CLOUD_FILTER_0X10))
11847 big_buffer = 1;
11848
11849 if (big_buffer)
11850 i40e_aq_add_cloud_filters_big_buffer(hw,
11851 vsi->seid, &cld_filter, 1);
11852 else
11853 i40e_aq_add_cloud_filters(hw, vsi->seid,
11854 &cld_filter.element, 1);
11855 }
11856 }
11857
11858 /* Restore rss filter */
11859 static inline void
11860 i40e_rss_filter_restore(struct i40e_pf *pf)
11861 {
11862 struct i40e_rte_flow_rss_conf *conf =
11863 &pf->rss_info;
11864 if (conf->conf.queue_num)
11865 i40e_config_rss_filter(pf, conf, TRUE);
11866 }
11867
11868 static void
11869 i40e_filter_restore(struct i40e_pf *pf)
11870 {
11871 i40e_ethertype_filter_restore(pf);
11872 i40e_tunnel_filter_restore(pf);
11873 i40e_fdir_filter_restore(pf);
11874 i40e_rss_filter_restore(pf);
11875 }
11876
11877 static bool
11878 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
11879 {
11880 if (strcmp(dev->device->driver->name, drv->driver.name))
11881 return false;
11882
11883 return true;
11884 }
11885
11886 bool
11887 is_i40e_supported(struct rte_eth_dev *dev)
11888 {
11889 return is_device_supported(dev, &rte_i40e_pmd);
11890 }
11891
11892 struct i40e_customized_pctype*
11893 i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
11894 {
11895 int i;
11896
11897 for (i = 0; i < I40E_CUSTOMIZED_MAX; i++) {
11898 if (pf->customized_pctype[i].index == index)
11899 return &pf->customized_pctype[i];
11900 }
11901 return NULL;
11902 }
11903
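/*
 * i40e_update_customized_pctype - update customized pctypes from a DDP package
 * @dev: pointer to the device
 * @pkg: DDP package buffer
 * @pkg_size: size of the package buffer
 * @proto_num: number of protocols described in the package
 * @proto: protocol information read from the package
 * @op: add or delete operation
 *
 * Matches the pctypes found in the package (GTPC/GTPU) against the
 * customized pctype table of the PF and marks them valid or invalid.
 */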
11904 static int
11905 i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
11906 uint32_t pkg_size, uint32_t proto_num,
11907 struct rte_pmd_i40e_proto_info *proto,
11908 enum rte_pmd_i40e_package_op op)
11909 {
11910 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
11911 uint32_t pctype_num;
11912 struct rte_pmd_i40e_ptype_info *pctype;
11913 uint32_t buff_size;
11914 struct i40e_customized_pctype *new_pctype = NULL;
11915 uint8_t proto_id;
11916 uint8_t pctype_value;
11917 char name[64];
11918 uint32_t i, j, n;
11919 int ret;
11920
11921 if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
11922 op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
11923 PMD_DRV_LOG(ERR, "Unsupported operation.");
11924 return -1;
11925 }
11926
11927 ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11928 (uint8_t *)&pctype_num, sizeof(pctype_num),
11929 RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
11930 if (ret) {
11931 PMD_DRV_LOG(ERR, "Failed to get pctype number");
11932 return -1;
11933 }
11934 if (!pctype_num) {
11935 PMD_DRV_LOG(INFO, "No new pctype added");
11936 return -1;
11937 }
11938
11939 buff_size = pctype_num * sizeof(struct rte_pmd_i40e_proto_info);
11940 pctype = rte_zmalloc("new_pctype", buff_size, 0);
11941 if (!pctype) {
11942 PMD_DRV_LOG(ERR, "Failed to allocate memory");
11943 return -1;
11944 }
11945 /* get information about new pctype list */
11946 ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
11947 (uint8_t *)pctype, buff_size,
11948 RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST);
11949 if (ret) {
11950 PMD_DRV_LOG(ERR, "Failed to get pctype list");
11951 rte_free(pctype);
11952 return -1;
11953 }
11954
11955 /* Update customized pctype. */
11956 for (i = 0; i < pctype_num; i++) {
11957 pctype_value = pctype[i].ptype_id;
11958 memset(name, 0, sizeof(name));
11959 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
11960 proto_id = pctype[i].protocols[j];
11961 if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
11962 continue;
11963 for (n = 0; n < proto_num; n++) {
11964 if (proto[n].proto_id != proto_id)
11965 continue;
11966 strcat(name, proto[n].name);
11967 strcat(name, "_");
11968 break;
11969 }
11970 }
11971 name[strlen(name) - 1] = '\0';
11972 if (!strcmp(name, "GTPC"))
11973 new_pctype =
11974 i40e_find_customized_pctype(pf,
11975 I40E_CUSTOMIZED_GTPC);
11976 else if (!strcmp(name, "GTPU_IPV4"))
11977 new_pctype =
11978 i40e_find_customized_pctype(pf,
11979 I40E_CUSTOMIZED_GTPU_IPV4);
11980 else if (!strcmp(name, "GTPU_IPV6"))
11981 new_pctype =
11982 i40e_find_customized_pctype(pf,
11983 I40E_CUSTOMIZED_GTPU_IPV6);
11984 else if (!strcmp(name, "GTPU"))
11985 new_pctype =
11986 i40e_find_customized_pctype(pf,
11987 I40E_CUSTOMIZED_GTPU);
11988 if (new_pctype) {
11989 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
11990 new_pctype->pctype = pctype_value;
11991 new_pctype->valid = true;
11992 } else {
11993 new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
11994 new_pctype->valid = false;
11995 }
11996 }
11997 }
11998
11999 rte_free(pctype);
12000 return 0;
12001 }
12002
12003 static int
12004 i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
12005 uint32_t pkg_size, uint32_t proto_num,
12006 struct rte_pmd_i40e_proto_info *proto,
12007 enum rte_pmd_i40e_package_op op)
12008 {
12009 struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
12010 uint16_t port_id = dev->data->port_id;
12011 uint32_t ptype_num;
12012 struct rte_pmd_i40e_ptype_info *ptype;
12013 uint32_t buff_size;
12014 uint8_t proto_id;
12015 char name[RTE_PMD_I40E_DDP_NAME_SIZE];
12016 uint32_t i, j, n;
12017 bool in_tunnel;
12018 int ret;
12019
12020 if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12021 op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12022 PMD_DRV_LOG(ERR, "Unsupported operation.");
12023 return -1;
12024 }
12025
12026 if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
12027 rte_pmd_i40e_ptype_mapping_reset(port_id);
12028 return 0;
12029 }
12030
12031 /* get information about new ptype num */
12032 ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12033 (uint8_t *)&ptype_num, sizeof(ptype_num),
12034 RTE_PMD_I40E_PKG_INFO_PTYPE_NUM);
12035 if (ret) {
12036 PMD_DRV_LOG(ERR, "Failed to get ptype number");
12037 return ret;
12038 }
12039 if (!ptype_num) {
12040 PMD_DRV_LOG(INFO, "No new ptype added");
12041 return -1;
12042 }
12043
12044 buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info);
12045 ptype = rte_zmalloc("new_ptype", buff_size, 0);
12046 if (!ptype) {
12047 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12048 return -1;
12049 }
12050
12051 /* get information about new ptype list */
12052 ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12053 (uint8_t *)ptype, buff_size,
12054 RTE_PMD_I40E_PKG_INFO_PTYPE_LIST);
12055 if (ret) {
12056 PMD_DRV_LOG(ERR, "Failed to get ptype list");
12057 rte_free(ptype);
12058 return ret;
12059 }
12060
12061 buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_mapping);
12062 ptype_mapping = rte_zmalloc("ptype_mapping", buff_size, 0);
12063 if (!ptype_mapping) {
12064 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12065 rte_free(ptype);
12066 return -1;
12067 }
12068
12069 /* Update ptype mapping table. */
12070 for (i = 0; i < ptype_num; i++) {
12071 ptype_mapping[i].hw_ptype = ptype[i].ptype_id;
12072 ptype_mapping[i].sw_ptype = 0;
12073 in_tunnel = false;
12074 for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) {
12075 proto_id = ptype[i].protocols[j];
12076 if (proto_id == RTE_PMD_I40E_PROTO_UNUSED)
12077 continue;
12078 for (n = 0; n < proto_num; n++) {
12079 if (proto[n].proto_id != proto_id)
12080 continue;
12081 memset(name, 0, sizeof(name));
12082 strcpy(name, proto[n].name);
12083 if (!strncasecmp(name, "PPPOE", 5))
12084 ptype_mapping[i].sw_ptype |=
12085 RTE_PTYPE_L2_ETHER_PPPOE;
12086 else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12087 !in_tunnel) {
12088 ptype_mapping[i].sw_ptype |=
12089 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12090 ptype_mapping[i].sw_ptype |=
12091 RTE_PTYPE_L4_FRAG;
12092 } else if (!strncasecmp(name, "IPV4FRAG", 8) &&
12093 in_tunnel) {
12094 ptype_mapping[i].sw_ptype |=
12095 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12096 ptype_mapping[i].sw_ptype |=
12097 RTE_PTYPE_INNER_L4_FRAG;
12098 } else if (!strncasecmp(name, "OIPV4", 5)) {
12099 ptype_mapping[i].sw_ptype |=
12100 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12101 in_tunnel = true;
12102 } else if (!strncasecmp(name, "IPV4", 4) &&
12103 !in_tunnel)
12104 ptype_mapping[i].sw_ptype |=
12105 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
12106 else if (!strncasecmp(name, "IPV4", 4) &&
12107 in_tunnel)
12108 ptype_mapping[i].sw_ptype |=
12109 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
12110 else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12111 !in_tunnel) {
12112 ptype_mapping[i].sw_ptype |=
12113 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12114 ptype_mapping[i].sw_ptype |=
12115 RTE_PTYPE_L4_FRAG;
12116 } else if (!strncasecmp(name, "IPV6FRAG", 8) &&
12117 in_tunnel) {
12118 ptype_mapping[i].sw_ptype |=
12119 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12120 ptype_mapping[i].sw_ptype |=
12121 RTE_PTYPE_INNER_L4_FRAG;
12122 } else if (!strncasecmp(name, "OIPV6", 5)) {
12123 ptype_mapping[i].sw_ptype |=
12124 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12125 in_tunnel = true;
12126 } else if (!strncasecmp(name, "IPV6", 4) &&
12127 !in_tunnel)
12128 ptype_mapping[i].sw_ptype |=
12129 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
12130 else if (!strncasecmp(name, "IPV6", 4) &&
12131 in_tunnel)
12132 ptype_mapping[i].sw_ptype |=
12133 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
12134 else if (!strncasecmp(name, "UDP", 3) &&
12135 !in_tunnel)
12136 ptype_mapping[i].sw_ptype |=
12137 RTE_PTYPE_L4_UDP;
12138 else if (!strncasecmp(name, "UDP", 3) &&
12139 in_tunnel)
12140 ptype_mapping[i].sw_ptype |=
12141 RTE_PTYPE_INNER_L4_UDP;
12142 else if (!strncasecmp(name, "TCP", 3) &&
12143 !in_tunnel)
12144 ptype_mapping[i].sw_ptype |=
12145 RTE_PTYPE_L4_TCP;
12146 else if (!strncasecmp(name, "TCP", 3) &&
12147 in_tunnel)
12148 ptype_mapping[i].sw_ptype |=
12149 RTE_PTYPE_INNER_L4_TCP;
12150 else if (!strncasecmp(name, "SCTP", 4) &&
12151 !in_tunnel)
12152 ptype_mapping[i].sw_ptype |=
12153 RTE_PTYPE_L4_SCTP;
12154 else if (!strncasecmp(name, "SCTP", 4) &&
12155 in_tunnel)
12156 ptype_mapping[i].sw_ptype |=
12157 RTE_PTYPE_INNER_L4_SCTP;
12158 else if ((!strncasecmp(name, "ICMP", 4) ||
12159 !strncasecmp(name, "ICMPV6", 6)) &&
12160 !in_tunnel)
12161 ptype_mapping[i].sw_ptype |=
12162 RTE_PTYPE_L4_ICMP;
12163 else if ((!strncasecmp(name, "ICMP", 4) ||
12164 !strncasecmp(name, "ICMPV6", 6)) &&
12165 in_tunnel)
12166 ptype_mapping[i].sw_ptype |=
12167 RTE_PTYPE_INNER_L4_ICMP;
12168 else if (!strncasecmp(name, "GTPC", 4)) {
12169 ptype_mapping[i].sw_ptype |=
12170 RTE_PTYPE_TUNNEL_GTPC;
12171 in_tunnel = true;
12172 } else if (!strncasecmp(name, "GTPU", 4)) {
12173 ptype_mapping[i].sw_ptype |=
12174 RTE_PTYPE_TUNNEL_GTPU;
12175 in_tunnel = true;
12176 } else if (!strncasecmp(name, "GRENAT", 6)) {
12177 ptype_mapping[i].sw_ptype |=
12178 RTE_PTYPE_TUNNEL_GRENAT;
12179 in_tunnel = true;
12180 } else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
12181 !strncasecmp(name, "L2TPV2", 6)) {
12182 ptype_mapping[i].sw_ptype |=
12183 RTE_PTYPE_TUNNEL_L2TP;
12184 in_tunnel = true;
12185 }
12186
12187 break;
12188 }
12189 }
12190 }
12191
12192 ret = rte_pmd_i40e_ptype_mapping_update(port_id, ptype_mapping,
12193 ptype_num, 0);
12194 if (ret)
12195 PMD_DRV_LOG(ERR, "Failed to update mapping table.");
12196
12197 rte_free(ptype_mapping);
12198 rte_free(ptype);
12199 return ret;
12200 }
12201
12202 void
12203 i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
12204 uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
12205 {
12206 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
12207 uint32_t proto_num;
12208 struct rte_pmd_i40e_proto_info *proto;
12209 uint32_t buff_size;
12210 uint32_t i;
12211 int ret;
12212
12213 if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
12214 op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
12215 PMD_DRV_LOG(ERR, "Unsupported operation.");
12216 return;
12217 }
12218
12219 /* get information about protocol number */
12220 ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12221 (uint8_t *)&proto_num, sizeof(proto_num),
12222 RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM);
12223 if (ret) {
12224 PMD_DRV_LOG(ERR, "Failed to get protocol number");
12225 return;
12226 }
12227 if (!proto_num) {
12228 PMD_DRV_LOG(INFO, "No new protocol added");
12229 return;
12230 }
12231
12232 buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info);
12233 proto = rte_zmalloc("new_proto", buff_size, 0);
12234 if (!proto) {
12235 PMD_DRV_LOG(ERR, "Failed to allocate memory");
12236 return;
12237 }
12238
12239 /* get information about protocol list */
12240 ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
12241 (uint8_t *)proto, buff_size,
12242 RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST);
12243 if (ret) {
12244 PMD_DRV_LOG(ERR, "Failed to get protocol list");
12245 rte_free(proto);
12246 return;
12247 }
12248
12249 /* Check if GTP is supported. */
12250 for (i = 0; i < proto_num; i++) {
12251 if (!strncmp(proto[i].name, "GTP", 3)) {
12252 if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
12253 pf->gtp_support = true;
12254 else
12255 pf->gtp_support = false;
12256 break;
12257 }
12258 }
12259
12260 /* Update customized pctype info */
12261 ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
12262 proto_num, proto, op);
12263 if (ret)
12264 PMD_DRV_LOG(INFO, "No pctype is updated.");
12265
12266 /* Update customized ptype info */
12267 ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
12268 proto_num, proto, op);
12269 if (ret)
12270 PMD_DRV_LOG(INFO, "No ptype is updated.");
12271
12272 rte_free(proto);
12273 }
12274
12275 /* Create a QinQ cloud filter
12276 *
12277 * The Fortville NIC has limited resources for tunnel filters,
12278 * so we can only reuse existing filters.
12279 *
12280 * In step 1 we define which Field Vector fields can be used for
12281 * filter types.
12282 * As we do not have the inner tag defined as a field,
12283 * we have to define it first, by reusing one of the L1 entries.
12284 *
12285 * In step 2 we are replacing one of the existing filter types with
12286 * a new one for QinQ.
12287 * As we are reusing L1 and replacing L2, some of the default filter
12288 * types will disappear, which depends on the L1 and L2 entries we reuse.
12289 *
12290 * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
12291 *
12292 * 1. Create L1 filter of outer vlan (12b) which will be in use
12293 * later when we define the cloud filter.
12294 * a. Valid_flags.replace_cloud = 0
12295 * b. Old_filter = 10 (Stag_Inner_Vlan)
12296 * c. New_filter = 0x10
12297 * d. TR bit = 0xff (optional, not used here)
12298 * e. Buffer – 2 entries:
12299 * i. Byte 0 = 8 (outer vlan FV index).
12300 * Byte 1 = 0 (rsv)
12301 * Byte 2-3 = 0x0fff
12302 * ii. Byte 0 = 37 (inner vlan FV index).
12303 * Byte 1 = 0 (rsv)
12304 * Byte 2-3 = 0x0fff
12305 *
12306 * Step 2:
12307 * 2. Create cloud filter using two L1 filters entries: stag and
12308 * new filter(outer vlan+ inner vlan)
12309 * a. Valid_flags.replace_cloud = 1
12310 * b. Old_filter = 1 (instead of outer IP)
12311 * c. New_filter = 0x10
12312 * d. Buffer – 2 entries:
12313 * i. Byte 0 = 0x80 | 7 (valid | Stag).
12314 * Byte 1-3 = 0 (rsv)
12315 * ii. Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
12316 * Byte 9-11 = 0 (rsv)
12317 */
12318 static int
12319 i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
12320 {
12321 int ret = -ENOTSUP;
12322 struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
12323 struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
12324 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12325 struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
12326
12327 if (pf->support_multi_driver) {
12328 PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
12329 return ret;
12330 }
12331
12332 /* Init */
12333 memset(&filter_replace, 0,
12334 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12335 memset(&filter_replace_buf, 0,
12336 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12337
12338 /* create L1 filter */
12339 filter_replace.old_filter_type =
12340 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
12341 filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12342 filter_replace.tr_bit = 0;
12343
12344 /* Prepare the buffer, 2 entries */
12345 filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
12346 filter_replace_buf.data[0] |=
12347 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12348 /* Field Vector 12b mask */
12349 filter_replace_buf.data[2] = 0xff;
12350 filter_replace_buf.data[3] = 0x0f;
12351 filter_replace_buf.data[4] =
12352 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
12353 filter_replace_buf.data[4] |=
12354 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12355 /* Field Vector 12b mask */
12356 filter_replace_buf.data[6] = 0xff;
12357 filter_replace_buf.data[7] = 0x0f;
12358 ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12359 &filter_replace_buf);
12360 if (ret != I40E_SUCCESS)
12361 return ret;
12362
12363 if (filter_replace.old_filter_type !=
12364 filter_replace.new_filter_type)
12365 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
12366 " original: 0x%x, new: 0x%x",
12367 dev->device->name,
12368 filter_replace.old_filter_type,
12369 filter_replace.new_filter_type);
12370
12371 /* Apply the second L2 cloud filter */
12372 memset(&filter_replace, 0,
12373 sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
12374 memset(&filter_replace_buf, 0,
12375 sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
12376
12377 /* Create the L2 filter; the input for the L2 filter will be the L1 filter */
12378 filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
12379 filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
12380 filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12381
12382 /* Prepare the buffer, 2 entries */
12383 filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
12384 filter_replace_buf.data[0] |=
12385 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12386 filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
12387 filter_replace_buf.data[4] |=
12388 I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
12389 ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
12390 &filter_replace_buf);
12391 if (!ret && (filter_replace.old_filter_type !=
12392 filter_replace.new_filter_type))
12393 PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
12394 " original: 0x%x, new: 0x%x",
12395 dev->device->name,
12396 filter_replace.old_filter_type,
12397 filter_replace.new_filter_type);
12398
12399 return ret;
12400 }
12401
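/* Copy an rte_flow RSS action into the driver-local RSS configuration,
 * duplicating the hash key and queue arrays into the output structure.
 */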
12402 int
12403 i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
12404 const struct rte_flow_action_rss *in)
12405 {
12406 if (in->key_len > RTE_DIM(out->key) ||
12407 in->queue_num > RTE_DIM(out->queue))
12408 return -EINVAL;
12409 out->conf = (struct rte_flow_action_rss){
12410 .func = in->func,
12411 .level = in->level,
12412 .types = in->types,
12413 .key_len = in->key_len,
12414 .queue_num = in->queue_num,
12415 .key = memcpy(out->key, in->key, in->key_len),
12416 .queue = memcpy(out->queue, in->queue,
12417 sizeof(*in->queue) * in->queue_num),
12418 };
12419 return 0;
12420 }
12421
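/* Compare two rte_flow RSS action configurations; return non-zero when
 * they describe the same function, level, types, key and queues.
 */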
12422 int
12423 i40e_action_rss_same(const struct rte_flow_action_rss *comp,
12424 const struct rte_flow_action_rss *with)
12425 {
12426 return (comp->func == with->func &&
12427 comp->level == with->level &&
12428 comp->types == with->types &&
12429 comp->key_len == with->key_len &&
12430 comp->queue_num == with->queue_num &&
12431 !memcmp(comp->key, with->key, with->key_len) &&
12432 !memcmp(comp->queue, with->queue,
12433 sizeof(*with->queue) * with->queue_num));
12434 }
12435
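/* Add or remove an rte_flow RSS filter on the PF. On add, the redirection
 * table and hash key are programmed from the supplied configuration; on
 * remove, RSS is disabled when the stored configuration matches the one
 * being removed.
 */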
12436 int
12437 i40e_config_rss_filter(struct i40e_pf *pf,
12438 struct i40e_rte_flow_rss_conf *conf, bool add)
12439 {
12440 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
12441 uint32_t i, lut = 0;
12442 uint16_t j, num;
12443 struct rte_eth_rss_conf rss_conf = {
12444 .rss_key = conf->conf.key_len ?
12445 (void *)(uintptr_t)conf->conf.key : NULL,
12446 .rss_key_len = conf->conf.key_len,
12447 .rss_hf = conf->conf.types,
12448 };
12449 struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
12450
12451 if (!add) {
12452 if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
12453 i40e_pf_disable_rss(pf);
12454 memset(rss_info, 0,
12455 sizeof(struct i40e_rte_flow_rss_conf));
12456 return 0;
12457 }
12458 return -EINVAL;
12459 }
12460
12461 if (rss_info->conf.queue_num)
12462 return -EINVAL;
12463
12464 /* If both VMDQ and RSS are enabled, not all PF queues are configured.
12465 * It's necessary to calculate the actual number of configured PF queues.
12466 */
12467 if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
12468 num = i40e_pf_calc_configured_queues_num(pf);
12469 else
12470 num = pf->dev_data->nb_rx_queues;
12471
12472 num = RTE_MIN(num, conf->conf.queue_num);
12473 PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
12474 num);
12475
12476 if (num == 0) {
12477 PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
12478 return -ENOTSUP;
12479 }
12480
12481 /* Fill in redirection table */
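/* Four 8-bit LUT entries are packed into each 32-bit I40E_PFQF_HLUT
 * register, so the accumulated value is written out on every fourth
 * iteration.
 */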
12482 for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
12483 if (j == num)
12484 j = 0;
12485 lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
12486 hw->func_caps.rss_table_entry_width) - 1));
12487 if ((i & 3) == 3)
12488 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
12489 }
12490
12491 if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
12492 i40e_pf_disable_rss(pf);
12493 return 0;
12494 }
12495 if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
12496 (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
12497 /* Random default keys */
12498 static uint32_t rss_key_default[] = {0x6b793944,
12499 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
12500 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
12501 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
12502
12503 rss_conf.rss_key = (uint8_t *)rss_key_default;
12504 rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
12505 sizeof(uint32_t);
12506 }
12507
12508 i40e_hw_rss_hash_set(pf, &rss_conf);
12509
12510 if (i40e_rss_conf_init(rss_info, &conf->conf))
12511 return -EINVAL;
12512
12513 return 0;
12514 }
12515
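/* Constructor: register the i40e log types and default their level to NOTICE. */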
12516 RTE_INIT(i40e_init_log)
12517 {
12518 i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
12519 if (i40e_logtype_init >= 0)
12520 rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
12521 i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver");
12522 if (i40e_logtype_driver >= 0)
12523 rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
12524 }
12525
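/* The devargs registered below are passed per device on the EAL command
 * line; a minimal example (the PCI address is purely illustrative):
 *   -w 0000:02:00.0,queue-num-per-vf=4,support-multi-driver=1
 */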
12526 RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
12527 ETH_I40E_FLOATING_VEB_ARG "=1"
12528 ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
12529 ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
12530 ETH_I40E_SUPPORT_MULTI_DRIVER "=1");