/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#ifndef _E1000_ETHDEV_H_
#define _E1000_ETHDEV_H_

#include <stdint.h>

#include <rte_flow.h>
#include <rte_time.h>
#include <rte_pci.h>

#define E1000_INTEL_VENDOR_ID 0x8086

/* need update link, bit flag */
#define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
#define E1000_FLAG_MAILBOX (uint32_t)(1 << 1)

/*
 * Defines that were not part of e1000_hw.h as they are not used by the FreeBSD
 * driver.
 */
#define E1000_ADVTXD_POPTS_TXSM 0x00000200 /* L4 Checksum offload request */
#define E1000_ADVTXD_POPTS_IXSM 0x00000100 /* IP Checksum offload request */
#define E1000_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE of Reserved */
#define E1000_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */
#define E1000_RXD_ERR_CKSUM_BIT 29
#define E1000_RXD_ERR_CKSUM_MSK 3
#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */
#define E1000_CTRL_EXT_EXTEND_VLAN (1<<26) /* EXTENDED VLAN */
#define IGB_VFTA_SIZE 128

#define IGB_HKEY_MAX_INDEX 10
#define IGB_MAX_RX_QUEUE_NUM 8
#define IGB_MAX_RX_QUEUE_NUM_82576 16

#define E1000_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */
#define E1000_SYN_FILTER_QUEUE 0x0000000E /* syn filter queue field */
#define E1000_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field shift */
#define E1000_RFCTL_SYNQFP 0x00080000 /* SYNQFP in RFCTL register */

#define E1000_ETQF_ETHERTYPE 0x0000FFFF
#define E1000_ETQF_QUEUE 0x00070000
#define E1000_ETQF_QUEUE_SHIFT 16
#define E1000_MAX_ETQF_FILTERS 8

#define E1000_IMIR_DSTPORT 0x0000FFFF
#define E1000_IMIR_PRIORITY 0xE0000000
#define E1000_MAX_TTQF_FILTERS 8
#define E1000_2TUPLE_MAX_PRI 7

#define E1000_MAX_FLEX_FILTERS 8
#define E1000_MAX_FHFT 4
#define E1000_MAX_FHFT_EXT 4
#define E1000_FHFT_SIZE_IN_DWD 64
#define E1000_MAX_FLEX_FILTER_PRI 7
#define E1000_MAX_FLEX_FILTER_LEN 128
#define E1000_MAX_FLEX_FILTER_DWDS \
        (E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t))
#define E1000_FLEX_FILTERS_MASK_SIZE \
        (E1000_MAX_FLEX_FILTER_DWDS / 2)
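
/*
 * Note on the sizes above: a flex filter pattern is at most
 * E1000_MAX_FLEX_FILTER_LEN (128) bytes, i.e. 32 dwords, and its mask carries
 * one bit per pattern byte, i.e. 128 bits packed into
 * E1000_MAX_FLEX_FILTER_DWDS / 2 = 16 bytes.
 */
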
#define E1000_FHFT_QUEUEING_LEN 0x0000007F
#define E1000_FHFT_QUEUEING_QUEUE 0x00000700
#define E1000_FHFT_QUEUEING_PRIO 0x00070000
#define E1000_FHFT_QUEUEING_OFFSET 0xFC
#define E1000_FHFT_QUEUEING_QUEUE_SHIFT 8
#define E1000_FHFT_QUEUEING_PRIO_SHIFT 16
#define E1000_WUFC_FLEX_HQ 0x00004000

#define E1000_SPQF_SRCPORT 0x0000FFFF

#define E1000_MAX_FTQF_FILTERS 8
#define E1000_FTQF_PROTOCOL_MASK 0x000000FF
#define E1000_FTQF_5TUPLE_MASK_SHIFT 28
#define E1000_FTQF_QUEUE_MASK 0x03ff0000
#define E1000_FTQF_QUEUE_SHIFT 16
#define E1000_FTQF_QUEUE_ENABLE 0x00000100

#define IGB_RSS_OFFLOAD_ALL ( \
        ETH_RSS_IPV4 | \
        ETH_RSS_NONFRAG_IPV4_TCP | \
        ETH_RSS_NONFRAG_IPV4_UDP | \
        ETH_RSS_IPV6 | \
        ETH_RSS_NONFRAG_IPV6_TCP | \
        ETH_RSS_NONFRAG_IPV6_UDP | \
        ETH_RSS_IPV6_EX | \
        ETH_RSS_IPV6_TCP_EX | \
        ETH_RSS_IPV6_UDP_EX)

/*
 * The overhead from MTU to max frame size.
 * VLAN is considered, so one tag needs to be counted.
 */
#define E1000_ETH_OVERHEAD (ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE)
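
/*
 * For illustration: with the usual ETHER_HDR_LEN (14), ETHER_CRC_LEN (4) and
 * VLAN_TAG_SIZE (4) values, this overhead is 22 bytes, so a 1500-byte MTU
 * corresponds to a 1522-byte maximum frame size.
 */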

/*
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
 */
#define E1000_MIN_RING_DESC 32
#define E1000_MAX_RING_DESC 4096

/*
 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN should
 * be a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary.
 * This also optimizes for the cache line size effect; the hardware supports
 * cache line sizes up to 128 bytes.
 */
#define E1000_ALIGN 128

#define IGB_RXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
#define IGB_TXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_tx_desc))

#define EM_RXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_rx_desc))
#define EM_TXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_data_desc))
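
/*
 * For illustration: the advanced and legacy descriptors are 16 bytes each, so
 * the alignment macros above evaluate to 128 / 16 = 8 descriptors. A ring of,
 * e.g., 512 descriptors then occupies 512 * 16 = 8192 bytes, which satisfies
 * the multiple-of-128-bytes requirement on RDLEN/TDLEN.
 */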

#define E1000_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define E1000_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET

#define IGB_TX_MAX_SEG UINT8_MAX
#define IGB_TX_MAX_MTU_SEG UINT8_MAX
#define EM_TX_MAX_SEG UINT8_MAX
#define EM_TX_MAX_MTU_SEG UINT8_MAX

#define MAC_TYPE_FILTER_SUP(type) do {\
        if ((type) != e1000_82580 && (type) != e1000_i350 &&\
            (type) != e1000_82576 && (type) != e1000_i210 &&\
            (type) != e1000_i211)\
                return -ENOTSUP;\
} while (0)

#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
        if ((type) != e1000_82580 && (type) != e1000_i350 &&\
            (type) != e1000_i210 && (type) != e1000_i211)\
                return -ENOTSUP;\
} while (0)
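
/*
 * Intended usage (illustrative sketch, the function name is hypothetical):
 * place the check at the top of a filter-configuration routine so that it
 * returns -ENOTSUP for MAC types that do not support the feature.
 *
 *      static int example_filter_check(enum e1000_mac_type type)
 *      {
 *              MAC_TYPE_FILTER_SUP(type);
 *              return 0;
 *      }
 */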

/* structure for interrupt related data */
struct e1000_interrupt {
        uint32_t flags;
        uint32_t mask;
};

/* local vfta copy */
struct e1000_vfta {
        uint32_t vfta[IGB_VFTA_SIZE];
};

/*
 * VF data used by the PF host only
 */
#define E1000_MAX_VF_MC_ENTRIES 30
struct e1000_vf_info {
        uint8_t vf_mac_addresses[ETHER_ADDR_LEN];
        uint16_t vf_mc_hashes[E1000_MAX_VF_MC_ENTRIES];
        uint16_t num_vf_mc_hashes;
        uint16_t default_vf_vlan_id;
        uint16_t vlans_enabled;
        uint16_t pf_qos;
        uint16_t vlan_count;
        uint16_t tx_rate;
};

TAILQ_HEAD(e1000_flex_filter_list, e1000_flex_filter);

struct e1000_flex_filter_info {
        uint16_t len;
        uint32_t dwords[E1000_MAX_FLEX_FILTER_DWDS]; /* flex bytes in dword. */
        /* if mask bit is 1b, do not compare corresponding byte in dwords. */
        uint8_t mask[E1000_FLEX_FILTERS_MASK_SIZE];
        uint8_t priority;
};

/* Flex filter structure */
struct e1000_flex_filter {
        TAILQ_ENTRY(e1000_flex_filter) entries;
        uint16_t index; /* index of flex filter */
        struct e1000_flex_filter_info filter_info;
        uint16_t queue; /* rx queue assigned to */
};

TAILQ_HEAD(e1000_5tuple_filter_list, e1000_5tuple_filter);
TAILQ_HEAD(e1000_2tuple_filter_list, e1000_2tuple_filter);

struct e1000_5tuple_filter_info {
        uint32_t dst_ip;
        uint32_t src_ip;
        uint16_t dst_port;
        uint16_t src_port;
        uint8_t proto; /* l4 protocol. */
        /* A packet matching the 5-tuple above with any of these TCP flag bits
         * set will hit this filter.
         */
        uint8_t tcp_flags;
        uint8_t priority;        /* seven levels (001b-111b), 111b is highest,
                                    used when more than one filter matches. */
        uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
                src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
                dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
                src_port_mask:1, /* if mask is 1b, do not compare src port. */
                proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};
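
/*
 * For illustration: a filter meant to match only on the L4 protocol and the
 * destination port would fill in proto and dst_port and set dst_ip_mask,
 * src_ip_mask and src_port_mask to 1, so those fields are not compared.
 */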

struct e1000_2tuple_filter_info {
        uint16_t dst_port;
        uint8_t proto; /* l4 protocol. */
        /* A packet matching the 2-tuple above with any of these TCP flag bits
         * set will hit this filter.
         */
        uint8_t tcp_flags;
        uint8_t priority;        /* seven levels (001b-111b), 111b is highest,
                                    used when more than one filter matches. */
        uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
                src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
                dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
                src_port_mask:1, /* if mask is 1b, do not compare src port. */
                proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};

/* 5tuple filter structure */
struct e1000_5tuple_filter {
        TAILQ_ENTRY(e1000_5tuple_filter) entries;
        uint16_t index; /* the index of 5tuple filter */
        struct e1000_5tuple_filter_info filter_info;
        uint16_t queue; /* rx queue assigned to */
};

/* 2tuple filter structure */
struct e1000_2tuple_filter {
        TAILQ_ENTRY(e1000_2tuple_filter) entries;
        uint16_t index; /* the index of 2tuple filter */
        struct e1000_2tuple_filter_info filter_info;
        uint16_t queue; /* rx queue assigned to */
};

/* ethertype filter structure */
struct igb_ethertype_filter {
        uint16_t ethertype;
        uint32_t etqf;
};

struct igb_rte_flow_rss_conf {
        struct rte_flow_action_rss conf; /**< RSS parameters. */
        uint8_t key[IGB_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
        /* Queue indices to use. */
        uint16_t queue[IGB_MAX_RX_QUEUE_NUM_82576];
};

/*
 * Structure to store filters' info.
 */
struct e1000_filter_info {
        uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
        /* store the used ethertype filters */
        struct igb_ethertype_filter ethertype_filters[E1000_MAX_ETQF_FILTERS];
        uint8_t flex_mask; /* Bit mask for every used flex filter */
        struct e1000_flex_filter_list flex_list;
        /* Bit mask for every used 5tuple filter */
        uint8_t fivetuple_mask;
        struct e1000_5tuple_filter_list fivetuple_list;
        /* Bit mask for every used 2tuple filter */
        uint8_t twotuple_mask;
        struct e1000_2tuple_filter_list twotuple_list;
        /* store the SYN filter info */
        uint32_t syn_info;
        /* store the RSS filter info */
        struct igb_rte_flow_rss_conf rss_info;
};

/*
 * Structure to store private data for each driver instance (for each port).
 */
struct e1000_adapter {
        struct e1000_hw hw;
        struct e1000_hw_stats stats;
        struct e1000_interrupt intr;
        struct e1000_vfta shadow_vfta;
        struct e1000_vf_info *vfdata;
        struct e1000_filter_info filter;
        bool stopped;
        struct rte_timecounter systime_tc;
        struct rte_timecounter rx_tstamp_tc;
        struct rte_timecounter tx_tstamp_tc;
};

#define E1000_DEV_PRIVATE(adapter) \
        ((struct e1000_adapter *)adapter)

#define E1000_DEV_PRIVATE_TO_HW(adapter) \
        (&((struct e1000_adapter *)adapter)->hw)

#define E1000_DEV_PRIVATE_TO_STATS(adapter) \
        (&((struct e1000_adapter *)adapter)->stats)

#define E1000_DEV_PRIVATE_TO_INTR(adapter) \
        (&((struct e1000_adapter *)adapter)->intr)

#define E1000_DEV_PRIVATE_TO_VFTA(adapter) \
        (&((struct e1000_adapter *)adapter)->shadow_vfta)

#define E1000_DEV_PRIVATE_TO_P_VFDATA(adapter) \
        (&((struct e1000_adapter *)adapter)->vfdata)

#define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
        (&((struct e1000_adapter *)adapter)->filter)
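
/*
 * Typical accessor usage (illustrative sketch, the function name is
 * hypothetical): the macros above cast the ethdev private data back to
 * struct e1000_adapter and return a pointer to the requested member.
 *
 *      static void example_read_hw(struct rte_eth_dev *dev)
 *      {
 *              struct e1000_hw *hw =
 *                      E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 *              struct e1000_hw_stats *stats =
 *                      E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
 *              ...
 *      }
 */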

struct rte_flow {
        enum rte_filter_type filter_type;
        void *rule;
};

/* ntuple filter list structure */
struct igb_ntuple_filter_ele {
        TAILQ_ENTRY(igb_ntuple_filter_ele) entries;
        struct rte_eth_ntuple_filter filter_info;
};

/* ethertype filter list structure */
struct igb_ethertype_filter_ele {
        TAILQ_ENTRY(igb_ethertype_filter_ele) entries;
        struct rte_eth_ethertype_filter filter_info;
};

/* syn filter list structure */
struct igb_eth_syn_filter_ele {
        TAILQ_ENTRY(igb_eth_syn_filter_ele) entries;
        struct rte_eth_syn_filter filter_info;
};

/* flex filter list structure */
struct igb_flex_filter_ele {
        TAILQ_ENTRY(igb_flex_filter_ele) entries;
        struct rte_eth_flex_filter filter_info;
};

/* rss filter list structure */
struct igb_rss_conf_ele {
        TAILQ_ENTRY(igb_rss_conf_ele) entries;
        struct igb_rte_flow_rss_conf filter_info;
};

/* igb_flow memory list structure */
struct igb_flow_mem {
        TAILQ_ENTRY(igb_flow_mem) entries;
        struct rte_flow *flow;
        struct rte_eth_dev *dev;
};

TAILQ_HEAD(igb_ntuple_filter_list, igb_ntuple_filter_ele);
struct igb_ntuple_filter_list igb_filter_ntuple_list;
TAILQ_HEAD(igb_ethertype_filter_list, igb_ethertype_filter_ele);
struct igb_ethertype_filter_list igb_filter_ethertype_list;
TAILQ_HEAD(igb_syn_filter_list, igb_eth_syn_filter_ele);
struct igb_syn_filter_list igb_filter_syn_list;
TAILQ_HEAD(igb_flex_filter_list, igb_flex_filter_ele);
struct igb_flex_filter_list igb_filter_flex_list;
TAILQ_HEAD(igb_rss_filter_list, igb_rss_conf_ele);
struct igb_rss_filter_list igb_filter_rss_list;
TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem);
struct igb_flow_mem_list igb_flow_list;

extern const struct rte_flow_ops igb_flow_ops;

/*
 * RX/TX IGB function prototypes
 */
void eth_igb_tx_queue_release(void *txq);
void eth_igb_rx_queue_release(void *rxq);
void igb_dev_clear_queues(struct rte_eth_dev *dev);
void igb_dev_free_queues(struct rte_eth_dev *dev);

uint64_t igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);

int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
        uint16_t nb_rx_desc, unsigned int socket_id,
        const struct rte_eth_rxconf *rx_conf,
        struct rte_mempool *mb_pool);

uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
        uint16_t rx_queue_id);

int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);

int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);

uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);

int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
        uint16_t nb_tx_desc, unsigned int socket_id,
        const struct rte_eth_txconf *tx_conf);

int eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt);

int eth_igb_rx_init(struct rte_eth_dev *dev);

void eth_igb_tx_init(struct rte_eth_dev *dev);

uint16_t eth_igb_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts);

uint16_t eth_igb_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts);

uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
        uint16_t nb_pkts);

uint16_t eth_igb_recv_scattered_pkts(void *rxq,
        struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

int eth_igb_rss_hash_update(struct rte_eth_dev *dev,
        struct rte_eth_rss_conf *rss_conf);

int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
        struct rte_eth_rss_conf *rss_conf);

int eth_igbvf_rx_init(struct rte_eth_dev *dev);

void eth_igbvf_tx_init(struct rte_eth_dev *dev);

/*
 * misc function prototypes
 */
void igb_pf_host_init(struct rte_eth_dev *eth_dev);

void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);

int igb_pf_host_configure(struct rte_eth_dev *eth_dev);

void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_rxq_info *qinfo);

void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo);

uint32_t em_get_max_pktlen(struct rte_eth_dev *dev);

/*
 * RX/TX EM function prototypes
 */
void eth_em_tx_queue_release(void *txq);
void eth_em_rx_queue_release(void *rxq);

void em_dev_clear_queues(struct rte_eth_dev *dev);
void em_dev_free_queues(struct rte_eth_dev *dev);

uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);

int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
        uint16_t nb_rx_desc, unsigned int socket_id,
        const struct rte_eth_rxconf *rx_conf,
        struct rte_mempool *mb_pool);

uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev,
        uint16_t rx_queue_id);

int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);

int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);

uint64_t em_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
uint64_t em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);

int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
        uint16_t nb_tx_desc, unsigned int socket_id,
        const struct rte_eth_txconf *tx_conf);

int eth_em_rx_init(struct rte_eth_dev *dev);

void eth_em_tx_init(struct rte_eth_dev *dev);

uint16_t eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts);

uint16_t eth_em_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts);

uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint16_t nb_pkts);

uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint16_t nb_pkts);

void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_rxq_info *qinfo);

void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo);

void igb_pf_host_uninit(struct rte_eth_dev *dev);

void igb_filterlist_flush(struct rte_eth_dev *dev);
int igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
        struct e1000_5tuple_filter *filter);
int igb_delete_2tuple_filter(struct rte_eth_dev *dev,
        struct e1000_2tuple_filter *filter);
void igb_remove_flex_filter(struct rte_eth_dev *dev,
        struct e1000_flex_filter *filter);
int igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
        uint8_t idx);
int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
        struct rte_eth_ntuple_filter *ntuple_filter, bool add);
int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
        struct rte_eth_ethertype_filter *filter,
        bool add);
int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
        struct rte_eth_syn_filter *filter,
        bool add);
int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
        struct rte_eth_flex_filter *filter,
        bool add);
int igb_rss_conf_init(struct rte_eth_dev *dev,
        struct igb_rte_flow_rss_conf *out,
        const struct rte_flow_action_rss *in);
int igb_action_rss_same(const struct rte_flow_action_rss *comp,
        const struct rte_flow_action_rss *with);
int igb_config_rss_filter(struct rte_eth_dev *dev,
        struct igb_rte_flow_rss_conf *conf,
        bool add);

#endif /* _E1000_ETHDEV_H_ */