1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
13 #include <rte_debug.h>
14 #include <rte_ether.h>
15 #include <rte_ethdev_driver.h>
17 #include <rte_malloc.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "i40e_logs.h"
22 #include "base/i40e_type.h"
23 #include "base/i40e_prototype.h"
24 #include "i40e_ethdev.h"
26 #define I40E_IPV6_TC_MASK (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
27 #define I40E_IPV6_FRAG_HEADER 44
28 #define I40E_TENANT_ARRAY_NUM 3
29 #define I40E_TCI_MASK 0xFFFF
31 static int i40e_flow_validate(struct rte_eth_dev
*dev
,
32 const struct rte_flow_attr
*attr
,
33 const struct rte_flow_item pattern
[],
34 const struct rte_flow_action actions
[],
35 struct rte_flow_error
*error
);
36 static struct rte_flow
*i40e_flow_create(struct rte_eth_dev
*dev
,
37 const struct rte_flow_attr
*attr
,
38 const struct rte_flow_item pattern
[],
39 const struct rte_flow_action actions
[],
40 struct rte_flow_error
*error
);
41 static int i40e_flow_destroy(struct rte_eth_dev
*dev
,
42 struct rte_flow
*flow
,
43 struct rte_flow_error
*error
);
44 static int i40e_flow_flush(struct rte_eth_dev
*dev
,
45 struct rte_flow_error
*error
);
/* Parse the pattern items of an ethertype rule into @filter.
 * NOTE(review): the extracted source lost the "static int" line that
 * preceded this declaration (leaving an implicit return type, which is
 * invalid C); it is restored here.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter);
51 static int i40e_flow_parse_ethertype_action(struct rte_eth_dev
*dev
,
52 const struct rte_flow_action
*actions
,
53 struct rte_flow_error
*error
,
54 struct rte_eth_ethertype_filter
*filter
);
55 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev
*dev
,
56 const struct rte_flow_attr
*attr
,
57 const struct rte_flow_item
*pattern
,
58 struct rte_flow_error
*error
,
59 struct i40e_fdir_filter_conf
*filter
);
60 static int i40e_flow_parse_fdir_action(struct rte_eth_dev
*dev
,
61 const struct rte_flow_action
*actions
,
62 struct rte_flow_error
*error
,
63 struct i40e_fdir_filter_conf
*filter
);
64 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev
*dev
,
65 const struct rte_flow_action
*actions
,
66 struct rte_flow_error
*error
,
67 struct i40e_tunnel_filter_conf
*filter
);
68 static int i40e_flow_parse_attr(const struct rte_flow_attr
*attr
,
69 struct rte_flow_error
*error
);
70 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev
*dev
,
71 const struct rte_flow_attr
*attr
,
72 const struct rte_flow_item pattern
[],
73 const struct rte_flow_action actions
[],
74 struct rte_flow_error
*error
,
75 union i40e_filter_t
*filter
);
76 static int i40e_flow_parse_fdir_filter(struct rte_eth_dev
*dev
,
77 const struct rte_flow_attr
*attr
,
78 const struct rte_flow_item pattern
[],
79 const struct rte_flow_action actions
[],
80 struct rte_flow_error
*error
,
81 union i40e_filter_t
*filter
);
82 static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev
*dev
,
83 const struct rte_flow_attr
*attr
,
84 const struct rte_flow_item pattern
[],
85 const struct rte_flow_action actions
[],
86 struct rte_flow_error
*error
,
87 union i40e_filter_t
*filter
);
88 static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev
*dev
,
89 const struct rte_flow_attr
*attr
,
90 const struct rte_flow_item pattern
[],
91 const struct rte_flow_action actions
[],
92 struct rte_flow_error
*error
,
93 union i40e_filter_t
*filter
);
94 static int i40e_flow_parse_mpls_filter(struct rte_eth_dev
*dev
,
95 const struct rte_flow_attr
*attr
,
96 const struct rte_flow_item pattern
[],
97 const struct rte_flow_action actions
[],
98 struct rte_flow_error
*error
,
99 union i40e_filter_t
*filter
);
100 static int i40e_flow_parse_gtp_filter(struct rte_eth_dev
*dev
,
101 const struct rte_flow_attr
*attr
,
102 const struct rte_flow_item pattern
[],
103 const struct rte_flow_action actions
[],
104 struct rte_flow_error
*error
,
105 union i40e_filter_t
*filter
);
106 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf
*pf
,
107 struct i40e_ethertype_filter
*filter
);
108 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf
*pf
,
109 struct i40e_tunnel_filter
*filter
);
110 static int i40e_flow_flush_fdir_filter(struct i40e_pf
*pf
);
111 static int i40e_flow_flush_ethertype_filter(struct i40e_pf
*pf
);
112 static int i40e_flow_flush_tunnel_filter(struct i40e_pf
*pf
);
/* Remove all RSS-queue-region rules from the device.
 * NOTE(review): the extracted source dropped the "static int" line that
 * preceded this declaration; restored here to keep it valid C.
 */
static int
i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
116 i40e_flow_parse_qinq_filter(struct rte_eth_dev
*dev
,
117 const struct rte_flow_attr
*attr
,
118 const struct rte_flow_item pattern
[],
119 const struct rte_flow_action actions
[],
120 struct rte_flow_error
*error
,
121 union i40e_filter_t
*filter
);
/* Parse the pattern items of a QinQ rule into the tunnel filter config.
 * NOTE(review): the extracted source dropped the "static int" line that
 * preceded this declaration; restored here to keep it valid C.
 */
static int
i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
			     const struct rte_flow_item *pattern,
			     struct rte_flow_error *error,
			     struct i40e_tunnel_filter_conf *filter);
128 const struct rte_flow_ops i40e_flow_ops
= {
129 .validate
= i40e_flow_validate
,
130 .create
= i40e_flow_create
,
131 .destroy
= i40e_flow_destroy
,
132 .flush
= i40e_flow_flush
,
135 static union i40e_filter_t cons_filter
;
136 static enum rte_filter_type cons_filter_type
= RTE_ETH_FILTER_NONE
;
138 /* Pattern matched ethertype filter */
139 static enum rte_flow_item_type pattern_ethertype
[] = {
140 RTE_FLOW_ITEM_TYPE_ETH
,
141 RTE_FLOW_ITEM_TYPE_END
,
144 /* Pattern matched flow director filter */
145 static enum rte_flow_item_type pattern_fdir_ipv4
[] = {
146 RTE_FLOW_ITEM_TYPE_ETH
,
147 RTE_FLOW_ITEM_TYPE_IPV4
,
148 RTE_FLOW_ITEM_TYPE_END
,
151 static enum rte_flow_item_type pattern_fdir_ipv4_udp
[] = {
152 RTE_FLOW_ITEM_TYPE_ETH
,
153 RTE_FLOW_ITEM_TYPE_IPV4
,
154 RTE_FLOW_ITEM_TYPE_UDP
,
155 RTE_FLOW_ITEM_TYPE_END
,
158 static enum rte_flow_item_type pattern_fdir_ipv4_tcp
[] = {
159 RTE_FLOW_ITEM_TYPE_ETH
,
160 RTE_FLOW_ITEM_TYPE_IPV4
,
161 RTE_FLOW_ITEM_TYPE_TCP
,
162 RTE_FLOW_ITEM_TYPE_END
,
165 static enum rte_flow_item_type pattern_fdir_ipv4_sctp
[] = {
166 RTE_FLOW_ITEM_TYPE_ETH
,
167 RTE_FLOW_ITEM_TYPE_IPV4
,
168 RTE_FLOW_ITEM_TYPE_SCTP
,
169 RTE_FLOW_ITEM_TYPE_END
,
172 static enum rte_flow_item_type pattern_fdir_ipv4_gtpc
[] = {
173 RTE_FLOW_ITEM_TYPE_ETH
,
174 RTE_FLOW_ITEM_TYPE_IPV4
,
175 RTE_FLOW_ITEM_TYPE_UDP
,
176 RTE_FLOW_ITEM_TYPE_GTPC
,
177 RTE_FLOW_ITEM_TYPE_END
,
180 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu
[] = {
181 RTE_FLOW_ITEM_TYPE_ETH
,
182 RTE_FLOW_ITEM_TYPE_IPV4
,
183 RTE_FLOW_ITEM_TYPE_UDP
,
184 RTE_FLOW_ITEM_TYPE_GTPU
,
185 RTE_FLOW_ITEM_TYPE_END
,
188 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4
[] = {
189 RTE_FLOW_ITEM_TYPE_ETH
,
190 RTE_FLOW_ITEM_TYPE_IPV4
,
191 RTE_FLOW_ITEM_TYPE_UDP
,
192 RTE_FLOW_ITEM_TYPE_GTPU
,
193 RTE_FLOW_ITEM_TYPE_IPV4
,
194 RTE_FLOW_ITEM_TYPE_END
,
197 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6
[] = {
198 RTE_FLOW_ITEM_TYPE_ETH
,
199 RTE_FLOW_ITEM_TYPE_IPV4
,
200 RTE_FLOW_ITEM_TYPE_UDP
,
201 RTE_FLOW_ITEM_TYPE_GTPU
,
202 RTE_FLOW_ITEM_TYPE_IPV6
,
203 RTE_FLOW_ITEM_TYPE_END
,
206 static enum rte_flow_item_type pattern_fdir_ipv6
[] = {
207 RTE_FLOW_ITEM_TYPE_ETH
,
208 RTE_FLOW_ITEM_TYPE_IPV6
,
209 RTE_FLOW_ITEM_TYPE_END
,
212 static enum rte_flow_item_type pattern_fdir_ipv6_udp
[] = {
213 RTE_FLOW_ITEM_TYPE_ETH
,
214 RTE_FLOW_ITEM_TYPE_IPV6
,
215 RTE_FLOW_ITEM_TYPE_UDP
,
216 RTE_FLOW_ITEM_TYPE_END
,
219 static enum rte_flow_item_type pattern_fdir_ipv6_tcp
[] = {
220 RTE_FLOW_ITEM_TYPE_ETH
,
221 RTE_FLOW_ITEM_TYPE_IPV6
,
222 RTE_FLOW_ITEM_TYPE_TCP
,
223 RTE_FLOW_ITEM_TYPE_END
,
226 static enum rte_flow_item_type pattern_fdir_ipv6_sctp
[] = {
227 RTE_FLOW_ITEM_TYPE_ETH
,
228 RTE_FLOW_ITEM_TYPE_IPV6
,
229 RTE_FLOW_ITEM_TYPE_SCTP
,
230 RTE_FLOW_ITEM_TYPE_END
,
233 static enum rte_flow_item_type pattern_fdir_ipv6_gtpc
[] = {
234 RTE_FLOW_ITEM_TYPE_ETH
,
235 RTE_FLOW_ITEM_TYPE_IPV6
,
236 RTE_FLOW_ITEM_TYPE_UDP
,
237 RTE_FLOW_ITEM_TYPE_GTPC
,
238 RTE_FLOW_ITEM_TYPE_END
,
241 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu
[] = {
242 RTE_FLOW_ITEM_TYPE_ETH
,
243 RTE_FLOW_ITEM_TYPE_IPV6
,
244 RTE_FLOW_ITEM_TYPE_UDP
,
245 RTE_FLOW_ITEM_TYPE_GTPU
,
246 RTE_FLOW_ITEM_TYPE_END
,
249 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4
[] = {
250 RTE_FLOW_ITEM_TYPE_ETH
,
251 RTE_FLOW_ITEM_TYPE_IPV6
,
252 RTE_FLOW_ITEM_TYPE_UDP
,
253 RTE_FLOW_ITEM_TYPE_GTPU
,
254 RTE_FLOW_ITEM_TYPE_IPV4
,
255 RTE_FLOW_ITEM_TYPE_END
,
258 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6
[] = {
259 RTE_FLOW_ITEM_TYPE_ETH
,
260 RTE_FLOW_ITEM_TYPE_IPV6
,
261 RTE_FLOW_ITEM_TYPE_UDP
,
262 RTE_FLOW_ITEM_TYPE_GTPU
,
263 RTE_FLOW_ITEM_TYPE_IPV6
,
264 RTE_FLOW_ITEM_TYPE_END
,
267 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1
[] = {
268 RTE_FLOW_ITEM_TYPE_ETH
,
269 RTE_FLOW_ITEM_TYPE_RAW
,
270 RTE_FLOW_ITEM_TYPE_END
,
273 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2
[] = {
274 RTE_FLOW_ITEM_TYPE_ETH
,
275 RTE_FLOW_ITEM_TYPE_RAW
,
276 RTE_FLOW_ITEM_TYPE_RAW
,
277 RTE_FLOW_ITEM_TYPE_END
,
280 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3
[] = {
281 RTE_FLOW_ITEM_TYPE_ETH
,
282 RTE_FLOW_ITEM_TYPE_RAW
,
283 RTE_FLOW_ITEM_TYPE_RAW
,
284 RTE_FLOW_ITEM_TYPE_RAW
,
285 RTE_FLOW_ITEM_TYPE_END
,
288 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1
[] = {
289 RTE_FLOW_ITEM_TYPE_ETH
,
290 RTE_FLOW_ITEM_TYPE_IPV4
,
291 RTE_FLOW_ITEM_TYPE_RAW
,
292 RTE_FLOW_ITEM_TYPE_END
,
295 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2
[] = {
296 RTE_FLOW_ITEM_TYPE_ETH
,
297 RTE_FLOW_ITEM_TYPE_IPV4
,
298 RTE_FLOW_ITEM_TYPE_RAW
,
299 RTE_FLOW_ITEM_TYPE_RAW
,
300 RTE_FLOW_ITEM_TYPE_END
,
303 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3
[] = {
304 RTE_FLOW_ITEM_TYPE_ETH
,
305 RTE_FLOW_ITEM_TYPE_IPV4
,
306 RTE_FLOW_ITEM_TYPE_RAW
,
307 RTE_FLOW_ITEM_TYPE_RAW
,
308 RTE_FLOW_ITEM_TYPE_RAW
,
309 RTE_FLOW_ITEM_TYPE_END
,
312 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1
[] = {
313 RTE_FLOW_ITEM_TYPE_ETH
,
314 RTE_FLOW_ITEM_TYPE_IPV4
,
315 RTE_FLOW_ITEM_TYPE_UDP
,
316 RTE_FLOW_ITEM_TYPE_RAW
,
317 RTE_FLOW_ITEM_TYPE_END
,
320 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2
[] = {
321 RTE_FLOW_ITEM_TYPE_ETH
,
322 RTE_FLOW_ITEM_TYPE_IPV4
,
323 RTE_FLOW_ITEM_TYPE_UDP
,
324 RTE_FLOW_ITEM_TYPE_RAW
,
325 RTE_FLOW_ITEM_TYPE_RAW
,
326 RTE_FLOW_ITEM_TYPE_END
,
329 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3
[] = {
330 RTE_FLOW_ITEM_TYPE_ETH
,
331 RTE_FLOW_ITEM_TYPE_IPV4
,
332 RTE_FLOW_ITEM_TYPE_UDP
,
333 RTE_FLOW_ITEM_TYPE_RAW
,
334 RTE_FLOW_ITEM_TYPE_RAW
,
335 RTE_FLOW_ITEM_TYPE_RAW
,
336 RTE_FLOW_ITEM_TYPE_END
,
339 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1
[] = {
340 RTE_FLOW_ITEM_TYPE_ETH
,
341 RTE_FLOW_ITEM_TYPE_IPV4
,
342 RTE_FLOW_ITEM_TYPE_TCP
,
343 RTE_FLOW_ITEM_TYPE_RAW
,
344 RTE_FLOW_ITEM_TYPE_END
,
347 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2
[] = {
348 RTE_FLOW_ITEM_TYPE_ETH
,
349 RTE_FLOW_ITEM_TYPE_IPV4
,
350 RTE_FLOW_ITEM_TYPE_TCP
,
351 RTE_FLOW_ITEM_TYPE_RAW
,
352 RTE_FLOW_ITEM_TYPE_RAW
,
353 RTE_FLOW_ITEM_TYPE_END
,
356 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3
[] = {
357 RTE_FLOW_ITEM_TYPE_ETH
,
358 RTE_FLOW_ITEM_TYPE_IPV4
,
359 RTE_FLOW_ITEM_TYPE_TCP
,
360 RTE_FLOW_ITEM_TYPE_RAW
,
361 RTE_FLOW_ITEM_TYPE_RAW
,
362 RTE_FLOW_ITEM_TYPE_RAW
,
363 RTE_FLOW_ITEM_TYPE_END
,
366 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1
[] = {
367 RTE_FLOW_ITEM_TYPE_ETH
,
368 RTE_FLOW_ITEM_TYPE_IPV4
,
369 RTE_FLOW_ITEM_TYPE_SCTP
,
370 RTE_FLOW_ITEM_TYPE_RAW
,
371 RTE_FLOW_ITEM_TYPE_END
,
374 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2
[] = {
375 RTE_FLOW_ITEM_TYPE_ETH
,
376 RTE_FLOW_ITEM_TYPE_IPV4
,
377 RTE_FLOW_ITEM_TYPE_SCTP
,
378 RTE_FLOW_ITEM_TYPE_RAW
,
379 RTE_FLOW_ITEM_TYPE_RAW
,
380 RTE_FLOW_ITEM_TYPE_END
,
383 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3
[] = {
384 RTE_FLOW_ITEM_TYPE_ETH
,
385 RTE_FLOW_ITEM_TYPE_IPV4
,
386 RTE_FLOW_ITEM_TYPE_SCTP
,
387 RTE_FLOW_ITEM_TYPE_RAW
,
388 RTE_FLOW_ITEM_TYPE_RAW
,
389 RTE_FLOW_ITEM_TYPE_RAW
,
390 RTE_FLOW_ITEM_TYPE_END
,
393 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1
[] = {
394 RTE_FLOW_ITEM_TYPE_ETH
,
395 RTE_FLOW_ITEM_TYPE_IPV6
,
396 RTE_FLOW_ITEM_TYPE_RAW
,
397 RTE_FLOW_ITEM_TYPE_END
,
400 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2
[] = {
401 RTE_FLOW_ITEM_TYPE_ETH
,
402 RTE_FLOW_ITEM_TYPE_IPV6
,
403 RTE_FLOW_ITEM_TYPE_RAW
,
404 RTE_FLOW_ITEM_TYPE_RAW
,
405 RTE_FLOW_ITEM_TYPE_END
,
408 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3
[] = {
409 RTE_FLOW_ITEM_TYPE_ETH
,
410 RTE_FLOW_ITEM_TYPE_IPV6
,
411 RTE_FLOW_ITEM_TYPE_RAW
,
412 RTE_FLOW_ITEM_TYPE_RAW
,
413 RTE_FLOW_ITEM_TYPE_RAW
,
414 RTE_FLOW_ITEM_TYPE_END
,
417 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1
[] = {
418 RTE_FLOW_ITEM_TYPE_ETH
,
419 RTE_FLOW_ITEM_TYPE_IPV6
,
420 RTE_FLOW_ITEM_TYPE_UDP
,
421 RTE_FLOW_ITEM_TYPE_RAW
,
422 RTE_FLOW_ITEM_TYPE_END
,
425 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2
[] = {
426 RTE_FLOW_ITEM_TYPE_ETH
,
427 RTE_FLOW_ITEM_TYPE_IPV6
,
428 RTE_FLOW_ITEM_TYPE_UDP
,
429 RTE_FLOW_ITEM_TYPE_RAW
,
430 RTE_FLOW_ITEM_TYPE_RAW
,
431 RTE_FLOW_ITEM_TYPE_END
,
434 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3
[] = {
435 RTE_FLOW_ITEM_TYPE_ETH
,
436 RTE_FLOW_ITEM_TYPE_IPV6
,
437 RTE_FLOW_ITEM_TYPE_UDP
,
438 RTE_FLOW_ITEM_TYPE_RAW
,
439 RTE_FLOW_ITEM_TYPE_RAW
,
440 RTE_FLOW_ITEM_TYPE_RAW
,
441 RTE_FLOW_ITEM_TYPE_END
,
444 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1
[] = {
445 RTE_FLOW_ITEM_TYPE_ETH
,
446 RTE_FLOW_ITEM_TYPE_IPV6
,
447 RTE_FLOW_ITEM_TYPE_TCP
,
448 RTE_FLOW_ITEM_TYPE_RAW
,
449 RTE_FLOW_ITEM_TYPE_END
,
452 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2
[] = {
453 RTE_FLOW_ITEM_TYPE_ETH
,
454 RTE_FLOW_ITEM_TYPE_IPV6
,
455 RTE_FLOW_ITEM_TYPE_TCP
,
456 RTE_FLOW_ITEM_TYPE_RAW
,
457 RTE_FLOW_ITEM_TYPE_RAW
,
458 RTE_FLOW_ITEM_TYPE_END
,
461 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3
[] = {
462 RTE_FLOW_ITEM_TYPE_ETH
,
463 RTE_FLOW_ITEM_TYPE_IPV6
,
464 RTE_FLOW_ITEM_TYPE_TCP
,
465 RTE_FLOW_ITEM_TYPE_RAW
,
466 RTE_FLOW_ITEM_TYPE_RAW
,
467 RTE_FLOW_ITEM_TYPE_RAW
,
468 RTE_FLOW_ITEM_TYPE_END
,
471 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1
[] = {
472 RTE_FLOW_ITEM_TYPE_ETH
,
473 RTE_FLOW_ITEM_TYPE_IPV6
,
474 RTE_FLOW_ITEM_TYPE_SCTP
,
475 RTE_FLOW_ITEM_TYPE_RAW
,
476 RTE_FLOW_ITEM_TYPE_END
,
479 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2
[] = {
480 RTE_FLOW_ITEM_TYPE_ETH
,
481 RTE_FLOW_ITEM_TYPE_IPV6
,
482 RTE_FLOW_ITEM_TYPE_SCTP
,
483 RTE_FLOW_ITEM_TYPE_RAW
,
484 RTE_FLOW_ITEM_TYPE_RAW
,
485 RTE_FLOW_ITEM_TYPE_END
,
488 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3
[] = {
489 RTE_FLOW_ITEM_TYPE_ETH
,
490 RTE_FLOW_ITEM_TYPE_IPV6
,
491 RTE_FLOW_ITEM_TYPE_SCTP
,
492 RTE_FLOW_ITEM_TYPE_RAW
,
493 RTE_FLOW_ITEM_TYPE_RAW
,
494 RTE_FLOW_ITEM_TYPE_RAW
,
495 RTE_FLOW_ITEM_TYPE_END
,
498 static enum rte_flow_item_type pattern_fdir_ethertype_vlan
[] = {
499 RTE_FLOW_ITEM_TYPE_ETH
,
500 RTE_FLOW_ITEM_TYPE_VLAN
,
501 RTE_FLOW_ITEM_TYPE_END
,
504 static enum rte_flow_item_type pattern_fdir_vlan_ipv4
[] = {
505 RTE_FLOW_ITEM_TYPE_ETH
,
506 RTE_FLOW_ITEM_TYPE_VLAN
,
507 RTE_FLOW_ITEM_TYPE_IPV4
,
508 RTE_FLOW_ITEM_TYPE_END
,
511 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp
[] = {
512 RTE_FLOW_ITEM_TYPE_ETH
,
513 RTE_FLOW_ITEM_TYPE_VLAN
,
514 RTE_FLOW_ITEM_TYPE_IPV4
,
515 RTE_FLOW_ITEM_TYPE_UDP
,
516 RTE_FLOW_ITEM_TYPE_END
,
519 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp
[] = {
520 RTE_FLOW_ITEM_TYPE_ETH
,
521 RTE_FLOW_ITEM_TYPE_VLAN
,
522 RTE_FLOW_ITEM_TYPE_IPV4
,
523 RTE_FLOW_ITEM_TYPE_TCP
,
524 RTE_FLOW_ITEM_TYPE_END
,
527 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp
[] = {
528 RTE_FLOW_ITEM_TYPE_ETH
,
529 RTE_FLOW_ITEM_TYPE_VLAN
,
530 RTE_FLOW_ITEM_TYPE_IPV4
,
531 RTE_FLOW_ITEM_TYPE_SCTP
,
532 RTE_FLOW_ITEM_TYPE_END
,
535 static enum rte_flow_item_type pattern_fdir_vlan_ipv6
[] = {
536 RTE_FLOW_ITEM_TYPE_ETH
,
537 RTE_FLOW_ITEM_TYPE_VLAN
,
538 RTE_FLOW_ITEM_TYPE_IPV6
,
539 RTE_FLOW_ITEM_TYPE_END
,
542 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp
[] = {
543 RTE_FLOW_ITEM_TYPE_ETH
,
544 RTE_FLOW_ITEM_TYPE_VLAN
,
545 RTE_FLOW_ITEM_TYPE_IPV6
,
546 RTE_FLOW_ITEM_TYPE_UDP
,
547 RTE_FLOW_ITEM_TYPE_END
,
550 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp
[] = {
551 RTE_FLOW_ITEM_TYPE_ETH
,
552 RTE_FLOW_ITEM_TYPE_VLAN
,
553 RTE_FLOW_ITEM_TYPE_IPV6
,
554 RTE_FLOW_ITEM_TYPE_TCP
,
555 RTE_FLOW_ITEM_TYPE_END
,
558 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp
[] = {
559 RTE_FLOW_ITEM_TYPE_ETH
,
560 RTE_FLOW_ITEM_TYPE_VLAN
,
561 RTE_FLOW_ITEM_TYPE_IPV6
,
562 RTE_FLOW_ITEM_TYPE_SCTP
,
563 RTE_FLOW_ITEM_TYPE_END
,
566 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1
[] = {
567 RTE_FLOW_ITEM_TYPE_ETH
,
568 RTE_FLOW_ITEM_TYPE_VLAN
,
569 RTE_FLOW_ITEM_TYPE_RAW
,
570 RTE_FLOW_ITEM_TYPE_END
,
573 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2
[] = {
574 RTE_FLOW_ITEM_TYPE_ETH
,
575 RTE_FLOW_ITEM_TYPE_VLAN
,
576 RTE_FLOW_ITEM_TYPE_RAW
,
577 RTE_FLOW_ITEM_TYPE_RAW
,
578 RTE_FLOW_ITEM_TYPE_END
,
581 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3
[] = {
582 RTE_FLOW_ITEM_TYPE_ETH
,
583 RTE_FLOW_ITEM_TYPE_VLAN
,
584 RTE_FLOW_ITEM_TYPE_RAW
,
585 RTE_FLOW_ITEM_TYPE_RAW
,
586 RTE_FLOW_ITEM_TYPE_RAW
,
587 RTE_FLOW_ITEM_TYPE_END
,
590 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1
[] = {
591 RTE_FLOW_ITEM_TYPE_ETH
,
592 RTE_FLOW_ITEM_TYPE_VLAN
,
593 RTE_FLOW_ITEM_TYPE_IPV4
,
594 RTE_FLOW_ITEM_TYPE_RAW
,
595 RTE_FLOW_ITEM_TYPE_END
,
598 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2
[] = {
599 RTE_FLOW_ITEM_TYPE_ETH
,
600 RTE_FLOW_ITEM_TYPE_VLAN
,
601 RTE_FLOW_ITEM_TYPE_IPV4
,
602 RTE_FLOW_ITEM_TYPE_RAW
,
603 RTE_FLOW_ITEM_TYPE_RAW
,
604 RTE_FLOW_ITEM_TYPE_END
,
607 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3
[] = {
608 RTE_FLOW_ITEM_TYPE_ETH
,
609 RTE_FLOW_ITEM_TYPE_VLAN
,
610 RTE_FLOW_ITEM_TYPE_IPV4
,
611 RTE_FLOW_ITEM_TYPE_RAW
,
612 RTE_FLOW_ITEM_TYPE_RAW
,
613 RTE_FLOW_ITEM_TYPE_RAW
,
614 RTE_FLOW_ITEM_TYPE_END
,
617 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1
[] = {
618 RTE_FLOW_ITEM_TYPE_ETH
,
619 RTE_FLOW_ITEM_TYPE_VLAN
,
620 RTE_FLOW_ITEM_TYPE_IPV4
,
621 RTE_FLOW_ITEM_TYPE_UDP
,
622 RTE_FLOW_ITEM_TYPE_RAW
,
623 RTE_FLOW_ITEM_TYPE_END
,
626 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2
[] = {
627 RTE_FLOW_ITEM_TYPE_ETH
,
628 RTE_FLOW_ITEM_TYPE_VLAN
,
629 RTE_FLOW_ITEM_TYPE_IPV4
,
630 RTE_FLOW_ITEM_TYPE_UDP
,
631 RTE_FLOW_ITEM_TYPE_RAW
,
632 RTE_FLOW_ITEM_TYPE_RAW
,
633 RTE_FLOW_ITEM_TYPE_END
,
636 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3
[] = {
637 RTE_FLOW_ITEM_TYPE_ETH
,
638 RTE_FLOW_ITEM_TYPE_VLAN
,
639 RTE_FLOW_ITEM_TYPE_IPV4
,
640 RTE_FLOW_ITEM_TYPE_UDP
,
641 RTE_FLOW_ITEM_TYPE_RAW
,
642 RTE_FLOW_ITEM_TYPE_RAW
,
643 RTE_FLOW_ITEM_TYPE_RAW
,
644 RTE_FLOW_ITEM_TYPE_END
,
647 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1
[] = {
648 RTE_FLOW_ITEM_TYPE_ETH
,
649 RTE_FLOW_ITEM_TYPE_VLAN
,
650 RTE_FLOW_ITEM_TYPE_IPV4
,
651 RTE_FLOW_ITEM_TYPE_TCP
,
652 RTE_FLOW_ITEM_TYPE_RAW
,
653 RTE_FLOW_ITEM_TYPE_END
,
656 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2
[] = {
657 RTE_FLOW_ITEM_TYPE_ETH
,
658 RTE_FLOW_ITEM_TYPE_VLAN
,
659 RTE_FLOW_ITEM_TYPE_IPV4
,
660 RTE_FLOW_ITEM_TYPE_TCP
,
661 RTE_FLOW_ITEM_TYPE_RAW
,
662 RTE_FLOW_ITEM_TYPE_RAW
,
663 RTE_FLOW_ITEM_TYPE_END
,
666 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3
[] = {
667 RTE_FLOW_ITEM_TYPE_ETH
,
668 RTE_FLOW_ITEM_TYPE_VLAN
,
669 RTE_FLOW_ITEM_TYPE_IPV4
,
670 RTE_FLOW_ITEM_TYPE_TCP
,
671 RTE_FLOW_ITEM_TYPE_RAW
,
672 RTE_FLOW_ITEM_TYPE_RAW
,
673 RTE_FLOW_ITEM_TYPE_RAW
,
674 RTE_FLOW_ITEM_TYPE_END
,
677 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1
[] = {
678 RTE_FLOW_ITEM_TYPE_ETH
,
679 RTE_FLOW_ITEM_TYPE_VLAN
,
680 RTE_FLOW_ITEM_TYPE_IPV4
,
681 RTE_FLOW_ITEM_TYPE_SCTP
,
682 RTE_FLOW_ITEM_TYPE_RAW
,
683 RTE_FLOW_ITEM_TYPE_END
,
686 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2
[] = {
687 RTE_FLOW_ITEM_TYPE_ETH
,
688 RTE_FLOW_ITEM_TYPE_VLAN
,
689 RTE_FLOW_ITEM_TYPE_IPV4
,
690 RTE_FLOW_ITEM_TYPE_SCTP
,
691 RTE_FLOW_ITEM_TYPE_RAW
,
692 RTE_FLOW_ITEM_TYPE_RAW
,
693 RTE_FLOW_ITEM_TYPE_END
,
696 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3
[] = {
697 RTE_FLOW_ITEM_TYPE_ETH
,
698 RTE_FLOW_ITEM_TYPE_VLAN
,
699 RTE_FLOW_ITEM_TYPE_IPV4
,
700 RTE_FLOW_ITEM_TYPE_SCTP
,
701 RTE_FLOW_ITEM_TYPE_RAW
,
702 RTE_FLOW_ITEM_TYPE_RAW
,
703 RTE_FLOW_ITEM_TYPE_RAW
,
704 RTE_FLOW_ITEM_TYPE_END
,
707 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1
[] = {
708 RTE_FLOW_ITEM_TYPE_ETH
,
709 RTE_FLOW_ITEM_TYPE_VLAN
,
710 RTE_FLOW_ITEM_TYPE_IPV6
,
711 RTE_FLOW_ITEM_TYPE_RAW
,
712 RTE_FLOW_ITEM_TYPE_END
,
715 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2
[] = {
716 RTE_FLOW_ITEM_TYPE_ETH
,
717 RTE_FLOW_ITEM_TYPE_VLAN
,
718 RTE_FLOW_ITEM_TYPE_IPV6
,
719 RTE_FLOW_ITEM_TYPE_RAW
,
720 RTE_FLOW_ITEM_TYPE_RAW
,
721 RTE_FLOW_ITEM_TYPE_END
,
724 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3
[] = {
725 RTE_FLOW_ITEM_TYPE_ETH
,
726 RTE_FLOW_ITEM_TYPE_VLAN
,
727 RTE_FLOW_ITEM_TYPE_IPV6
,
728 RTE_FLOW_ITEM_TYPE_RAW
,
729 RTE_FLOW_ITEM_TYPE_RAW
,
730 RTE_FLOW_ITEM_TYPE_RAW
,
731 RTE_FLOW_ITEM_TYPE_END
,
734 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1
[] = {
735 RTE_FLOW_ITEM_TYPE_ETH
,
736 RTE_FLOW_ITEM_TYPE_VLAN
,
737 RTE_FLOW_ITEM_TYPE_IPV6
,
738 RTE_FLOW_ITEM_TYPE_UDP
,
739 RTE_FLOW_ITEM_TYPE_RAW
,
740 RTE_FLOW_ITEM_TYPE_END
,
743 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2
[] = {
744 RTE_FLOW_ITEM_TYPE_ETH
,
745 RTE_FLOW_ITEM_TYPE_VLAN
,
746 RTE_FLOW_ITEM_TYPE_IPV6
,
747 RTE_FLOW_ITEM_TYPE_UDP
,
748 RTE_FLOW_ITEM_TYPE_RAW
,
749 RTE_FLOW_ITEM_TYPE_RAW
,
750 RTE_FLOW_ITEM_TYPE_END
,
753 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3
[] = {
754 RTE_FLOW_ITEM_TYPE_ETH
,
755 RTE_FLOW_ITEM_TYPE_VLAN
,
756 RTE_FLOW_ITEM_TYPE_IPV6
,
757 RTE_FLOW_ITEM_TYPE_UDP
,
758 RTE_FLOW_ITEM_TYPE_RAW
,
759 RTE_FLOW_ITEM_TYPE_RAW
,
760 RTE_FLOW_ITEM_TYPE_RAW
,
761 RTE_FLOW_ITEM_TYPE_END
,
764 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1
[] = {
765 RTE_FLOW_ITEM_TYPE_ETH
,
766 RTE_FLOW_ITEM_TYPE_VLAN
,
767 RTE_FLOW_ITEM_TYPE_IPV6
,
768 RTE_FLOW_ITEM_TYPE_TCP
,
769 RTE_FLOW_ITEM_TYPE_RAW
,
770 RTE_FLOW_ITEM_TYPE_END
,
773 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2
[] = {
774 RTE_FLOW_ITEM_TYPE_ETH
,
775 RTE_FLOW_ITEM_TYPE_VLAN
,
776 RTE_FLOW_ITEM_TYPE_IPV6
,
777 RTE_FLOW_ITEM_TYPE_TCP
,
778 RTE_FLOW_ITEM_TYPE_RAW
,
779 RTE_FLOW_ITEM_TYPE_RAW
,
780 RTE_FLOW_ITEM_TYPE_END
,
783 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3
[] = {
784 RTE_FLOW_ITEM_TYPE_ETH
,
785 RTE_FLOW_ITEM_TYPE_VLAN
,
786 RTE_FLOW_ITEM_TYPE_IPV6
,
787 RTE_FLOW_ITEM_TYPE_TCP
,
788 RTE_FLOW_ITEM_TYPE_RAW
,
789 RTE_FLOW_ITEM_TYPE_RAW
,
790 RTE_FLOW_ITEM_TYPE_RAW
,
791 RTE_FLOW_ITEM_TYPE_END
,
794 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1
[] = {
795 RTE_FLOW_ITEM_TYPE_ETH
,
796 RTE_FLOW_ITEM_TYPE_VLAN
,
797 RTE_FLOW_ITEM_TYPE_IPV6
,
798 RTE_FLOW_ITEM_TYPE_SCTP
,
799 RTE_FLOW_ITEM_TYPE_RAW
,
800 RTE_FLOW_ITEM_TYPE_END
,
803 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2
[] = {
804 RTE_FLOW_ITEM_TYPE_ETH
,
805 RTE_FLOW_ITEM_TYPE_VLAN
,
806 RTE_FLOW_ITEM_TYPE_IPV6
,
807 RTE_FLOW_ITEM_TYPE_SCTP
,
808 RTE_FLOW_ITEM_TYPE_RAW
,
809 RTE_FLOW_ITEM_TYPE_RAW
,
810 RTE_FLOW_ITEM_TYPE_END
,
813 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3
[] = {
814 RTE_FLOW_ITEM_TYPE_ETH
,
815 RTE_FLOW_ITEM_TYPE_VLAN
,
816 RTE_FLOW_ITEM_TYPE_IPV6
,
817 RTE_FLOW_ITEM_TYPE_SCTP
,
818 RTE_FLOW_ITEM_TYPE_RAW
,
819 RTE_FLOW_ITEM_TYPE_RAW
,
820 RTE_FLOW_ITEM_TYPE_RAW
,
821 RTE_FLOW_ITEM_TYPE_END
,
824 static enum rte_flow_item_type pattern_fdir_ipv4_vf
[] = {
825 RTE_FLOW_ITEM_TYPE_ETH
,
826 RTE_FLOW_ITEM_TYPE_IPV4
,
827 RTE_FLOW_ITEM_TYPE_VF
,
828 RTE_FLOW_ITEM_TYPE_END
,
831 static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf
[] = {
832 RTE_FLOW_ITEM_TYPE_ETH
,
833 RTE_FLOW_ITEM_TYPE_IPV4
,
834 RTE_FLOW_ITEM_TYPE_UDP
,
835 RTE_FLOW_ITEM_TYPE_VF
,
836 RTE_FLOW_ITEM_TYPE_END
,
839 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf
[] = {
840 RTE_FLOW_ITEM_TYPE_ETH
,
841 RTE_FLOW_ITEM_TYPE_IPV4
,
842 RTE_FLOW_ITEM_TYPE_TCP
,
843 RTE_FLOW_ITEM_TYPE_VF
,
844 RTE_FLOW_ITEM_TYPE_END
,
847 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf
[] = {
848 RTE_FLOW_ITEM_TYPE_ETH
,
849 RTE_FLOW_ITEM_TYPE_IPV4
,
850 RTE_FLOW_ITEM_TYPE_SCTP
,
851 RTE_FLOW_ITEM_TYPE_VF
,
852 RTE_FLOW_ITEM_TYPE_END
,
855 static enum rte_flow_item_type pattern_fdir_ipv6_vf
[] = {
856 RTE_FLOW_ITEM_TYPE_ETH
,
857 RTE_FLOW_ITEM_TYPE_IPV6
,
858 RTE_FLOW_ITEM_TYPE_VF
,
859 RTE_FLOW_ITEM_TYPE_END
,
862 static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf
[] = {
863 RTE_FLOW_ITEM_TYPE_ETH
,
864 RTE_FLOW_ITEM_TYPE_IPV6
,
865 RTE_FLOW_ITEM_TYPE_UDP
,
866 RTE_FLOW_ITEM_TYPE_VF
,
867 RTE_FLOW_ITEM_TYPE_END
,
870 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf
[] = {
871 RTE_FLOW_ITEM_TYPE_ETH
,
872 RTE_FLOW_ITEM_TYPE_IPV6
,
873 RTE_FLOW_ITEM_TYPE_TCP
,
874 RTE_FLOW_ITEM_TYPE_VF
,
875 RTE_FLOW_ITEM_TYPE_END
,
878 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf
[] = {
879 RTE_FLOW_ITEM_TYPE_ETH
,
880 RTE_FLOW_ITEM_TYPE_IPV6
,
881 RTE_FLOW_ITEM_TYPE_SCTP
,
882 RTE_FLOW_ITEM_TYPE_VF
,
883 RTE_FLOW_ITEM_TYPE_END
,
886 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf
[] = {
887 RTE_FLOW_ITEM_TYPE_ETH
,
888 RTE_FLOW_ITEM_TYPE_RAW
,
889 RTE_FLOW_ITEM_TYPE_VF
,
890 RTE_FLOW_ITEM_TYPE_END
,
893 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf
[] = {
894 RTE_FLOW_ITEM_TYPE_ETH
,
895 RTE_FLOW_ITEM_TYPE_RAW
,
896 RTE_FLOW_ITEM_TYPE_RAW
,
897 RTE_FLOW_ITEM_TYPE_VF
,
898 RTE_FLOW_ITEM_TYPE_END
,
901 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf
[] = {
902 RTE_FLOW_ITEM_TYPE_ETH
,
903 RTE_FLOW_ITEM_TYPE_RAW
,
904 RTE_FLOW_ITEM_TYPE_RAW
,
905 RTE_FLOW_ITEM_TYPE_RAW
,
906 RTE_FLOW_ITEM_TYPE_VF
,
907 RTE_FLOW_ITEM_TYPE_END
,
910 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf
[] = {
911 RTE_FLOW_ITEM_TYPE_ETH
,
912 RTE_FLOW_ITEM_TYPE_IPV4
,
913 RTE_FLOW_ITEM_TYPE_RAW
,
914 RTE_FLOW_ITEM_TYPE_VF
,
915 RTE_FLOW_ITEM_TYPE_END
,
918 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf
[] = {
919 RTE_FLOW_ITEM_TYPE_ETH
,
920 RTE_FLOW_ITEM_TYPE_IPV4
,
921 RTE_FLOW_ITEM_TYPE_RAW
,
922 RTE_FLOW_ITEM_TYPE_RAW
,
923 RTE_FLOW_ITEM_TYPE_VF
,
924 RTE_FLOW_ITEM_TYPE_END
,
927 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf
[] = {
928 RTE_FLOW_ITEM_TYPE_ETH
,
929 RTE_FLOW_ITEM_TYPE_IPV4
,
930 RTE_FLOW_ITEM_TYPE_RAW
,
931 RTE_FLOW_ITEM_TYPE_RAW
,
932 RTE_FLOW_ITEM_TYPE_RAW
,
933 RTE_FLOW_ITEM_TYPE_VF
,
934 RTE_FLOW_ITEM_TYPE_END
,
937 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf
[] = {
938 RTE_FLOW_ITEM_TYPE_ETH
,
939 RTE_FLOW_ITEM_TYPE_IPV4
,
940 RTE_FLOW_ITEM_TYPE_UDP
,
941 RTE_FLOW_ITEM_TYPE_RAW
,
942 RTE_FLOW_ITEM_TYPE_VF
,
943 RTE_FLOW_ITEM_TYPE_END
,
946 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf
[] = {
947 RTE_FLOW_ITEM_TYPE_ETH
,
948 RTE_FLOW_ITEM_TYPE_IPV4
,
949 RTE_FLOW_ITEM_TYPE_UDP
,
950 RTE_FLOW_ITEM_TYPE_RAW
,
951 RTE_FLOW_ITEM_TYPE_RAW
,
952 RTE_FLOW_ITEM_TYPE_VF
,
953 RTE_FLOW_ITEM_TYPE_END
,
956 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf
[] = {
957 RTE_FLOW_ITEM_TYPE_ETH
,
958 RTE_FLOW_ITEM_TYPE_IPV4
,
959 RTE_FLOW_ITEM_TYPE_UDP
,
960 RTE_FLOW_ITEM_TYPE_RAW
,
961 RTE_FLOW_ITEM_TYPE_RAW
,
962 RTE_FLOW_ITEM_TYPE_RAW
,
963 RTE_FLOW_ITEM_TYPE_VF
,
964 RTE_FLOW_ITEM_TYPE_END
,
967 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf
[] = {
968 RTE_FLOW_ITEM_TYPE_ETH
,
969 RTE_FLOW_ITEM_TYPE_IPV4
,
970 RTE_FLOW_ITEM_TYPE_TCP
,
971 RTE_FLOW_ITEM_TYPE_RAW
,
972 RTE_FLOW_ITEM_TYPE_VF
,
973 RTE_FLOW_ITEM_TYPE_END
,
976 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf
[] = {
977 RTE_FLOW_ITEM_TYPE_ETH
,
978 RTE_FLOW_ITEM_TYPE_IPV4
,
979 RTE_FLOW_ITEM_TYPE_TCP
,
980 RTE_FLOW_ITEM_TYPE_RAW
,
981 RTE_FLOW_ITEM_TYPE_RAW
,
982 RTE_FLOW_ITEM_TYPE_VF
,
983 RTE_FLOW_ITEM_TYPE_END
,
986 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf
[] = {
987 RTE_FLOW_ITEM_TYPE_ETH
,
988 RTE_FLOW_ITEM_TYPE_IPV4
,
989 RTE_FLOW_ITEM_TYPE_TCP
,
990 RTE_FLOW_ITEM_TYPE_RAW
,
991 RTE_FLOW_ITEM_TYPE_RAW
,
992 RTE_FLOW_ITEM_TYPE_RAW
,
993 RTE_FLOW_ITEM_TYPE_VF
,
994 RTE_FLOW_ITEM_TYPE_END
,
997 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf
[] = {
998 RTE_FLOW_ITEM_TYPE_ETH
,
999 RTE_FLOW_ITEM_TYPE_IPV4
,
1000 RTE_FLOW_ITEM_TYPE_SCTP
,
1001 RTE_FLOW_ITEM_TYPE_RAW
,
1002 RTE_FLOW_ITEM_TYPE_VF
,
1003 RTE_FLOW_ITEM_TYPE_END
,
1006 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf
[] = {
1007 RTE_FLOW_ITEM_TYPE_ETH
,
1008 RTE_FLOW_ITEM_TYPE_IPV4
,
1009 RTE_FLOW_ITEM_TYPE_SCTP
,
1010 RTE_FLOW_ITEM_TYPE_RAW
,
1011 RTE_FLOW_ITEM_TYPE_RAW
,
1012 RTE_FLOW_ITEM_TYPE_VF
,
1013 RTE_FLOW_ITEM_TYPE_END
,
1016 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf
[] = {
1017 RTE_FLOW_ITEM_TYPE_ETH
,
1018 RTE_FLOW_ITEM_TYPE_IPV4
,
1019 RTE_FLOW_ITEM_TYPE_SCTP
,
1020 RTE_FLOW_ITEM_TYPE_RAW
,
1021 RTE_FLOW_ITEM_TYPE_RAW
,
1022 RTE_FLOW_ITEM_TYPE_RAW
,
1023 RTE_FLOW_ITEM_TYPE_VF
,
1024 RTE_FLOW_ITEM_TYPE_END
,
1027 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf
[] = {
1028 RTE_FLOW_ITEM_TYPE_ETH
,
1029 RTE_FLOW_ITEM_TYPE_IPV6
,
1030 RTE_FLOW_ITEM_TYPE_RAW
,
1031 RTE_FLOW_ITEM_TYPE_VF
,
1032 RTE_FLOW_ITEM_TYPE_END
,
1035 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf
[] = {
1036 RTE_FLOW_ITEM_TYPE_ETH
,
1037 RTE_FLOW_ITEM_TYPE_IPV6
,
1038 RTE_FLOW_ITEM_TYPE_RAW
,
1039 RTE_FLOW_ITEM_TYPE_RAW
,
1040 RTE_FLOW_ITEM_TYPE_VF
,
1041 RTE_FLOW_ITEM_TYPE_END
,
1044 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf
[] = {
1045 RTE_FLOW_ITEM_TYPE_ETH
,
1046 RTE_FLOW_ITEM_TYPE_IPV6
,
1047 RTE_FLOW_ITEM_TYPE_RAW
,
1048 RTE_FLOW_ITEM_TYPE_RAW
,
1049 RTE_FLOW_ITEM_TYPE_RAW
,
1050 RTE_FLOW_ITEM_TYPE_VF
,
1051 RTE_FLOW_ITEM_TYPE_END
,
/* FDIR patterns: IPv6 + L4 header, 1-3 RAW (flexible payload) items,
 * terminated by a VF item so matched traffic can target a VF queue.
 */
static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
/* FDIR patterns: single VLAN tag (optionally over IPv4/IPv6 + L4),
 * terminated by a VF item.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
/* FDIR patterns: single VLAN tag plus 1-3 RAW (flexible payload) items,
 * optionally over IPv4/IPv6 + L4, terminated by a VF item.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VXLAN over IPv6 outer header. */
static enum rte_flow_item_type pattern_vxlan_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VXLAN/IPv4 with a VLAN tag on the inner frame. */
static enum rte_flow_item_type pattern_vxlan_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VXLAN/IPv6 with a VLAN tag on the inner frame. */
static enum rte_flow_item_type pattern_vxlan_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* NVGRE variants: outer IPv4/IPv6, optional inner VLAN. */
static enum rte_flow_item_type pattern_nvgre_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* MPLS variants: MPLSoUDP (1, 2) and MPLSoGRE (3, 4). */
static enum rte_flow_item_type pattern_mpls_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

/* QinQ: double VLAN tagging. */
static enum rte_flow_item_type pattern_qinq_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};
/* Dispatch table mapping each supported item-type pattern to the parse
 * function that converts it into the corresponding hardware filter.
 * i40e_find_parse_filter_func() scans this table in order, so more
 * specific patterns must not be shadowed by earlier, broader ones.
 */
static struct i40e_valid_pattern i40e_supported_patterns[] = {
	/* Ethertype */
	{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
	/* FDIR - support default flow type without flexible payload*/
	{ pattern_ethertype, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
	/* FDIR - support default flow type with flexible payload */
	{ pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
	/* FDIR - support single vlan input set */
	{ pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
	/* FDIR - support VF item */
	{ pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
	/* VXLAN */
	{ pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
	/* NVGRE */
	{ pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
	{ pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
	{ pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
	{ pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
	/* MPLSoUDP & MPLSoGRE */
	{ pattern_mpls_1, i40e_flow_parse_mpls_filter },
	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
	/* GTP-C & GTP-U */
	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
	/* QINQ */
	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
};
/* Advance 'act' to the first non-VOID action at or after actions[index].
 * NOTE(review): mutates both 'act' and 'index' in the caller's scope;
 * wrapped in do/while(0) so it is safe inside unbraced if/else bodies.
 */
#define NEXT_ITEM_OF_ACTION(act, actions, index)			\
	do {								\
		act = actions + index;					\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {	\
			index++;					\
			act = actions + index;				\
		}							\
	} while (0)
/* Find the first VOID or non-VOID item pointer.
 * When is_void is true, returns the first VOID item; otherwise the first
 * non-VOID item. Stops (and returns the END item) if the list ends first.
 */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
	bool is_find;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (is_void)
			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
		else
			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
		if (is_find)
			break;
		item++;
	}
	return item;
}
/* Skip all VOID items of the pattern.
 * Compacts 'pattern' into 'items' by copying each maximal run of non-VOID
 * items and dropping VOID entries; always appends the terminating END item.
 * Caller must provide 'items' storage large enough for the compacted list.
 */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
			    const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = i40e_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = i40e_find_first_item(pb + 1, true);

		/* Copy the contiguous [pb, pe) run of non-VOID items. */
		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
			pb = pe;
			break;
		}

		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
1862 /* Check if the pattern matches a supported item type array */
1864 i40e_match_pattern(enum rte_flow_item_type
*item_array
,
1865 struct rte_flow_item
*pattern
)
1867 struct rte_flow_item
*item
= pattern
;
1869 while ((*item_array
== item
->type
) &&
1870 (*item_array
!= RTE_FLOW_ITEM_TYPE_END
)) {
1875 return (*item_array
== RTE_FLOW_ITEM_TYPE_END
&&
1876 item
->type
== RTE_FLOW_ITEM_TYPE_END
);
/* Find if there's parse filter function matched.
 * Scans i40e_supported_patterns starting at *idx for the first entry whose
 * item array matches 'pattern'. On return, *idx points one past the
 * matched slot so the caller can resume the search for further candidates.
 * Returns the matched parse function, or NULL if none matches.
 */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
{
	parse_filter_t parse_filter = NULL;
	uint8_t i = *idx;

	for (; i < RTE_DIM(i40e_supported_patterns); i++) {
		if (i40e_match_pattern(i40e_supported_patterns[i].items,
				       pattern)) {
			parse_filter = i40e_supported_patterns[i].parse_filter;
			break;
		}
	}

	/* Advance the resume cursor past the slot just examined. */
	*idx = ++i;

	return parse_filter;
}
/* Parse attributes.
 * Only plain ingress flows are supported: egress, a non-zero priority,
 * or a non-zero group are all rejected. Returns 0 on success or a
 * negative errno (via rte_errno) with 'error' populated.
 */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
/* Read the outer VLAN TPID currently programmed in hardware.
 * Selects L2TAGCTRL register 2 when QinQ (VLAN extend) is enabled,
 * register 3 otherwise, and extracts the 16-bit ethertype field.
 */
static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int qinq = dev->data->dev_conf.rxmode.offloads &
		DEV_RX_OFFLOAD_VLAN_EXTEND;
	uint64_t reg_r = 0;
	uint16_t reg_id;
	uint16_t tpid;

	if (qinq)
		reg_id = 2;
	else
		reg_id = 3;

	/* Admin-queue register read; result lands in reg_r. */
	i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
				    &reg_r, NULL);

	tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

	return tpid;
}
/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF.
 * 5. Ether_type mask should be 0xFFFF.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	enum rte_flow_item_type item_type;
	uint16_t outer_tpid;

	outer_tpid = i40e_get_outer_vlan(dev);

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* 'last' implies a range match, which is unsupported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;
			/* Get the MAC info. */
			if (!eth_spec || !eth_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL ETH spec/mask");
				return -rte_errno;
			}

			/* Mask bits of source MAC address must be full of 0.
			 * Mask bits of destination MAC address must be full
			 * of 1 or full of 0.
			 */
			if (!is_zero_ether_addr(&eth_mask->src) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MAC_addr mask");
				return -rte_errno;
			}

			/* Ethertype must be matched exactly (full mask). */
			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ethertype mask");
				return -rte_errno;
			}

			/* If mask bits of destination MAC address
			 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
			 */
			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				filter->mac_addr = eth_spec->dst;
				filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
			} else {
				filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
			}
			filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

			/* IPv4/IPv6/LLDP and the outer TPID are handled by
			 * dedicated hardware paths and cannot be claimed by
			 * an ethertype filter.
			 */
			if (filter->ether_type == ETHER_TYPE_IPv4 ||
			    filter->ether_type == ETHER_TYPE_IPv6 ||
			    filter->ether_type == ETHER_TYPE_LLDP ||
			    filter->ether_type == outer_tpid) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unsupported ether_type in"
						   " control packet filter.");
				return -rte_errno;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}
/* Ethertype action only supports QUEUE or DROP.
 * Validates that the action list is exactly one QUEUE (with an in-range
 * queue index) or one DROP, followed by END; VOID actions are skipped.
 */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
				 const struct rte_flow_action *actions,
				 struct rte_flow_error *error,
				 struct rte_eth_ethertype_filter *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	uint32_t index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = act->conf;
		filter->queue = act_q->index;
		/* Queue must exist on this port. */
		if (filter->queue >= pf->dev_data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act, "Invalid queue ID for"
					   " ethertype_filter.");
			return -rte_errno;
		}
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
2106 i40e_flow_parse_ethertype_filter(struct rte_eth_dev
*dev
,
2107 const struct rte_flow_attr
*attr
,
2108 const struct rte_flow_item pattern
[],
2109 const struct rte_flow_action actions
[],
2110 struct rte_flow_error
*error
,
2111 union i40e_filter_t
*filter
)
2113 struct rte_eth_ethertype_filter
*ethertype_filter
=
2114 &filter
->ethertype_filter
;
2117 ret
= i40e_flow_parse_ethertype_pattern(dev
, pattern
, error
,
2122 ret
= i40e_flow_parse_ethertype_action(dev
, actions
, error
,
2127 ret
= i40e_flow_parse_attr(attr
, error
);
2131 cons_filter_type
= RTE_ETH_FILTER_ETHERTYPE
;
2137 i40e_flow_check_raw_item(const struct rte_flow_item
*item
,
2138 const struct rte_flow_item_raw
*raw_spec
,
2139 struct rte_flow_error
*error
)
2141 if (!raw_spec
->relative
) {
2142 rte_flow_error_set(error
, EINVAL
,
2143 RTE_FLOW_ERROR_TYPE_ITEM
,
2145 "Relative should be 1.");
2149 if (raw_spec
->offset
% sizeof(uint16_t)) {
2150 rte_flow_error_set(error
, EINVAL
,
2151 RTE_FLOW_ERROR_TYPE_ITEM
,
2153 "Offset should be even.");
2157 if (raw_spec
->search
|| raw_spec
->limit
) {
2158 rte_flow_error_set(error
, EINVAL
,
2159 RTE_FLOW_ERROR_TYPE_ITEM
,
2161 "search or limit is not supported.");
2165 if (raw_spec
->offset
< 0) {
2166 rte_flow_error_set(error
, EINVAL
,
2167 RTE_FLOW_ERROR_TYPE_ITEM
,
2169 "Offset should be non-negative.");
2176 i40e_flow_store_flex_pit(struct i40e_pf
*pf
,
2177 struct i40e_fdir_flex_pit
*flex_pit
,
2178 enum i40e_flxpld_layer_idx layer_idx
,
2183 field_idx
= layer_idx
* I40E_MAX_FLXPLD_FIED
+ raw_id
;
2184 /* Check if the configuration is conflicted */
2185 if (pf
->fdir
.flex_pit_flag
[layer_idx
] &&
2186 (pf
->fdir
.flex_set
[field_idx
].src_offset
!= flex_pit
->src_offset
||
2187 pf
->fdir
.flex_set
[field_idx
].size
!= flex_pit
->size
||
2188 pf
->fdir
.flex_set
[field_idx
].dst_offset
!= flex_pit
->dst_offset
))
2191 /* Check if the configuration exists. */
2192 if (pf
->fdir
.flex_pit_flag
[layer_idx
] &&
2193 (pf
->fdir
.flex_set
[field_idx
].src_offset
== flex_pit
->src_offset
&&
2194 pf
->fdir
.flex_set
[field_idx
].size
== flex_pit
->size
&&
2195 pf
->fdir
.flex_set
[field_idx
].dst_offset
== flex_pit
->dst_offset
))
2198 pf
->fdir
.flex_set
[field_idx
].src_offset
=
2199 flex_pit
->src_offset
;
2200 pf
->fdir
.flex_set
[field_idx
].size
=
2202 pf
->fdir
.flex_set
[field_idx
].dst_offset
=
2203 flex_pit
->dst_offset
;
2209 i40e_flow_store_flex_mask(struct i40e_pf
*pf
,
2210 enum i40e_filter_pctype pctype
,
2213 struct i40e_fdir_flex_mask flex_mask
;
2215 uint8_t i
, nb_bitmask
= 0;
2217 memset(&flex_mask
, 0, sizeof(struct i40e_fdir_flex_mask
));
2218 for (i
= 0; i
< I40E_FDIR_MAX_FLEX_LEN
; i
+= sizeof(uint16_t)) {
2219 mask_tmp
= I40E_WORD(mask
[i
], mask
[i
+ 1]);
2221 flex_mask
.word_mask
|=
2222 I40E_FLEX_WORD_MASK(i
/ sizeof(uint16_t));
2223 if (mask_tmp
!= UINT16_MAX
) {
2224 flex_mask
.bitmask
[nb_bitmask
].mask
= ~mask_tmp
;
2225 flex_mask
.bitmask
[nb_bitmask
].offset
=
2226 i
/ sizeof(uint16_t);
2228 if (nb_bitmask
> I40E_FDIR_BITMASK_NUM_WORD
)
2233 flex_mask
.nb_bitmask
= nb_bitmask
;
2235 if (pf
->fdir
.flex_mask_flag
[pctype
] &&
2236 (memcmp(&flex_mask
, &pf
->fdir
.flex_mask
[pctype
],
2237 sizeof(struct i40e_fdir_flex_mask
))))
2239 else if (pf
->fdir
.flex_mask_flag
[pctype
] &&
2240 !(memcmp(&flex_mask
, &pf
->fdir
.flex_mask
[pctype
],
2241 sizeof(struct i40e_fdir_flex_mask
))))
2244 memcpy(&pf
->fdir
.flex_mask
[pctype
], &flex_mask
,
2245 sizeof(struct i40e_fdir_flex_mask
));
2250 i40e_flow_set_fdir_flex_pit(struct i40e_pf
*pf
,
2251 enum i40e_flxpld_layer_idx layer_idx
,
2254 struct i40e_hw
*hw
= I40E_PF_TO_HW(pf
);
2255 uint32_t flx_pit
, flx_ort
;
2257 uint16_t min_next_off
= 0; /* in words */
2261 flx_ort
= (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT
) |
2262 (raw_id
<< I40E_GLQF_ORT_FIELD_CNT_SHIFT
) |
2263 (layer_idx
* I40E_MAX_FLXPLD_FIED
);
2264 I40E_WRITE_GLB_REG(hw
, I40E_GLQF_ORT(33 + layer_idx
), flx_ort
);
2268 for (i
= 0; i
< raw_id
; i
++) {
2269 field_idx
= layer_idx
* I40E_MAX_FLXPLD_FIED
+ i
;
2270 flx_pit
= MK_FLX_PIT(pf
->fdir
.flex_set
[field_idx
].src_offset
,
2271 pf
->fdir
.flex_set
[field_idx
].size
,
2272 pf
->fdir
.flex_set
[field_idx
].dst_offset
);
2274 I40E_WRITE_REG(hw
, I40E_PRTQF_FLX_PIT(field_idx
), flx_pit
);
2275 min_next_off
= pf
->fdir
.flex_set
[field_idx
].src_offset
+
2276 pf
->fdir
.flex_set
[field_idx
].size
;
2279 for (; i
< I40E_MAX_FLXPLD_FIED
; i
++) {
2280 /* set the non-used register obeying register's constrain */
2281 field_idx
= layer_idx
* I40E_MAX_FLXPLD_FIED
+ i
;
2282 flx_pit
= MK_FLX_PIT(min_next_off
, NONUSE_FLX_PIT_FSIZE
,
2283 NONUSE_FLX_PIT_DEST_OFF
);
2284 I40E_WRITE_REG(hw
, I40E_PRTQF_FLX_PIT(field_idx
), flx_pit
);
2288 pf
->fdir
.flex_pit_flag
[layer_idx
] = 1;
2292 i40e_flow_set_fdir_flex_msk(struct i40e_pf
*pf
,
2293 enum i40e_filter_pctype pctype
)
2295 struct i40e_hw
*hw
= I40E_PF_TO_HW(pf
);
2296 struct i40e_fdir_flex_mask
*flex_mask
;
2297 uint32_t flxinset
, fd_mask
;
2301 flex_mask
= &pf
->fdir
.flex_mask
[pctype
];
2302 flxinset
= (flex_mask
->word_mask
<<
2303 I40E_PRTQF_FD_FLXINSET_INSET_SHIFT
) &
2304 I40E_PRTQF_FD_FLXINSET_INSET_MASK
;
2305 i40e_write_rx_ctl(hw
, I40E_PRTQF_FD_FLXINSET(pctype
), flxinset
);
2307 for (i
= 0; i
< flex_mask
->nb_bitmask
; i
++) {
2308 fd_mask
= (flex_mask
->bitmask
[i
].mask
<<
2309 I40E_PRTQF_FD_MSK_MASK_SHIFT
) &
2310 I40E_PRTQF_FD_MSK_MASK_MASK
;
2311 fd_mask
|= ((flex_mask
->bitmask
[i
].offset
+
2312 I40E_FLX_OFFSET_IN_FIELD_VECTOR
) <<
2313 I40E_PRTQF_FD_MSK_OFFSET_SHIFT
) &
2314 I40E_PRTQF_FD_MSK_OFFSET_MASK
;
2315 i40e_write_rx_ctl(hw
, I40E_PRTQF_FD_MSK(pctype
, i
), fd_mask
);
2318 pf
->fdir
.flex_mask_flag
[pctype
] = 1;
2322 i40e_flow_set_fdir_inset(struct i40e_pf
*pf
,
2323 enum i40e_filter_pctype pctype
,
2326 struct i40e_hw
*hw
= I40E_PF_TO_HW(pf
);
2327 uint64_t inset_reg
= 0;
2328 uint32_t mask_reg
[I40E_INSET_MASK_NUM_REG
] = {0};
2331 /* Check if the input set is valid */
2332 if (i40e_validate_input_set(pctype
, RTE_ETH_FILTER_FDIR
,
2334 PMD_DRV_LOG(ERR
, "Invalid input set");
2338 /* Check if the configuration is conflicted */
2339 if (pf
->fdir
.inset_flag
[pctype
] &&
2340 memcmp(&pf
->fdir
.input_set
[pctype
], &input_set
, sizeof(uint64_t)))
2343 if (pf
->fdir
.inset_flag
[pctype
] &&
2344 !memcmp(&pf
->fdir
.input_set
[pctype
], &input_set
, sizeof(uint64_t)))
2347 num
= i40e_generate_inset_mask_reg(input_set
, mask_reg
,
2348 I40E_INSET_MASK_NUM_REG
);
2352 inset_reg
|= i40e_translate_input_set_reg(hw
->mac
.type
, input_set
);
2354 i40e_check_write_reg(hw
, I40E_PRTQF_FD_INSET(pctype
, 0),
2355 (uint32_t)(inset_reg
& UINT32_MAX
));
2356 i40e_check_write_reg(hw
, I40E_PRTQF_FD_INSET(pctype
, 1),
2357 (uint32_t)((inset_reg
>>
2358 I40E_32_BIT_WIDTH
) & UINT32_MAX
));
2360 for (i
= 0; i
< num
; i
++)
2361 i40e_check_write_reg(hw
, I40E_GLQF_FD_MSK(i
, pctype
),
2364 /*clear unused mask registers of the pctype */
2365 for (i
= num
; i
< I40E_INSET_MASK_NUM_REG
; i
++)
2366 i40e_check_write_reg(hw
, I40E_GLQF_FD_MSK(i
, pctype
), 0);
2367 I40E_WRITE_FLUSH(hw
);
2369 pf
->fdir
.input_set
[pctype
] = input_set
;
2370 pf
->fdir
.inset_flag
[pctype
] = 1;
2375 i40e_flow_fdir_get_pctype_value(struct i40e_pf
*pf
,
2376 enum rte_flow_item_type item_type
,
2377 struct i40e_fdir_filter_conf
*filter
)
2379 struct i40e_customized_pctype
*cus_pctype
= NULL
;
2381 switch (item_type
) {
2382 case RTE_FLOW_ITEM_TYPE_GTPC
:
2383 cus_pctype
= i40e_find_customized_pctype(pf
,
2384 I40E_CUSTOMIZED_GTPC
);
2386 case RTE_FLOW_ITEM_TYPE_GTPU
:
2387 if (!filter
->input
.flow_ext
.inner_ip
)
2388 cus_pctype
= i40e_find_customized_pctype(pf
,
2389 I40E_CUSTOMIZED_GTPU
);
2390 else if (filter
->input
.flow_ext
.iip_type
==
2391 I40E_FDIR_IPTYPE_IPV4
)
2392 cus_pctype
= i40e_find_customized_pctype(pf
,
2393 I40E_CUSTOMIZED_GTPU_IPV4
);
2394 else if (filter
->input
.flow_ext
.iip_type
==
2395 I40E_FDIR_IPTYPE_IPV6
)
2396 cus_pctype
= i40e_find_customized_pctype(pf
,
2397 I40E_CUSTOMIZED_GTPU_IPV6
);
2400 PMD_DRV_LOG(ERR
, "Unsupported item type");
2404 if (cus_pctype
&& cus_pctype
->valid
)
2405 return cus_pctype
->pctype
;
2407 return I40E_FILTER_PCTYPE_INVALID
;
2410 /* 1. Last in item should be NULL as range is not supported.
2411 * 2. Supported patterns: refer to array i40e_supported_patterns.
2412 * 3. Default supported flow type and input set: refer to array
2413 * valid_fdir_inset_table in i40e_ethdev.c.
2414 * 4. Mask of fields which need to be matched should be
2416 * 5. Mask of fields which needn't to be matched should be
2418 * 6. GTP profile supports GTPv1 only.
2419 * 7. GTP-C response message ('source_port' = 2123) is not supported.
2422 i40e_flow_parse_fdir_pattern(struct rte_eth_dev
*dev
,
2423 const struct rte_flow_attr
*attr
,
2424 const struct rte_flow_item
*pattern
,
2425 struct rte_flow_error
*error
,
2426 struct i40e_fdir_filter_conf
*filter
)
2428 struct i40e_pf
*pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
2429 const struct rte_flow_item
*item
= pattern
;
2430 const struct rte_flow_item_eth
*eth_spec
, *eth_mask
;
2431 const struct rte_flow_item_vlan
*vlan_spec
, *vlan_mask
;
2432 const struct rte_flow_item_ipv4
*ipv4_spec
, *ipv4_mask
;
2433 const struct rte_flow_item_ipv6
*ipv6_spec
, *ipv6_mask
;
2434 const struct rte_flow_item_tcp
*tcp_spec
, *tcp_mask
;
2435 const struct rte_flow_item_udp
*udp_spec
, *udp_mask
;
2436 const struct rte_flow_item_sctp
*sctp_spec
, *sctp_mask
;
2437 const struct rte_flow_item_gtp
*gtp_spec
, *gtp_mask
;
2438 const struct rte_flow_item_raw
*raw_spec
, *raw_mask
;
2439 const struct rte_flow_item_vf
*vf_spec
;
2442 uint64_t input_set
= I40E_INSET_NONE
;
2444 enum rte_flow_item_type item_type
;
2445 enum rte_flow_item_type l3
= RTE_FLOW_ITEM_TYPE_END
;
2446 enum rte_flow_item_type cus_proto
= RTE_FLOW_ITEM_TYPE_END
;
2448 uint8_t ipv6_addr_mask
[16] = {
2449 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2450 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2451 enum i40e_flxpld_layer_idx layer_idx
= I40E_FLXPLD_L2_IDX
;
2453 int32_t off_arr
[I40E_MAX_FLXPLD_FIED
];
2454 uint16_t len_arr
[I40E_MAX_FLXPLD_FIED
];
2455 struct i40e_fdir_flex_pit flex_pit
;
2456 uint8_t next_dst_off
= 0;
2457 uint8_t flex_mask
[I40E_FDIR_MAX_FLEX_LEN
];
2459 bool cfg_flex_pit
= true;
2460 bool cfg_flex_msk
= true;
2461 uint16_t outer_tpid
;
2462 uint16_t ether_type
;
2463 uint32_t vtc_flow_cpu
;
2464 bool outer_ip
= true;
2467 memset(off_arr
, 0, sizeof(off_arr
));
2468 memset(len_arr
, 0, sizeof(len_arr
));
2469 memset(flex_mask
, 0, I40E_FDIR_MAX_FLEX_LEN
);
2470 outer_tpid
= i40e_get_outer_vlan(dev
);
2471 filter
->input
.flow_ext
.customized_pctype
= false;
2472 for (; item
->type
!= RTE_FLOW_ITEM_TYPE_END
; item
++) {
2474 rte_flow_error_set(error
, EINVAL
,
2475 RTE_FLOW_ERROR_TYPE_ITEM
,
2477 "Not support range");
2480 item_type
= item
->type
;
2481 switch (item_type
) {
2482 case RTE_FLOW_ITEM_TYPE_ETH
:
2483 eth_spec
= item
->spec
;
2484 eth_mask
= item
->mask
;
2486 if (eth_spec
&& eth_mask
) {
2487 if (!is_zero_ether_addr(ð_mask
->src
) ||
2488 !is_zero_ether_addr(ð_mask
->dst
)) {
2489 rte_flow_error_set(error
, EINVAL
,
2490 RTE_FLOW_ERROR_TYPE_ITEM
,
2492 "Invalid MAC_addr mask.");
2496 if (eth_spec
&& eth_mask
&& eth_mask
->type
) {
2497 enum rte_flow_item_type next
= (item
+ 1)->type
;
2499 if (eth_mask
->type
!= RTE_BE16(0xffff)) {
2500 rte_flow_error_set(error
, EINVAL
,
2501 RTE_FLOW_ERROR_TYPE_ITEM
,
2503 "Invalid type mask.");
2507 ether_type
= rte_be_to_cpu_16(eth_spec
->type
);
2509 if (next
== RTE_FLOW_ITEM_TYPE_VLAN
||
2510 ether_type
== ETHER_TYPE_IPv4
||
2511 ether_type
== ETHER_TYPE_IPv6
||
2512 ether_type
== ETHER_TYPE_ARP
||
2513 ether_type
== outer_tpid
) {
2514 rte_flow_error_set(error
, EINVAL
,
2515 RTE_FLOW_ERROR_TYPE_ITEM
,
2517 "Unsupported ether_type.");
2520 input_set
|= I40E_INSET_LAST_ETHER_TYPE
;
2521 filter
->input
.flow
.l2_flow
.ether_type
=
2525 pctype
= I40E_FILTER_PCTYPE_L2_PAYLOAD
;
2526 layer_idx
= I40E_FLXPLD_L2_IDX
;
2529 case RTE_FLOW_ITEM_TYPE_VLAN
:
2530 vlan_spec
= item
->spec
;
2531 vlan_mask
= item
->mask
;
2533 RTE_ASSERT(!(input_set
& I40E_INSET_LAST_ETHER_TYPE
));
2534 if (vlan_spec
&& vlan_mask
) {
2535 if (vlan_mask
->tci
==
2536 rte_cpu_to_be_16(I40E_TCI_MASK
)) {
2537 input_set
|= I40E_INSET_VLAN_INNER
;
2538 filter
->input
.flow_ext
.vlan_tci
=
2542 if (vlan_spec
&& vlan_mask
&& vlan_mask
->inner_type
) {
2543 if (vlan_mask
->inner_type
!= RTE_BE16(0xffff)) {
2544 rte_flow_error_set(error
, EINVAL
,
2545 RTE_FLOW_ERROR_TYPE_ITEM
,
2547 "Invalid inner_type"
2553 rte_be_to_cpu_16(vlan_spec
->inner_type
);
2555 if (ether_type
== ETHER_TYPE_IPv4
||
2556 ether_type
== ETHER_TYPE_IPv6
||
2557 ether_type
== ETHER_TYPE_ARP
||
2558 ether_type
== outer_tpid
) {
2559 rte_flow_error_set(error
, EINVAL
,
2560 RTE_FLOW_ERROR_TYPE_ITEM
,
2562 "Unsupported inner_type.");
2565 input_set
|= I40E_INSET_LAST_ETHER_TYPE
;
2566 filter
->input
.flow
.l2_flow
.ether_type
=
2567 vlan_spec
->inner_type
;
2570 pctype
= I40E_FILTER_PCTYPE_L2_PAYLOAD
;
2571 layer_idx
= I40E_FLXPLD_L2_IDX
;
2574 case RTE_FLOW_ITEM_TYPE_IPV4
:
2575 l3
= RTE_FLOW_ITEM_TYPE_IPV4
;
2576 ipv4_spec
= item
->spec
;
2577 ipv4_mask
= item
->mask
;
2578 pctype
= I40E_FILTER_PCTYPE_NONF_IPV4_OTHER
;
2579 layer_idx
= I40E_FLXPLD_L3_IDX
;
2581 if (ipv4_spec
&& ipv4_mask
&& outer_ip
) {
2582 /* Check IPv4 mask and update input set */
2583 if (ipv4_mask
->hdr
.version_ihl
||
2584 ipv4_mask
->hdr
.total_length
||
2585 ipv4_mask
->hdr
.packet_id
||
2586 ipv4_mask
->hdr
.fragment_offset
||
2587 ipv4_mask
->hdr
.hdr_checksum
) {
2588 rte_flow_error_set(error
, EINVAL
,
2589 RTE_FLOW_ERROR_TYPE_ITEM
,
2591 "Invalid IPv4 mask.");
2595 if (ipv4_mask
->hdr
.src_addr
== UINT32_MAX
)
2596 input_set
|= I40E_INSET_IPV4_SRC
;
2597 if (ipv4_mask
->hdr
.dst_addr
== UINT32_MAX
)
2598 input_set
|= I40E_INSET_IPV4_DST
;
2599 if (ipv4_mask
->hdr
.type_of_service
== UINT8_MAX
)
2600 input_set
|= I40E_INSET_IPV4_TOS
;
2601 if (ipv4_mask
->hdr
.time_to_live
== UINT8_MAX
)
2602 input_set
|= I40E_INSET_IPV4_TTL
;
2603 if (ipv4_mask
->hdr
.next_proto_id
== UINT8_MAX
)
2604 input_set
|= I40E_INSET_IPV4_PROTO
;
2606 /* Check if it is fragment. */
2607 frag_off
= ipv4_spec
->hdr
.fragment_offset
;
2608 frag_off
= rte_be_to_cpu_16(frag_off
);
2609 if (frag_off
& IPV4_HDR_OFFSET_MASK
||
2610 frag_off
& IPV4_HDR_MF_FLAG
)
2611 pctype
= I40E_FILTER_PCTYPE_FRAG_IPV4
;
2613 /* Get the filter info */
2614 filter
->input
.flow
.ip4_flow
.proto
=
2615 ipv4_spec
->hdr
.next_proto_id
;
2616 filter
->input
.flow
.ip4_flow
.tos
=
2617 ipv4_spec
->hdr
.type_of_service
;
2618 filter
->input
.flow
.ip4_flow
.ttl
=
2619 ipv4_spec
->hdr
.time_to_live
;
2620 filter
->input
.flow
.ip4_flow
.src_ip
=
2621 ipv4_spec
->hdr
.src_addr
;
2622 filter
->input
.flow
.ip4_flow
.dst_ip
=
2623 ipv4_spec
->hdr
.dst_addr
;
2624 } else if (!ipv4_spec
&& !ipv4_mask
&& !outer_ip
) {
2625 filter
->input
.flow_ext
.inner_ip
= true;
2626 filter
->input
.flow_ext
.iip_type
=
2627 I40E_FDIR_IPTYPE_IPV4
;
2628 } else if ((ipv4_spec
|| ipv4_mask
) && !outer_ip
) {
2629 rte_flow_error_set(error
, EINVAL
,
2630 RTE_FLOW_ERROR_TYPE_ITEM
,
2632 "Invalid inner IPv4 mask.");
2640 case RTE_FLOW_ITEM_TYPE_IPV6
:
2641 l3
= RTE_FLOW_ITEM_TYPE_IPV6
;
2642 ipv6_spec
= item
->spec
;
2643 ipv6_mask
= item
->mask
;
2644 pctype
= I40E_FILTER_PCTYPE_NONF_IPV6_OTHER
;
2645 layer_idx
= I40E_FLXPLD_L3_IDX
;
2647 if (ipv6_spec
&& ipv6_mask
&& outer_ip
) {
2648 /* Check IPv6 mask and update input set */
2649 if (ipv6_mask
->hdr
.payload_len
) {
2650 rte_flow_error_set(error
, EINVAL
,
2651 RTE_FLOW_ERROR_TYPE_ITEM
,
2653 "Invalid IPv6 mask");
2657 if (!memcmp(ipv6_mask
->hdr
.src_addr
,
2659 RTE_DIM(ipv6_mask
->hdr
.src_addr
)))
2660 input_set
|= I40E_INSET_IPV6_SRC
;
2661 if (!memcmp(ipv6_mask
->hdr
.dst_addr
,
2663 RTE_DIM(ipv6_mask
->hdr
.dst_addr
)))
2664 input_set
|= I40E_INSET_IPV6_DST
;
2666 if ((ipv6_mask
->hdr
.vtc_flow
&
2667 rte_cpu_to_be_32(I40E_IPV6_TC_MASK
))
2668 == rte_cpu_to_be_32(I40E_IPV6_TC_MASK
))
2669 input_set
|= I40E_INSET_IPV6_TC
;
2670 if (ipv6_mask
->hdr
.proto
== UINT8_MAX
)
2671 input_set
|= I40E_INSET_IPV6_NEXT_HDR
;
2672 if (ipv6_mask
->hdr
.hop_limits
== UINT8_MAX
)
2673 input_set
|= I40E_INSET_IPV6_HOP_LIMIT
;
2675 /* Get filter info */
2677 rte_be_to_cpu_32(ipv6_spec
->hdr
.vtc_flow
);
2678 filter
->input
.flow
.ipv6_flow
.tc
=
2679 (uint8_t)(vtc_flow_cpu
>>
2680 I40E_FDIR_IPv6_TC_OFFSET
);
2681 filter
->input
.flow
.ipv6_flow
.proto
=
2682 ipv6_spec
->hdr
.proto
;
2683 filter
->input
.flow
.ipv6_flow
.hop_limits
=
2684 ipv6_spec
->hdr
.hop_limits
;
2686 rte_memcpy(filter
->input
.flow
.ipv6_flow
.src_ip
,
2687 ipv6_spec
->hdr
.src_addr
, 16);
2688 rte_memcpy(filter
->input
.flow
.ipv6_flow
.dst_ip
,
2689 ipv6_spec
->hdr
.dst_addr
, 16);
2691 /* Check if it is fragment. */
2692 if (ipv6_spec
->hdr
.proto
==
2693 I40E_IPV6_FRAG_HEADER
)
2694 pctype
= I40E_FILTER_PCTYPE_FRAG_IPV6
;
2695 } else if (!ipv6_spec
&& !ipv6_mask
&& !outer_ip
) {
2696 filter
->input
.flow_ext
.inner_ip
= true;
2697 filter
->input
.flow_ext
.iip_type
=
2698 I40E_FDIR_IPTYPE_IPV6
;
2699 } else if ((ipv6_spec
|| ipv6_mask
) && !outer_ip
) {
2700 rte_flow_error_set(error
, EINVAL
,
2701 RTE_FLOW_ERROR_TYPE_ITEM
,
2703 "Invalid inner IPv6 mask");
2710 case RTE_FLOW_ITEM_TYPE_TCP
:
2711 tcp_spec
= item
->spec
;
2712 tcp_mask
= item
->mask
;
2714 if (l3
== RTE_FLOW_ITEM_TYPE_IPV4
)
2716 I40E_FILTER_PCTYPE_NONF_IPV4_TCP
;
2717 else if (l3
== RTE_FLOW_ITEM_TYPE_IPV6
)
2719 I40E_FILTER_PCTYPE_NONF_IPV6_TCP
;
2720 if (tcp_spec
&& tcp_mask
) {
2721 /* Check TCP mask and update input set */
2722 if (tcp_mask
->hdr
.sent_seq
||
2723 tcp_mask
->hdr
.recv_ack
||
2724 tcp_mask
->hdr
.data_off
||
2725 tcp_mask
->hdr
.tcp_flags
||
2726 tcp_mask
->hdr
.rx_win
||
2727 tcp_mask
->hdr
.cksum
||
2728 tcp_mask
->hdr
.tcp_urp
) {
2729 rte_flow_error_set(error
, EINVAL
,
2730 RTE_FLOW_ERROR_TYPE_ITEM
,
2732 "Invalid TCP mask");
2736 if (tcp_mask
->hdr
.src_port
== UINT16_MAX
)
2737 input_set
|= I40E_INSET_SRC_PORT
;
2738 if (tcp_mask
->hdr
.dst_port
== UINT16_MAX
)
2739 input_set
|= I40E_INSET_DST_PORT
;
2741 /* Get filter info */
2742 if (l3
== RTE_FLOW_ITEM_TYPE_IPV4
) {
2743 filter
->input
.flow
.tcp4_flow
.src_port
=
2744 tcp_spec
->hdr
.src_port
;
2745 filter
->input
.flow
.tcp4_flow
.dst_port
=
2746 tcp_spec
->hdr
.dst_port
;
2747 } else if (l3
== RTE_FLOW_ITEM_TYPE_IPV6
) {
2748 filter
->input
.flow
.tcp6_flow
.src_port
=
2749 tcp_spec
->hdr
.src_port
;
2750 filter
->input
.flow
.tcp6_flow
.dst_port
=
2751 tcp_spec
->hdr
.dst_port
;
2755 layer_idx
= I40E_FLXPLD_L4_IDX
;
2758 case RTE_FLOW_ITEM_TYPE_UDP
:
2759 udp_spec
= item
->spec
;
2760 udp_mask
= item
->mask
;
2762 if (l3
== RTE_FLOW_ITEM_TYPE_IPV4
)
2764 I40E_FILTER_PCTYPE_NONF_IPV4_UDP
;
2765 else if (l3
== RTE_FLOW_ITEM_TYPE_IPV6
)
2767 I40E_FILTER_PCTYPE_NONF_IPV6_UDP
;
2769 if (udp_spec
&& udp_mask
) {
2770 /* Check UDP mask and update input set*/
2771 if (udp_mask
->hdr
.dgram_len
||
2772 udp_mask
->hdr
.dgram_cksum
) {
2773 rte_flow_error_set(error
, EINVAL
,
2774 RTE_FLOW_ERROR_TYPE_ITEM
,
2776 "Invalid UDP mask");
2780 if (udp_mask
->hdr
.src_port
== UINT16_MAX
)
2781 input_set
|= I40E_INSET_SRC_PORT
;
2782 if (udp_mask
->hdr
.dst_port
== UINT16_MAX
)
2783 input_set
|= I40E_INSET_DST_PORT
;
2785 /* Get filter info */
2786 if (l3
== RTE_FLOW_ITEM_TYPE_IPV4
) {
2787 filter
->input
.flow
.udp4_flow
.src_port
=
2788 udp_spec
->hdr
.src_port
;
2789 filter
->input
.flow
.udp4_flow
.dst_port
=
2790 udp_spec
->hdr
.dst_port
;
2791 } else if (l3
== RTE_FLOW_ITEM_TYPE_IPV6
) {
2792 filter
->input
.flow
.udp6_flow
.src_port
=
2793 udp_spec
->hdr
.src_port
;
2794 filter
->input
.flow
.udp6_flow
.dst_port
=
2795 udp_spec
->hdr
.dst_port
;
2799 layer_idx
= I40E_FLXPLD_L4_IDX
;
2802 case RTE_FLOW_ITEM_TYPE_GTPC
:
2803 case RTE_FLOW_ITEM_TYPE_GTPU
:
2804 if (!pf
->gtp_support
) {
2805 rte_flow_error_set(error
, EINVAL
,
2806 RTE_FLOW_ERROR_TYPE_ITEM
,
2808 "Unsupported protocol");
2812 gtp_spec
= item
->spec
;
2813 gtp_mask
= item
->mask
;
2815 if (gtp_spec
&& gtp_mask
) {
2816 if (gtp_mask
->v_pt_rsv_flags
||
2817 gtp_mask
->msg_type
||
2818 gtp_mask
->msg_len
||
2819 gtp_mask
->teid
!= UINT32_MAX
) {
2820 rte_flow_error_set(error
, EINVAL
,
2821 RTE_FLOW_ERROR_TYPE_ITEM
,
2823 "Invalid GTP mask");
2827 filter
->input
.flow
.gtp_flow
.teid
=
2829 filter
->input
.flow_ext
.customized_pctype
= true;
2830 cus_proto
= item_type
;
2833 case RTE_FLOW_ITEM_TYPE_SCTP
:
2834 sctp_spec
= item
->spec
;
2835 sctp_mask
= item
->mask
;
2837 if (l3
== RTE_FLOW_ITEM_TYPE_IPV4
)
2839 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP
;
2840 else if (l3
== RTE_FLOW_ITEM_TYPE_IPV6
)
2842 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP
;
2844 if (sctp_spec
&& sctp_mask
) {
2845 /* Check SCTP mask and update input set */
2846 if (sctp_mask
->hdr
.cksum
) {
2847 rte_flow_error_set(error
, EINVAL
,
2848 RTE_FLOW_ERROR_TYPE_ITEM
,
2850 "Invalid UDP mask");
2854 if (sctp_mask
->hdr
.src_port
== UINT16_MAX
)
2855 input_set
|= I40E_INSET_SRC_PORT
;
2856 if (sctp_mask
->hdr
.dst_port
== UINT16_MAX
)
2857 input_set
|= I40E_INSET_DST_PORT
;
2858 if (sctp_mask
->hdr
.tag
== UINT32_MAX
)
2859 input_set
|= I40E_INSET_SCTP_VT
;
2861 /* Get filter info */
2862 if (l3
== RTE_FLOW_ITEM_TYPE_IPV4
) {
2863 filter
->input
.flow
.sctp4_flow
.src_port
=
2864 sctp_spec
->hdr
.src_port
;
2865 filter
->input
.flow
.sctp4_flow
.dst_port
=
2866 sctp_spec
->hdr
.dst_port
;
2867 filter
->input
.flow
.sctp4_flow
.verify_tag
2868 = sctp_spec
->hdr
.tag
;
2869 } else if (l3
== RTE_FLOW_ITEM_TYPE_IPV6
) {
2870 filter
->input
.flow
.sctp6_flow
.src_port
=
2871 sctp_spec
->hdr
.src_port
;
2872 filter
->input
.flow
.sctp6_flow
.dst_port
=
2873 sctp_spec
->hdr
.dst_port
;
2874 filter
->input
.flow
.sctp6_flow
.verify_tag
2875 = sctp_spec
->hdr
.tag
;
2879 layer_idx
= I40E_FLXPLD_L4_IDX
;
2882 case RTE_FLOW_ITEM_TYPE_RAW
:
2883 raw_spec
= item
->spec
;
2884 raw_mask
= item
->mask
;
2886 if (!raw_spec
|| !raw_mask
) {
2887 rte_flow_error_set(error
, EINVAL
,
2888 RTE_FLOW_ERROR_TYPE_ITEM
,
2890 "NULL RAW spec/mask");
2894 if (pf
->support_multi_driver
) {
2895 rte_flow_error_set(error
, ENOTSUP
,
2896 RTE_FLOW_ERROR_TYPE_ITEM
,
2898 "Unsupported flexible payload.");
2902 ret
= i40e_flow_check_raw_item(item
, raw_spec
, error
);
2906 off_arr
[raw_id
] = raw_spec
->offset
;
2907 len_arr
[raw_id
] = raw_spec
->length
;
2910 memset(&flex_pit
, 0, sizeof(struct i40e_fdir_flex_pit
));
2912 raw_spec
->length
/ sizeof(uint16_t);
2913 flex_pit
.dst_offset
=
2914 next_dst_off
/ sizeof(uint16_t);
2916 for (i
= 0; i
<= raw_id
; i
++) {
2918 flex_pit
.src_offset
+=
2922 flex_pit
.src_offset
+=
2923 (off_arr
[i
] + len_arr
[i
]) /
2925 flex_size
+= len_arr
[i
];
2927 if (((flex_pit
.src_offset
+ flex_pit
.size
) >=
2928 I40E_MAX_FLX_SOURCE_OFF
/ sizeof(uint16_t)) ||
2929 flex_size
> I40E_FDIR_MAX_FLEXLEN
) {
2930 rte_flow_error_set(error
, EINVAL
,
2931 RTE_FLOW_ERROR_TYPE_ITEM
,
2933 "Exceeds maxmial payload limit.");
2937 /* Store flex pit to SW */
2938 ret
= i40e_flow_store_flex_pit(pf
, &flex_pit
,
2941 rte_flow_error_set(error
, EINVAL
,
2942 RTE_FLOW_ERROR_TYPE_ITEM
,
2944 "Conflict with the first flexible rule.");
2947 cfg_flex_pit
= false;
2949 for (i
= 0; i
< raw_spec
->length
; i
++) {
2950 j
= i
+ next_dst_off
;
2951 filter
->input
.flow_ext
.flexbytes
[j
] =
2952 raw_spec
->pattern
[i
];
2953 flex_mask
[j
] = raw_mask
->pattern
[i
];
2956 next_dst_off
+= raw_spec
->length
;
2959 case RTE_FLOW_ITEM_TYPE_VF
:
2960 vf_spec
= item
->spec
;
2961 if (!attr
->transfer
) {
2962 rte_flow_error_set(error
, ENOTSUP
,
2963 RTE_FLOW_ERROR_TYPE_ITEM
,
2965 "Matching VF traffic"
2966 " without affecting it"
2967 " (transfer attribute)"
2971 filter
->input
.flow_ext
.is_vf
= 1;
2972 filter
->input
.flow_ext
.dst_id
= vf_spec
->id
;
2973 if (filter
->input
.flow_ext
.is_vf
&&
2974 filter
->input
.flow_ext
.dst_id
>= pf
->vf_num
) {
2975 rte_flow_error_set(error
, EINVAL
,
2976 RTE_FLOW_ERROR_TYPE_ITEM
,
2978 "Invalid VF ID for FDIR.");
2987 /* Get customized pctype value */
2988 if (filter
->input
.flow_ext
.customized_pctype
) {
2989 pctype
= i40e_flow_fdir_get_pctype_value(pf
, cus_proto
, filter
);
2990 if (pctype
== I40E_FILTER_PCTYPE_INVALID
) {
2991 rte_flow_error_set(error
, EINVAL
,
2992 RTE_FLOW_ERROR_TYPE_ITEM
,
2994 "Unsupported pctype");
2999 /* If customized pctype is not used, set fdir configuration.*/
3000 if (!filter
->input
.flow_ext
.customized_pctype
) {
3001 ret
= i40e_flow_set_fdir_inset(pf
, pctype
, input_set
);
3003 rte_flow_error_set(error
, EINVAL
,
3004 RTE_FLOW_ERROR_TYPE_ITEM
, item
,
3005 "Conflict with the first rule's input set.");
3007 } else if (ret
== -EINVAL
) {
3008 rte_flow_error_set(error
, EINVAL
,
3009 RTE_FLOW_ERROR_TYPE_ITEM
, item
,
3010 "Invalid pattern mask.");
3014 /* Store flex mask to SW */
3015 ret
= i40e_flow_store_flex_mask(pf
, pctype
, flex_mask
);
3017 rte_flow_error_set(error
, EINVAL
,
3018 RTE_FLOW_ERROR_TYPE_ITEM
,
3020 "Exceed maximal number of bitmasks");
3022 } else if (ret
== -2) {
3023 rte_flow_error_set(error
, EINVAL
,
3024 RTE_FLOW_ERROR_TYPE_ITEM
,
3026 "Conflict with the first flexible rule");
3029 cfg_flex_msk
= false;
3032 i40e_flow_set_fdir_flex_pit(pf
, layer_idx
, raw_id
);
3035 i40e_flow_set_fdir_flex_msk(pf
, pctype
);
3038 filter
->input
.pctype
= pctype
;
3043 /* Parse to get the action info of a FDIR filter.
3044 * FDIR action supports QUEUE or (QUEUE + MARK).
3047 i40e_flow_parse_fdir_action(struct rte_eth_dev
*dev
,
3048 const struct rte_flow_action
*actions
,
3049 struct rte_flow_error
*error
,
3050 struct i40e_fdir_filter_conf
*filter
)
3052 struct i40e_pf
*pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
3053 const struct rte_flow_action
*act
;
3054 const struct rte_flow_action_queue
*act_q
;
3055 const struct rte_flow_action_mark
*mark_spec
;
3058 /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
3059 NEXT_ITEM_OF_ACTION(act
, actions
, index
);
3060 switch (act
->type
) {
3061 case RTE_FLOW_ACTION_TYPE_QUEUE
:
3063 filter
->action
.rx_queue
= act_q
->index
;
3064 if ((!filter
->input
.flow_ext
.is_vf
&&
3065 filter
->action
.rx_queue
>= pf
->dev_data
->nb_rx_queues
) ||
3066 (filter
->input
.flow_ext
.is_vf
&&
3067 filter
->action
.rx_queue
>= pf
->vf_nb_qps
)) {
3068 rte_flow_error_set(error
, EINVAL
,
3069 RTE_FLOW_ERROR_TYPE_ACTION
, act
,
3070 "Invalid queue ID for FDIR.");
3073 filter
->action
.behavior
= I40E_FDIR_ACCEPT
;
3075 case RTE_FLOW_ACTION_TYPE_DROP
:
3076 filter
->action
.behavior
= I40E_FDIR_REJECT
;
3078 case RTE_FLOW_ACTION_TYPE_PASSTHRU
:
3079 filter
->action
.behavior
= I40E_FDIR_PASSTHRU
;
3082 rte_flow_error_set(error
, EINVAL
,
3083 RTE_FLOW_ERROR_TYPE_ACTION
, act
,
3088 /* Check if the next non-void item is MARK or FLAG or END. */
3090 NEXT_ITEM_OF_ACTION(act
, actions
, index
);
3091 switch (act
->type
) {
3092 case RTE_FLOW_ACTION_TYPE_MARK
:
3093 mark_spec
= act
->conf
;
3094 filter
->action
.report_status
= I40E_FDIR_REPORT_ID
;
3095 filter
->soft_id
= mark_spec
->id
;
3097 case RTE_FLOW_ACTION_TYPE_FLAG
:
3098 filter
->action
.report_status
= I40E_FDIR_NO_REPORT_STATUS
;
3100 case RTE_FLOW_ACTION_TYPE_END
:
3103 rte_flow_error_set(error
, EINVAL
, RTE_FLOW_ERROR_TYPE_ACTION
,
3104 act
, "Invalid action.");
3108 /* Check if the next non-void item is END */
3110 NEXT_ITEM_OF_ACTION(act
, actions
, index
);
3111 if (act
->type
!= RTE_FLOW_ACTION_TYPE_END
) {
3112 rte_flow_error_set(error
, EINVAL
,
3113 RTE_FLOW_ERROR_TYPE_ACTION
,
3114 act
, "Invalid action.");
3122 i40e_flow_parse_fdir_filter(struct rte_eth_dev
*dev
,
3123 const struct rte_flow_attr
*attr
,
3124 const struct rte_flow_item pattern
[],
3125 const struct rte_flow_action actions
[],
3126 struct rte_flow_error
*error
,
3127 union i40e_filter_t
*filter
)
3129 struct i40e_pf
*pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
3130 struct i40e_fdir_filter_conf
*fdir_filter
=
3131 &filter
->fdir_filter
;
3134 ret
= i40e_flow_parse_fdir_pattern(dev
, attr
, pattern
, error
,
3139 ret
= i40e_flow_parse_fdir_action(dev
, actions
, error
, fdir_filter
);
3143 ret
= i40e_flow_parse_attr(attr
, error
);
3147 cons_filter_type
= RTE_ETH_FILTER_FDIR
;
3149 if (dev
->data
->dev_conf
.fdir_conf
.mode
!=
3150 RTE_FDIR_MODE_PERFECT
) {
3151 /* Enable fdir when fdir flow is added at first time. */
3152 ret
= i40e_fdir_setup(pf
);
3153 if (ret
!= I40E_SUCCESS
) {
3154 rte_flow_error_set(error
, ENOTSUP
,
3155 RTE_FLOW_ERROR_TYPE_HANDLE
,
3156 NULL
, "Failed to setup fdir.");
3159 ret
= i40e_fdir_configure(dev
);
3161 rte_flow_error_set(error
, ENOTSUP
,
3162 RTE_FLOW_ERROR_TYPE_HANDLE
,
3163 NULL
, "Failed to configure fdir.");
3167 dev
->data
->dev_conf
.fdir_conf
.mode
= RTE_FDIR_MODE_PERFECT
;
3172 i40e_fdir_teardown(pf
);
3176 /* Parse to get the action info of a tunnel filter
3177 * Tunnel action only supports PF, VF and QUEUE.
3180 i40e_flow_parse_tunnel_action(struct rte_eth_dev
*dev
,
3181 const struct rte_flow_action
*actions
,
3182 struct rte_flow_error
*error
,
3183 struct i40e_tunnel_filter_conf
*filter
)
3185 struct i40e_pf
*pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
3186 const struct rte_flow_action
*act
;
3187 const struct rte_flow_action_queue
*act_q
;
3188 const struct rte_flow_action_vf
*act_vf
;
3191 /* Check if the first non-void action is PF or VF. */
3192 NEXT_ITEM_OF_ACTION(act
, actions
, index
);
3193 if (act
->type
!= RTE_FLOW_ACTION_TYPE_PF
&&
3194 act
->type
!= RTE_FLOW_ACTION_TYPE_VF
) {
3195 rte_flow_error_set(error
, EINVAL
, RTE_FLOW_ERROR_TYPE_ACTION
,
3196 act
, "Not supported action.");
3200 if (act
->type
== RTE_FLOW_ACTION_TYPE_VF
) {
3202 filter
->vf_id
= act_vf
->id
;
3203 filter
->is_to_vf
= 1;
3204 if (filter
->vf_id
>= pf
->vf_num
) {
3205 rte_flow_error_set(error
, EINVAL
,
3206 RTE_FLOW_ERROR_TYPE_ACTION
,
3207 act
, "Invalid VF ID for tunnel filter");
3212 /* Check if the next non-void item is QUEUE */
3214 NEXT_ITEM_OF_ACTION(act
, actions
, index
);
3215 if (act
->type
== RTE_FLOW_ACTION_TYPE_QUEUE
) {
3217 filter
->queue_id
= act_q
->index
;
3218 if ((!filter
->is_to_vf
) &&
3219 (filter
->queue_id
>= pf
->dev_data
->nb_rx_queues
)) {
3220 rte_flow_error_set(error
, EINVAL
,
3221 RTE_FLOW_ERROR_TYPE_ACTION
,
3222 act
, "Invalid queue ID for tunnel filter");
3224 } else if (filter
->is_to_vf
&&
3225 (filter
->queue_id
>= pf
->vf_nb_qps
)) {
3226 rte_flow_error_set(error
, EINVAL
,
3227 RTE_FLOW_ERROR_TYPE_ACTION
,
3228 act
, "Invalid queue ID for tunnel filter");
3233 /* Check if the next non-void item is END */
3235 NEXT_ITEM_OF_ACTION(act
, actions
, index
);
3236 if (act
->type
!= RTE_FLOW_ACTION_TYPE_END
) {
3237 rte_flow_error_set(error
, EINVAL
, RTE_FLOW_ERROR_TYPE_ACTION
,
3238 act
, "Not supported action.");
3245 static uint16_t i40e_supported_tunnel_filter_types
[] = {
3246 ETH_TUNNEL_FILTER_IMAC
| ETH_TUNNEL_FILTER_TENID
|
3247 ETH_TUNNEL_FILTER_IVLAN
,
3248 ETH_TUNNEL_FILTER_IMAC
| ETH_TUNNEL_FILTER_IVLAN
,
3249 ETH_TUNNEL_FILTER_IMAC
| ETH_TUNNEL_FILTER_TENID
,
3250 ETH_TUNNEL_FILTER_OMAC
| ETH_TUNNEL_FILTER_TENID
|
3251 ETH_TUNNEL_FILTER_IMAC
,
3252 ETH_TUNNEL_FILTER_IMAC
,
3256 i40e_check_tunnel_filter_type(uint8_t filter_type
)
3260 for (i
= 0; i
< RTE_DIM(i40e_supported_tunnel_filter_types
); i
++) {
3261 if (filter_type
== i40e_supported_tunnel_filter_types
[i
])
3268 /* 1. Last in item should be NULL as range is not supported.
3269 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3270 * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3271 * 3. Mask of fields which need to be matched should be
3273 * 4. Mask of fields which needn't to be matched should be
3277 i40e_flow_parse_vxlan_pattern(__rte_unused
struct rte_eth_dev
*dev
,
3278 const struct rte_flow_item
*pattern
,
3279 struct rte_flow_error
*error
,
3280 struct i40e_tunnel_filter_conf
*filter
)
3282 const struct rte_flow_item
*item
= pattern
;
3283 const struct rte_flow_item_eth
*eth_spec
;
3284 const struct rte_flow_item_eth
*eth_mask
;
3285 const struct rte_flow_item_vxlan
*vxlan_spec
;
3286 const struct rte_flow_item_vxlan
*vxlan_mask
;
3287 const struct rte_flow_item_vlan
*vlan_spec
;
3288 const struct rte_flow_item_vlan
*vlan_mask
;
3289 uint8_t filter_type
= 0;
3290 bool is_vni_masked
= 0;
3291 uint8_t vni_mask
[] = {0xFF, 0xFF, 0xFF};
3292 enum rte_flow_item_type item_type
;
3293 bool vxlan_flag
= 0;
3294 uint32_t tenant_id_be
= 0;
3297 for (; item
->type
!= RTE_FLOW_ITEM_TYPE_END
; item
++) {
3299 rte_flow_error_set(error
, EINVAL
,
3300 RTE_FLOW_ERROR_TYPE_ITEM
,
3302 "Not support range");
3305 item_type
= item
->type
;
3306 switch (item_type
) {
3307 case RTE_FLOW_ITEM_TYPE_ETH
:
3308 eth_spec
= item
->spec
;
3309 eth_mask
= item
->mask
;
3311 /* Check if ETH item is used for place holder.
3312 * If yes, both spec and mask should be NULL.
3313 * If no, both spec and mask shouldn't be NULL.
3315 if ((!eth_spec
&& eth_mask
) ||
3316 (eth_spec
&& !eth_mask
)) {
3317 rte_flow_error_set(error
, EINVAL
,
3318 RTE_FLOW_ERROR_TYPE_ITEM
,
3320 "Invalid ether spec/mask");
3324 if (eth_spec
&& eth_mask
) {
3325 /* DST address of inner MAC shouldn't be masked.
3326 * SRC address of Inner MAC should be masked.
3328 if (!is_broadcast_ether_addr(ð_mask
->dst
) ||
3329 !is_zero_ether_addr(ð_mask
->src
) ||
3331 rte_flow_error_set(error
, EINVAL
,
3332 RTE_FLOW_ERROR_TYPE_ITEM
,
3334 "Invalid ether spec/mask");
3339 rte_memcpy(&filter
->outer_mac
,
3342 filter_type
|= ETH_TUNNEL_FILTER_OMAC
;
3344 rte_memcpy(&filter
->inner_mac
,
3347 filter_type
|= ETH_TUNNEL_FILTER_IMAC
;
3351 case RTE_FLOW_ITEM_TYPE_VLAN
:
3352 vlan_spec
= item
->spec
;
3353 vlan_mask
= item
->mask
;
3354 if (!(vlan_spec
&& vlan_mask
) ||
3355 vlan_mask
->inner_type
) {
3356 rte_flow_error_set(error
, EINVAL
,
3357 RTE_FLOW_ERROR_TYPE_ITEM
,
3359 "Invalid vlan item");
3363 if (vlan_spec
&& vlan_mask
) {
3364 if (vlan_mask
->tci
==
3365 rte_cpu_to_be_16(I40E_TCI_MASK
))
3366 filter
->inner_vlan
=
3367 rte_be_to_cpu_16(vlan_spec
->tci
) &
3369 filter_type
|= ETH_TUNNEL_FILTER_IVLAN
;
3372 case RTE_FLOW_ITEM_TYPE_IPV4
:
3373 filter
->ip_type
= I40E_TUNNEL_IPTYPE_IPV4
;
3374 /* IPv4 is used to describe protocol,
3375 * spec and mask should be NULL.
3377 if (item
->spec
|| item
->mask
) {
3378 rte_flow_error_set(error
, EINVAL
,
3379 RTE_FLOW_ERROR_TYPE_ITEM
,
3381 "Invalid IPv4 item");
3385 case RTE_FLOW_ITEM_TYPE_IPV6
:
3386 filter
->ip_type
= I40E_TUNNEL_IPTYPE_IPV6
;
3387 /* IPv6 is used to describe protocol,
3388 * spec and mask should be NULL.
3390 if (item
->spec
|| item
->mask
) {
3391 rte_flow_error_set(error
, EINVAL
,
3392 RTE_FLOW_ERROR_TYPE_ITEM
,
3394 "Invalid IPv6 item");
3398 case RTE_FLOW_ITEM_TYPE_UDP
:
3399 /* UDP is used to describe protocol,
3400 * spec and mask should be NULL.
3402 if (item
->spec
|| item
->mask
) {
3403 rte_flow_error_set(error
, EINVAL
,
3404 RTE_FLOW_ERROR_TYPE_ITEM
,
3406 "Invalid UDP item");
3410 case RTE_FLOW_ITEM_TYPE_VXLAN
:
3411 vxlan_spec
= item
->spec
;
3412 vxlan_mask
= item
->mask
;
3413 /* Check if VXLAN item is used to describe protocol.
3414 * If yes, both spec and mask should be NULL.
3415 * If no, both spec and mask shouldn't be NULL.
3417 if ((!vxlan_spec
&& vxlan_mask
) ||
3418 (vxlan_spec
&& !vxlan_mask
)) {
3419 rte_flow_error_set(error
, EINVAL
,
3420 RTE_FLOW_ERROR_TYPE_ITEM
,
3422 "Invalid VXLAN item");
3426 /* Check if VNI is masked. */
3427 if (vxlan_spec
&& vxlan_mask
) {
3429 !!memcmp(vxlan_mask
->vni
, vni_mask
,
3431 if (is_vni_masked
) {
3432 rte_flow_error_set(error
, EINVAL
,
3433 RTE_FLOW_ERROR_TYPE_ITEM
,
3435 "Invalid VNI mask");
3439 rte_memcpy(((uint8_t *)&tenant_id_be
+ 1),
3440 vxlan_spec
->vni
, 3);
3442 rte_be_to_cpu_32(tenant_id_be
);
3443 filter_type
|= ETH_TUNNEL_FILTER_TENID
;
3453 ret
= i40e_check_tunnel_filter_type(filter_type
);
3455 rte_flow_error_set(error
, EINVAL
,
3456 RTE_FLOW_ERROR_TYPE_ITEM
,
3458 "Invalid filter type");
3461 filter
->filter_type
= filter_type
;
3463 filter
->tunnel_type
= I40E_TUNNEL_TYPE_VXLAN
;
3469 i40e_flow_parse_vxlan_filter(struct rte_eth_dev
*dev
,
3470 const struct rte_flow_attr
*attr
,
3471 const struct rte_flow_item pattern
[],
3472 const struct rte_flow_action actions
[],
3473 struct rte_flow_error
*error
,
3474 union i40e_filter_t
*filter
)
3476 struct i40e_tunnel_filter_conf
*tunnel_filter
=
3477 &filter
->consistent_tunnel_filter
;
3480 ret
= i40e_flow_parse_vxlan_pattern(dev
, pattern
,
3481 error
, tunnel_filter
);
3485 ret
= i40e_flow_parse_tunnel_action(dev
, actions
, error
, tunnel_filter
);
3489 ret
= i40e_flow_parse_attr(attr
, error
);
3493 cons_filter_type
= RTE_ETH_FILTER_TUNNEL
;
3498 /* 1. Last in item should be NULL as range is not supported.
3499 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3500 * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3501 * 3. Mask of fields which need to be matched should be
3503 * 4. Mask of fields which needn't to be matched should be
3507 i40e_flow_parse_nvgre_pattern(__rte_unused
struct rte_eth_dev
*dev
,
3508 const struct rte_flow_item
*pattern
,
3509 struct rte_flow_error
*error
,
3510 struct i40e_tunnel_filter_conf
*filter
)
3512 const struct rte_flow_item
*item
= pattern
;
3513 const struct rte_flow_item_eth
*eth_spec
;
3514 const struct rte_flow_item_eth
*eth_mask
;
3515 const struct rte_flow_item_nvgre
*nvgre_spec
;
3516 const struct rte_flow_item_nvgre
*nvgre_mask
;
3517 const struct rte_flow_item_vlan
*vlan_spec
;
3518 const struct rte_flow_item_vlan
*vlan_mask
;
3519 enum rte_flow_item_type item_type
;
3520 uint8_t filter_type
= 0;
3521 bool is_tni_masked
= 0;
3522 uint8_t tni_mask
[] = {0xFF, 0xFF, 0xFF};
3523 bool nvgre_flag
= 0;
3524 uint32_t tenant_id_be
= 0;
3527 for (; item
->type
!= RTE_FLOW_ITEM_TYPE_END
; item
++) {
3529 rte_flow_error_set(error
, EINVAL
,
3530 RTE_FLOW_ERROR_TYPE_ITEM
,
3532 "Not support range");
3535 item_type
= item
->type
;
3536 switch (item_type
) {
3537 case RTE_FLOW_ITEM_TYPE_ETH
:
3538 eth_spec
= item
->spec
;
3539 eth_mask
= item
->mask
;
3541 /* Check if ETH item is used for place holder.
3542 * If yes, both spec and mask should be NULL.
3543 * If no, both spec and mask shouldn't be NULL.
3545 if ((!eth_spec
&& eth_mask
) ||
3546 (eth_spec
&& !eth_mask
)) {
3547 rte_flow_error_set(error
, EINVAL
,
3548 RTE_FLOW_ERROR_TYPE_ITEM
,
3550 "Invalid ether spec/mask");
3554 if (eth_spec
&& eth_mask
) {
3555 /* DST address of inner MAC shouldn't be masked.
3556 * SRC address of Inner MAC should be masked.
3558 if (!is_broadcast_ether_addr(ð_mask
->dst
) ||
3559 !is_zero_ether_addr(ð_mask
->src
) ||
3561 rte_flow_error_set(error
, EINVAL
,
3562 RTE_FLOW_ERROR_TYPE_ITEM
,
3564 "Invalid ether spec/mask");
3569 rte_memcpy(&filter
->outer_mac
,
3572 filter_type
|= ETH_TUNNEL_FILTER_OMAC
;
3574 rte_memcpy(&filter
->inner_mac
,
3577 filter_type
|= ETH_TUNNEL_FILTER_IMAC
;
3582 case RTE_FLOW_ITEM_TYPE_VLAN
:
3583 vlan_spec
= item
->spec
;
3584 vlan_mask
= item
->mask
;
3585 if (!(vlan_spec
&& vlan_mask
) ||
3586 vlan_mask
->inner_type
) {
3587 rte_flow_error_set(error
, EINVAL
,
3588 RTE_FLOW_ERROR_TYPE_ITEM
,
3590 "Invalid vlan item");
3594 if (vlan_spec
&& vlan_mask
) {
3595 if (vlan_mask
->tci
==
3596 rte_cpu_to_be_16(I40E_TCI_MASK
))
3597 filter
->inner_vlan
=
3598 rte_be_to_cpu_16(vlan_spec
->tci
) &
3600 filter_type
|= ETH_TUNNEL_FILTER_IVLAN
;
3603 case RTE_FLOW_ITEM_TYPE_IPV4
:
3604 filter
->ip_type
= I40E_TUNNEL_IPTYPE_IPV4
;
3605 /* IPv4 is used to describe protocol,
3606 * spec and mask should be NULL.
3608 if (item
->spec
|| item
->mask
) {
3609 rte_flow_error_set(error
, EINVAL
,
3610 RTE_FLOW_ERROR_TYPE_ITEM
,
3612 "Invalid IPv4 item");
3616 case RTE_FLOW_ITEM_TYPE_IPV6
:
3617 filter
->ip_type
= I40E_TUNNEL_IPTYPE_IPV6
;
3618 /* IPv6 is used to describe protocol,
3619 * spec and mask should be NULL.
3621 if (item
->spec
|| item
->mask
) {
3622 rte_flow_error_set(error
, EINVAL
,
3623 RTE_FLOW_ERROR_TYPE_ITEM
,
3625 "Invalid IPv6 item");
3629 case RTE_FLOW_ITEM_TYPE_NVGRE
:
3630 nvgre_spec
= item
->spec
;
3631 nvgre_mask
= item
->mask
;
3632 /* Check if NVGRE item is used to describe protocol.
3633 * If yes, both spec and mask should be NULL.
3634 * If no, both spec and mask shouldn't be NULL.
3636 if ((!nvgre_spec
&& nvgre_mask
) ||
3637 (nvgre_spec
&& !nvgre_mask
)) {
3638 rte_flow_error_set(error
, EINVAL
,
3639 RTE_FLOW_ERROR_TYPE_ITEM
,
3641 "Invalid NVGRE item");
3645 if (nvgre_spec
&& nvgre_mask
) {
3647 !!memcmp(nvgre_mask
->tni
, tni_mask
,
3649 if (is_tni_masked
) {
3650 rte_flow_error_set(error
, EINVAL
,
3651 RTE_FLOW_ERROR_TYPE_ITEM
,
3653 "Invalid TNI mask");
3656 if (nvgre_mask
->protocol
&&
3657 nvgre_mask
->protocol
!= 0xFFFF) {
3658 rte_flow_error_set(error
, EINVAL
,
3659 RTE_FLOW_ERROR_TYPE_ITEM
,
3661 "Invalid NVGRE item");
3664 if (nvgre_mask
->c_k_s_rsvd0_ver
&&
3665 nvgre_mask
->c_k_s_rsvd0_ver
!=
3666 rte_cpu_to_be_16(0xFFFF)) {
3667 rte_flow_error_set(error
, EINVAL
,
3668 RTE_FLOW_ERROR_TYPE_ITEM
,
3670 "Invalid NVGRE item");
3673 if (nvgre_spec
->c_k_s_rsvd0_ver
!=
3674 rte_cpu_to_be_16(0x2000) &&
3675 nvgre_mask
->c_k_s_rsvd0_ver
) {
3676 rte_flow_error_set(error
, EINVAL
,
3677 RTE_FLOW_ERROR_TYPE_ITEM
,
3679 "Invalid NVGRE item");
3682 if (nvgre_mask
->protocol
&&
3683 nvgre_spec
->protocol
!=
3684 rte_cpu_to_be_16(0x6558)) {
3685 rte_flow_error_set(error
, EINVAL
,
3686 RTE_FLOW_ERROR_TYPE_ITEM
,
3688 "Invalid NVGRE item");
3691 rte_memcpy(((uint8_t *)&tenant_id_be
+ 1),
3692 nvgre_spec
->tni
, 3);
3694 rte_be_to_cpu_32(tenant_id_be
);
3695 filter_type
|= ETH_TUNNEL_FILTER_TENID
;
3705 ret
= i40e_check_tunnel_filter_type(filter_type
);
3707 rte_flow_error_set(error
, EINVAL
,
3708 RTE_FLOW_ERROR_TYPE_ITEM
,
3710 "Invalid filter type");
3713 filter
->filter_type
= filter_type
;
3715 filter
->tunnel_type
= I40E_TUNNEL_TYPE_NVGRE
;
3721 i40e_flow_parse_nvgre_filter(struct rte_eth_dev
*dev
,
3722 const struct rte_flow_attr
*attr
,
3723 const struct rte_flow_item pattern
[],
3724 const struct rte_flow_action actions
[],
3725 struct rte_flow_error
*error
,
3726 union i40e_filter_t
*filter
)
3728 struct i40e_tunnel_filter_conf
*tunnel_filter
=
3729 &filter
->consistent_tunnel_filter
;
3732 ret
= i40e_flow_parse_nvgre_pattern(dev
, pattern
,
3733 error
, tunnel_filter
);
3737 ret
= i40e_flow_parse_tunnel_action(dev
, actions
, error
, tunnel_filter
);
3741 ret
= i40e_flow_parse_attr(attr
, error
);
3745 cons_filter_type
= RTE_ETH_FILTER_TUNNEL
;
3750 /* 1. Last in item should be NULL as range is not supported.
3751 * 2. Supported filter types: MPLS label.
3752 * 3. Mask of fields which need to be matched should be
3754 * 4. Mask of fields which needn't to be matched should be
3758 i40e_flow_parse_mpls_pattern(__rte_unused
struct rte_eth_dev
*dev
,
3759 const struct rte_flow_item
*pattern
,
3760 struct rte_flow_error
*error
,
3761 struct i40e_tunnel_filter_conf
*filter
)
3763 const struct rte_flow_item
*item
= pattern
;
3764 const struct rte_flow_item_mpls
*mpls_spec
;
3765 const struct rte_flow_item_mpls
*mpls_mask
;
3766 enum rte_flow_item_type item_type
;
3767 bool is_mplsoudp
= 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
3768 const uint8_t label_mask
[3] = {0xFF, 0xFF, 0xF0};
3769 uint32_t label_be
= 0;
3771 for (; item
->type
!= RTE_FLOW_ITEM_TYPE_END
; item
++) {
3773 rte_flow_error_set(error
, EINVAL
,
3774 RTE_FLOW_ERROR_TYPE_ITEM
,
3776 "Not support range");
3779 item_type
= item
->type
;
3780 switch (item_type
) {
3781 case RTE_FLOW_ITEM_TYPE_ETH
:
3782 if (item
->spec
|| item
->mask
) {
3783 rte_flow_error_set(error
, EINVAL
,
3784 RTE_FLOW_ERROR_TYPE_ITEM
,
3786 "Invalid ETH item");
3790 case RTE_FLOW_ITEM_TYPE_IPV4
:
3791 filter
->ip_type
= I40E_TUNNEL_IPTYPE_IPV4
;
3792 /* IPv4 is used to describe protocol,
3793 * spec and mask should be NULL.
3795 if (item
->spec
|| item
->mask
) {
3796 rte_flow_error_set(error
, EINVAL
,
3797 RTE_FLOW_ERROR_TYPE_ITEM
,
3799 "Invalid IPv4 item");
3803 case RTE_FLOW_ITEM_TYPE_IPV6
:
3804 filter
->ip_type
= I40E_TUNNEL_IPTYPE_IPV6
;
3805 /* IPv6 is used to describe protocol,
3806 * spec and mask should be NULL.
3808 if (item
->spec
|| item
->mask
) {
3809 rte_flow_error_set(error
, EINVAL
,
3810 RTE_FLOW_ERROR_TYPE_ITEM
,
3812 "Invalid IPv6 item");
3816 case RTE_FLOW_ITEM_TYPE_UDP
:
3817 /* UDP is used to describe protocol,
3818 * spec and mask should be NULL.
3820 if (item
->spec
|| item
->mask
) {
3821 rte_flow_error_set(error
, EINVAL
,
3822 RTE_FLOW_ERROR_TYPE_ITEM
,
3824 "Invalid UDP item");
3829 case RTE_FLOW_ITEM_TYPE_GRE
:
3830 /* GRE is used to describe protocol,
3831 * spec and mask should be NULL.
3833 if (item
->spec
|| item
->mask
) {
3834 rte_flow_error_set(error
, EINVAL
,
3835 RTE_FLOW_ERROR_TYPE_ITEM
,
3837 "Invalid GRE item");
3841 case RTE_FLOW_ITEM_TYPE_MPLS
:
3842 mpls_spec
= item
->spec
;
3843 mpls_mask
= item
->mask
;
3845 if (!mpls_spec
|| !mpls_mask
) {
3846 rte_flow_error_set(error
, EINVAL
,
3847 RTE_FLOW_ERROR_TYPE_ITEM
,
3849 "Invalid MPLS item");
3853 if (memcmp(mpls_mask
->label_tc_s
, label_mask
, 3)) {
3854 rte_flow_error_set(error
, EINVAL
,
3855 RTE_FLOW_ERROR_TYPE_ITEM
,
3857 "Invalid MPLS label mask");
3860 rte_memcpy(((uint8_t *)&label_be
+ 1),
3861 mpls_spec
->label_tc_s
, 3);
3862 filter
->tenant_id
= rte_be_to_cpu_32(label_be
) >> 4;
3870 filter
->tunnel_type
= I40E_TUNNEL_TYPE_MPLSoUDP
;
3872 filter
->tunnel_type
= I40E_TUNNEL_TYPE_MPLSoGRE
;
3878 i40e_flow_parse_mpls_filter(struct rte_eth_dev
*dev
,
3879 const struct rte_flow_attr
*attr
,
3880 const struct rte_flow_item pattern
[],
3881 const struct rte_flow_action actions
[],
3882 struct rte_flow_error
*error
,
3883 union i40e_filter_t
*filter
)
3885 struct i40e_tunnel_filter_conf
*tunnel_filter
=
3886 &filter
->consistent_tunnel_filter
;
3889 ret
= i40e_flow_parse_mpls_pattern(dev
, pattern
,
3890 error
, tunnel_filter
);
3894 ret
= i40e_flow_parse_tunnel_action(dev
, actions
, error
, tunnel_filter
);
3898 ret
= i40e_flow_parse_attr(attr
, error
);
3902 cons_filter_type
= RTE_ETH_FILTER_TUNNEL
;
3907 /* 1. Last in item should be NULL as range is not supported.
3908 * 2. Supported filter types: GTP TEID.
3909 * 3. Mask of fields which need to be matched should be
3911 * 4. Mask of fields which needn't to be matched should be
3913 * 5. GTP profile supports GTPv1 only.
3914 * 6. GTP-C response message ('source_port' = 2123) is not supported.
3917 i40e_flow_parse_gtp_pattern(struct rte_eth_dev
*dev
,
3918 const struct rte_flow_item
*pattern
,
3919 struct rte_flow_error
*error
,
3920 struct i40e_tunnel_filter_conf
*filter
)
3922 struct i40e_pf
*pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
3923 const struct rte_flow_item
*item
= pattern
;
3924 const struct rte_flow_item_gtp
*gtp_spec
;
3925 const struct rte_flow_item_gtp
*gtp_mask
;
3926 enum rte_flow_item_type item_type
;
3928 if (!pf
->gtp_support
) {
3929 rte_flow_error_set(error
, EINVAL
,
3930 RTE_FLOW_ERROR_TYPE_ITEM
,
3932 "GTP is not supported by default.");
3936 for (; item
->type
!= RTE_FLOW_ITEM_TYPE_END
; item
++) {
3938 rte_flow_error_set(error
, EINVAL
,
3939 RTE_FLOW_ERROR_TYPE_ITEM
,
3941 "Not support range");
3944 item_type
= item
->type
;
3945 switch (item_type
) {
3946 case RTE_FLOW_ITEM_TYPE_ETH
:
3947 if (item
->spec
|| item
->mask
) {
3948 rte_flow_error_set(error
, EINVAL
,
3949 RTE_FLOW_ERROR_TYPE_ITEM
,
3951 "Invalid ETH item");
3955 case RTE_FLOW_ITEM_TYPE_IPV4
:
3956 filter
->ip_type
= I40E_TUNNEL_IPTYPE_IPV4
;
3957 /* IPv4 is used to describe protocol,
3958 * spec and mask should be NULL.
3960 if (item
->spec
|| item
->mask
) {
3961 rte_flow_error_set(error
, EINVAL
,
3962 RTE_FLOW_ERROR_TYPE_ITEM
,
3964 "Invalid IPv4 item");
3968 case RTE_FLOW_ITEM_TYPE_UDP
:
3969 if (item
->spec
|| item
->mask
) {
3970 rte_flow_error_set(error
, EINVAL
,
3971 RTE_FLOW_ERROR_TYPE_ITEM
,
3973 "Invalid UDP item");
3977 case RTE_FLOW_ITEM_TYPE_GTPC
:
3978 case RTE_FLOW_ITEM_TYPE_GTPU
:
3979 gtp_spec
= item
->spec
;
3980 gtp_mask
= item
->mask
;
3982 if (!gtp_spec
|| !gtp_mask
) {
3983 rte_flow_error_set(error
, EINVAL
,
3984 RTE_FLOW_ERROR_TYPE_ITEM
,
3986 "Invalid GTP item");
3990 if (gtp_mask
->v_pt_rsv_flags
||
3991 gtp_mask
->msg_type
||
3992 gtp_mask
->msg_len
||
3993 gtp_mask
->teid
!= UINT32_MAX
) {
3994 rte_flow_error_set(error
, EINVAL
,
3995 RTE_FLOW_ERROR_TYPE_ITEM
,
3997 "Invalid GTP mask");
4001 if (item_type
== RTE_FLOW_ITEM_TYPE_GTPC
)
4002 filter
->tunnel_type
= I40E_TUNNEL_TYPE_GTPC
;
4003 else if (item_type
== RTE_FLOW_ITEM_TYPE_GTPU
)
4004 filter
->tunnel_type
= I40E_TUNNEL_TYPE_GTPU
;
4006 filter
->tenant_id
= rte_be_to_cpu_32(gtp_spec
->teid
);
4018 i40e_flow_parse_gtp_filter(struct rte_eth_dev
*dev
,
4019 const struct rte_flow_attr
*attr
,
4020 const struct rte_flow_item pattern
[],
4021 const struct rte_flow_action actions
[],
4022 struct rte_flow_error
*error
,
4023 union i40e_filter_t
*filter
)
4025 struct i40e_tunnel_filter_conf
*tunnel_filter
=
4026 &filter
->consistent_tunnel_filter
;
4029 ret
= i40e_flow_parse_gtp_pattern(dev
, pattern
,
4030 error
, tunnel_filter
);
4034 ret
= i40e_flow_parse_tunnel_action(dev
, actions
, error
, tunnel_filter
);
4038 ret
= i40e_flow_parse_attr(attr
, error
);
4042 cons_filter_type
= RTE_ETH_FILTER_TUNNEL
;
4047 /* 1. Last in item should be NULL as range is not supported.
4048 * 2. Supported filter types: QINQ.
4049 * 3. Mask of fields which need to be matched should be
4051 * 4. Mask of fields which needn't to be matched should be
4055 i40e_flow_parse_qinq_pattern(__rte_unused
struct rte_eth_dev
*dev
,
4056 const struct rte_flow_item
*pattern
,
4057 struct rte_flow_error
*error
,
4058 struct i40e_tunnel_filter_conf
*filter
)
4060 const struct rte_flow_item
*item
= pattern
;
4061 const struct rte_flow_item_vlan
*vlan_spec
= NULL
;
4062 const struct rte_flow_item_vlan
*vlan_mask
= NULL
;
4063 const struct rte_flow_item_vlan
*i_vlan_spec
= NULL
;
4064 const struct rte_flow_item_vlan
*i_vlan_mask
= NULL
;
4065 const struct rte_flow_item_vlan
*o_vlan_spec
= NULL
;
4066 const struct rte_flow_item_vlan
*o_vlan_mask
= NULL
;
4068 enum rte_flow_item_type item_type
;
4071 for (; item
->type
!= RTE_FLOW_ITEM_TYPE_END
; item
++) {
4073 rte_flow_error_set(error
, EINVAL
,
4074 RTE_FLOW_ERROR_TYPE_ITEM
,
4076 "Not support range");
4079 item_type
= item
->type
;
4080 switch (item_type
) {
4081 case RTE_FLOW_ITEM_TYPE_ETH
:
4082 if (item
->spec
|| item
->mask
) {
4083 rte_flow_error_set(error
, EINVAL
,
4084 RTE_FLOW_ERROR_TYPE_ITEM
,
4086 "Invalid ETH item");
4090 case RTE_FLOW_ITEM_TYPE_VLAN
:
4091 vlan_spec
= item
->spec
;
4092 vlan_mask
= item
->mask
;
4094 if (!(vlan_spec
&& vlan_mask
) ||
4095 vlan_mask
->inner_type
) {
4096 rte_flow_error_set(error
, EINVAL
,
4097 RTE_FLOW_ERROR_TYPE_ITEM
,
4099 "Invalid vlan item");
4104 o_vlan_spec
= vlan_spec
;
4105 o_vlan_mask
= vlan_mask
;
4108 i_vlan_spec
= vlan_spec
;
4109 i_vlan_mask
= vlan_mask
;
4119 /* Get filter specification */
4120 if ((o_vlan_mask
!= NULL
) && (o_vlan_mask
->tci
==
4121 rte_cpu_to_be_16(I40E_TCI_MASK
)) &&
4122 (i_vlan_mask
!= NULL
) &&
4123 (i_vlan_mask
->tci
== rte_cpu_to_be_16(I40E_TCI_MASK
))) {
4124 filter
->outer_vlan
= rte_be_to_cpu_16(o_vlan_spec
->tci
)
4126 filter
->inner_vlan
= rte_be_to_cpu_16(i_vlan_spec
->tci
)
4129 rte_flow_error_set(error
, EINVAL
,
4130 RTE_FLOW_ERROR_TYPE_ITEM
,
4132 "Invalid filter type");
4136 filter
->tunnel_type
= I40E_TUNNEL_TYPE_QINQ
;
4141 i40e_flow_parse_qinq_filter(struct rte_eth_dev
*dev
,
4142 const struct rte_flow_attr
*attr
,
4143 const struct rte_flow_item pattern
[],
4144 const struct rte_flow_action actions
[],
4145 struct rte_flow_error
*error
,
4146 union i40e_filter_t
*filter
)
4148 struct i40e_tunnel_filter_conf
*tunnel_filter
=
4149 &filter
->consistent_tunnel_filter
;
4152 ret
= i40e_flow_parse_qinq_pattern(dev
, pattern
,
4153 error
, tunnel_filter
);
4157 ret
= i40e_flow_parse_tunnel_action(dev
, actions
, error
, tunnel_filter
);
4161 ret
= i40e_flow_parse_attr(attr
, error
);
4165 cons_filter_type
= RTE_ETH_FILTER_TUNNEL
;
4171 * This function is used to do configuration i40e existing RSS with rte_flow.
4172 * It also enable queue region configuration using flow API for i40e.
4173 * pattern can be used indicate what parameters will be include in flow,
4174 * like user_priority or flowtype for queue region or HASH function for RSS.
4175 * Action is used to transmit parameter like queue index and HASH
4176 * function for RSS, or flowtype for queue region configuration.
4179 * Case 1: only ETH, indicate flowtype for queue region will be parsed.
4180 * Case 2: only VLAN, indicate user_priority for queue region will be parsed.
4181 * Case 3: none, indicate RSS related will be parsed in action.
4182 * Any pattern other the ETH or VLAN will be treated as invalid except END.
4183 * So, pattern choice is depened on the purpose of configuration of
4186 * action RSS will be uaed to transmit valid parameter with
4187 * struct rte_flow_action_rss for all the 3 case.
4190 i40e_flow_parse_rss_pattern(__rte_unused
struct rte_eth_dev
*dev
,
4191 const struct rte_flow_item
*pattern
,
4192 struct rte_flow_error
*error
,
4193 uint8_t *action_flag
,
4194 struct i40e_queue_regions
*info
)
4196 const struct rte_flow_item_vlan
*vlan_spec
, *vlan_mask
;
4197 const struct rte_flow_item
*item
= pattern
;
4198 enum rte_flow_item_type item_type
;
4200 if (item
->type
== RTE_FLOW_ITEM_TYPE_END
)
4203 for (; item
->type
!= RTE_FLOW_ITEM_TYPE_END
; item
++) {
4205 rte_flow_error_set(error
, EINVAL
,
4206 RTE_FLOW_ERROR_TYPE_ITEM
,
4208 "Not support range");
4211 item_type
= item
->type
;
4212 switch (item_type
) {
4213 case RTE_FLOW_ITEM_TYPE_ETH
:
4216 case RTE_FLOW_ITEM_TYPE_VLAN
:
4217 vlan_spec
= item
->spec
;
4218 vlan_mask
= item
->mask
;
4219 if (vlan_spec
&& vlan_mask
) {
4220 if (vlan_mask
->tci
==
4221 rte_cpu_to_be_16(I40E_TCI_MASK
)) {
4222 info
->region
[0].user_priority
[0] =
4224 vlan_spec
->tci
) >> 13) & 0x7;
4225 info
->region
[0].user_priority_num
= 1;
4226 info
->queue_region_number
= 1;
4232 rte_flow_error_set(error
, EINVAL
,
4233 RTE_FLOW_ERROR_TYPE_ITEM
,
4235 "Not support range");
4244 * This function is used to parse rss queue index, total queue number and
4245 * hash functions, If the purpose of this configuration is for queue region
4246 * configuration, it will set queue_region_conf flag to TRUE, else to FALSE.
4247 * In queue region configuration, it also need to parse hardware flowtype
4248 * and user_priority from configuration, it will also cheeck the validity
4249 * of these parameters. For example, The queue region sizes should
4250 * be any of the following values: 1, 2, 4, 8, 16, 32, 64, the
4251 * hw_flowtype or PCTYPE max index should be 63, the user priority
4252 * max index should be 7, and so on. And also, queue index should be
4253 * continuous sequence and queue region index should be part of rss
4254 * queue index for this port.
4257 i40e_flow_parse_rss_action(struct rte_eth_dev
*dev
,
4258 const struct rte_flow_action
*actions
,
4259 struct rte_flow_error
*error
,
4260 uint8_t action_flag
,
4261 struct i40e_queue_regions
*conf_info
,
4262 union i40e_filter_t
*filter
)
4264 const struct rte_flow_action
*act
;
4265 const struct rte_flow_action_rss
*rss
;
4266 struct i40e_pf
*pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
4267 struct i40e_queue_regions
*info
= &pf
->queue_region
;
4268 struct i40e_rte_flow_rss_conf
*rss_config
=
4270 struct i40e_rte_flow_rss_conf
*rss_info
= &pf
->rss_info
;
4271 uint16_t i
, j
, n
, tmp
;
4273 uint64_t hf_bit
= 1;
4275 NEXT_ITEM_OF_ACTION(act
, actions
, index
);
4279 * rss only supports forwarding,
4280 * check if the first not void action is RSS.
4282 if (act
->type
!= RTE_FLOW_ACTION_TYPE_RSS
) {
4283 memset(rss_config
, 0, sizeof(struct i40e_rte_flow_rss_conf
));
4284 rte_flow_error_set(error
, EINVAL
,
4285 RTE_FLOW_ERROR_TYPE_ACTION
,
4286 act
, "Not supported action.");
4291 for (n
= 0; n
< 64; n
++) {
4292 if (rss
->types
& (hf_bit
<< n
)) {
4293 conf_info
->region
[0].hw_flowtype
[0] = n
;
4294 conf_info
->region
[0].flowtype_num
= 1;
4295 conf_info
->queue_region_number
= 1;
4302 * Do some queue region related parameters check
4303 * in order to keep queue index for queue region to be
4304 * continuous sequence and also to be part of RSS
4305 * queue index for this port.
4307 if (conf_info
->queue_region_number
) {
4308 for (i
= 0; i
< rss
->queue_num
; i
++) {
4309 for (j
= 0; j
< rss_info
->conf
.queue_num
; j
++) {
4310 if (rss
->queue
[i
] == rss_info
->conf
.queue
[j
])
4313 if (j
== rss_info
->conf
.queue_num
) {
4314 rte_flow_error_set(error
, EINVAL
,
4315 RTE_FLOW_ERROR_TYPE_ACTION
,
4322 for (i
= 0; i
< rss
->queue_num
- 1; i
++) {
4323 if (rss
->queue
[i
+ 1] != rss
->queue
[i
] + 1) {
4324 rte_flow_error_set(error
, EINVAL
,
4325 RTE_FLOW_ERROR_TYPE_ACTION
,
4333 /* Parse queue region related parameters from configuration */
4334 for (n
= 0; n
< conf_info
->queue_region_number
; n
++) {
4335 if (conf_info
->region
[n
].user_priority_num
||
4336 conf_info
->region
[n
].flowtype_num
) {
4337 if (!((rte_is_power_of_2(rss
->queue_num
)) &&
4338 rss
->queue_num
<= 64)) {
4339 rte_flow_error_set(error
, EINVAL
,
4340 RTE_FLOW_ERROR_TYPE_ACTION
,
4342 "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
4343 "total number of queues do not exceed the VSI allocation");
4347 if (conf_info
->region
[n
].user_priority
[n
] >=
4348 I40E_MAX_USER_PRIORITY
) {
4349 rte_flow_error_set(error
, EINVAL
,
4350 RTE_FLOW_ERROR_TYPE_ACTION
,
4352 "the user priority max index is 7");
4356 if (conf_info
->region
[n
].hw_flowtype
[n
] >=
4357 I40E_FILTER_PCTYPE_MAX
) {
4358 rte_flow_error_set(error
, EINVAL
,
4359 RTE_FLOW_ERROR_TYPE_ACTION
,
4361 "the hw_flowtype or PCTYPE max index is 63");
4365 for (i
= 0; i
< info
->queue_region_number
; i
++) {
4366 if (info
->region
[i
].queue_num
==
4368 info
->region
[i
].queue_start_index
==
4373 if (i
== info
->queue_region_number
) {
4374 if (i
> I40E_REGION_MAX_INDEX
) {
4375 rte_flow_error_set(error
, EINVAL
,
4376 RTE_FLOW_ERROR_TYPE_ACTION
,
4378 "the queue region max index is 7");
4382 info
->region
[i
].queue_num
=
4384 info
->region
[i
].queue_start_index
=
4386 info
->region
[i
].region_id
=
4387 info
->queue_region_number
;
4389 j
= info
->region
[i
].user_priority_num
;
4390 tmp
= conf_info
->region
[n
].user_priority
[0];
4391 if (conf_info
->region
[n
].user_priority_num
) {
4392 info
->region
[i
].user_priority
[j
] = tmp
;
4393 info
->region
[i
].user_priority_num
++;
4396 j
= info
->region
[i
].flowtype_num
;
4397 tmp
= conf_info
->region
[n
].hw_flowtype
[0];
4398 if (conf_info
->region
[n
].flowtype_num
) {
4399 info
->region
[i
].hw_flowtype
[j
] = tmp
;
4400 info
->region
[i
].flowtype_num
++;
4402 info
->queue_region_number
++;
4404 j
= info
->region
[i
].user_priority_num
;
4405 tmp
= conf_info
->region
[n
].user_priority
[0];
4406 if (conf_info
->region
[n
].user_priority_num
) {
4407 info
->region
[i
].user_priority
[j
] = tmp
;
4408 info
->region
[i
].user_priority_num
++;
4411 j
= info
->region
[i
].flowtype_num
;
4412 tmp
= conf_info
->region
[n
].hw_flowtype
[0];
4413 if (conf_info
->region
[n
].flowtype_num
) {
4414 info
->region
[i
].hw_flowtype
[j
] = tmp
;
4415 info
->region
[i
].flowtype_num
++;
4420 rss_config
->queue_region_conf
= TRUE
;
4424 * Return function if this flow is used for queue region configuration
4426 if (rss_config
->queue_region_conf
)
4429 if (!rss
|| !rss
->queue_num
) {
4430 rte_flow_error_set(error
, EINVAL
,
4431 RTE_FLOW_ERROR_TYPE_ACTION
,
4437 for (n
= 0; n
< rss
->queue_num
; n
++) {
4438 if (rss
->queue
[n
] >= dev
->data
->nb_rx_queues
) {
4439 rte_flow_error_set(error
, EINVAL
,
4440 RTE_FLOW_ERROR_TYPE_ACTION
,
4442 "queue id > max number of queues");
4447 /* Parse RSS related parameters from configuration */
4448 if (rss
->func
!= RTE_ETH_HASH_FUNCTION_DEFAULT
)
4449 return rte_flow_error_set
4450 (error
, ENOTSUP
, RTE_FLOW_ERROR_TYPE_ACTION
, act
,
4451 "non-default RSS hash functions are not supported");
4453 return rte_flow_error_set
4454 (error
, ENOTSUP
, RTE_FLOW_ERROR_TYPE_ACTION
, act
,
4455 "a nonzero RSS encapsulation level is not supported");
4456 if (rss
->key_len
&& rss
->key_len
> RTE_DIM(rss_config
->key
))
4457 return rte_flow_error_set
4458 (error
, ENOTSUP
, RTE_FLOW_ERROR_TYPE_ACTION
, act
,
4459 "RSS hash key too large");
4460 if (rss
->queue_num
> RTE_DIM(rss_config
->queue
))
4461 return rte_flow_error_set
4462 (error
, ENOTSUP
, RTE_FLOW_ERROR_TYPE_ACTION
, act
,
4463 "too many queues for RSS context");
4464 if (i40e_rss_conf_init(rss_config
, rss
))
4465 return rte_flow_error_set
4466 (error
, EINVAL
, RTE_FLOW_ERROR_TYPE_ACTION
, act
,
4467 "RSS context initialization failure");
4471 /* check if the next not void action is END */
4472 NEXT_ITEM_OF_ACTION(act
, actions
, index
);
4473 if (act
->type
!= RTE_FLOW_ACTION_TYPE_END
) {
4474 memset(rss_config
, 0, sizeof(struct i40e_rte_flow_rss_conf
));
4475 rte_flow_error_set(error
, EINVAL
,
4476 RTE_FLOW_ERROR_TYPE_ACTION
,
4477 act
, "Not supported action.");
4480 rss_config
->queue_region_conf
= FALSE
;
4486 i40e_parse_rss_filter(struct rte_eth_dev
*dev
,
4487 const struct rte_flow_attr
*attr
,
4488 const struct rte_flow_item pattern
[],
4489 const struct rte_flow_action actions
[],
4490 union i40e_filter_t
*filter
,
4491 struct rte_flow_error
*error
)
4494 struct i40e_queue_regions info
;
4495 uint8_t action_flag
= 0;
4497 memset(&info
, 0, sizeof(struct i40e_queue_regions
));
4499 ret
= i40e_flow_parse_rss_pattern(dev
, pattern
,
4500 error
, &action_flag
, &info
);
4504 ret
= i40e_flow_parse_rss_action(dev
, actions
, error
,
4505 action_flag
, &info
, filter
);
4509 ret
= i40e_flow_parse_attr(attr
, error
);
4513 cons_filter_type
= RTE_ETH_FILTER_HASH
;
4519 i40e_config_rss_filter_set(struct rte_eth_dev
*dev
,
4520 struct i40e_rte_flow_rss_conf
*conf
)
4522 struct i40e_pf
*pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
4523 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
4526 if (conf
->queue_region_conf
) {
4527 ret
= i40e_flush_queue_region_all_conf(dev
, hw
, pf
, 1);
4528 conf
->queue_region_conf
= 0;
4530 ret
= i40e_config_rss_filter(pf
, conf
, 1);
4536 i40e_config_rss_filter_del(struct rte_eth_dev
*dev
,
4537 struct i40e_rte_flow_rss_conf
*conf
)
4539 struct i40e_pf
*pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
4540 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
4542 i40e_flush_queue_region_all_conf(dev
, hw
, pf
, 0);
4544 i40e_config_rss_filter(pf
, conf
, 0);
4549 i40e_flow_validate(struct rte_eth_dev
*dev
,
4550 const struct rte_flow_attr
*attr
,
4551 const struct rte_flow_item pattern
[],
4552 const struct rte_flow_action actions
[],
4553 struct rte_flow_error
*error
)
4555 struct rte_flow_item
*items
; /* internal pattern w/o VOID items */
4556 parse_filter_t parse_filter
;
4557 uint32_t item_num
= 0; /* non-void item number of pattern*/
4560 int ret
= I40E_NOT_SUPPORTED
;
4563 rte_flow_error_set(error
, EINVAL
, RTE_FLOW_ERROR_TYPE_ITEM_NUM
,
4564 NULL
, "NULL pattern.");
4569 rte_flow_error_set(error
, EINVAL
,
4570 RTE_FLOW_ERROR_TYPE_ACTION_NUM
,
4571 NULL
, "NULL action.");
4576 rte_flow_error_set(error
, EINVAL
,
4577 RTE_FLOW_ERROR_TYPE_ATTR
,
4578 NULL
, "NULL attribute.");
4582 memset(&cons_filter
, 0, sizeof(cons_filter
));
4584 /* Get the non-void item of action */
4585 while ((actions
+ i
)->type
== RTE_FLOW_ACTION_TYPE_VOID
)
4588 if ((actions
+ i
)->type
== RTE_FLOW_ACTION_TYPE_RSS
) {
4589 ret
= i40e_parse_rss_filter(dev
, attr
, pattern
,
4590 actions
, &cons_filter
, error
);
4595 /* Get the non-void item number of pattern */
4596 while ((pattern
+ i
)->type
!= RTE_FLOW_ITEM_TYPE_END
) {
4597 if ((pattern
+ i
)->type
!= RTE_FLOW_ITEM_TYPE_VOID
)
4603 items
= rte_zmalloc("i40e_pattern",
4604 item_num
* sizeof(struct rte_flow_item
), 0);
4606 rte_flow_error_set(error
, ENOMEM
, RTE_FLOW_ERROR_TYPE_ITEM_NUM
,
4607 NULL
, "No memory for PMD internal items.");
4611 i40e_pattern_skip_void_item(items
, pattern
);
4615 parse_filter
= i40e_find_parse_filter_func(items
, &i
);
4616 if (!parse_filter
&& !flag
) {
4617 rte_flow_error_set(error
, EINVAL
,
4618 RTE_FLOW_ERROR_TYPE_ITEM
,
4619 pattern
, "Unsupported pattern");
4624 ret
= parse_filter(dev
, attr
, items
, actions
,
4625 error
, &cons_filter
);
4627 } while ((ret
< 0) && (i
< RTE_DIM(i40e_supported_patterns
)));
4634 static struct rte_flow
*
4635 i40e_flow_create(struct rte_eth_dev
*dev
,
4636 const struct rte_flow_attr
*attr
,
4637 const struct rte_flow_item pattern
[],
4638 const struct rte_flow_action actions
[],
4639 struct rte_flow_error
*error
)
4641 struct i40e_pf
*pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
4642 struct rte_flow
*flow
;
4645 flow
= rte_zmalloc("i40e_flow", sizeof(struct rte_flow
), 0);
4647 rte_flow_error_set(error
, ENOMEM
,
4648 RTE_FLOW_ERROR_TYPE_HANDLE
, NULL
,
4649 "Failed to allocate memory");
4653 ret
= i40e_flow_validate(dev
, attr
, pattern
, actions
, error
);
4657 switch (cons_filter_type
) {
4658 case RTE_ETH_FILTER_ETHERTYPE
:
4659 ret
= i40e_ethertype_filter_set(pf
,
4660 &cons_filter
.ethertype_filter
, 1);
4663 flow
->rule
= TAILQ_LAST(&pf
->ethertype
.ethertype_list
,
4664 i40e_ethertype_filter_list
);
4666 case RTE_ETH_FILTER_FDIR
:
4667 ret
= i40e_flow_add_del_fdir_filter(dev
,
4668 &cons_filter
.fdir_filter
, 1);
4671 flow
->rule
= TAILQ_LAST(&pf
->fdir
.fdir_list
,
4672 i40e_fdir_filter_list
);
4674 case RTE_ETH_FILTER_TUNNEL
:
4675 ret
= i40e_dev_consistent_tunnel_filter_set(pf
,
4676 &cons_filter
.consistent_tunnel_filter
, 1);
4679 flow
->rule
= TAILQ_LAST(&pf
->tunnel
.tunnel_list
,
4680 i40e_tunnel_filter_list
);
4682 case RTE_ETH_FILTER_HASH
:
4683 ret
= i40e_config_rss_filter_set(dev
,
4684 &cons_filter
.rss_conf
);
4687 flow
->rule
= &pf
->rss_info
;
4693 flow
->filter_type
= cons_filter_type
;
4694 TAILQ_INSERT_TAIL(&pf
->flow_list
, flow
, node
);
4698 rte_flow_error_set(error
, -ret
,
4699 RTE_FLOW_ERROR_TYPE_HANDLE
, NULL
,
4700 "Failed to create flow.");
4706 i40e_flow_destroy(struct rte_eth_dev
*dev
,
4707 struct rte_flow
*flow
,
4708 struct rte_flow_error
*error
)
4710 struct i40e_pf
*pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
4711 enum rte_filter_type filter_type
= flow
->filter_type
;
4714 switch (filter_type
) {
4715 case RTE_ETH_FILTER_ETHERTYPE
:
4716 ret
= i40e_flow_destroy_ethertype_filter(pf
,
4717 (struct i40e_ethertype_filter
*)flow
->rule
);
4719 case RTE_ETH_FILTER_TUNNEL
:
4720 ret
= i40e_flow_destroy_tunnel_filter(pf
,
4721 (struct i40e_tunnel_filter
*)flow
->rule
);
4723 case RTE_ETH_FILTER_FDIR
:
4724 ret
= i40e_flow_add_del_fdir_filter(dev
,
4725 &((struct i40e_fdir_filter
*)flow
->rule
)->fdir
, 0);
4727 /* If the last flow is destroyed, disable fdir. */
4728 if (!ret
&& !TAILQ_EMPTY(&pf
->fdir
.fdir_list
)) {
4729 i40e_fdir_teardown(pf
);
4730 dev
->data
->dev_conf
.fdir_conf
.mode
=
4734 case RTE_ETH_FILTER_HASH
:
4735 ret
= i40e_config_rss_filter_del(dev
,
4736 (struct i40e_rte_flow_rss_conf
*)flow
->rule
);
4739 PMD_DRV_LOG(WARNING
, "Filter type (%d) not supported",
4746 TAILQ_REMOVE(&pf
->flow_list
, flow
, node
);
4749 rte_flow_error_set(error
, -ret
,
4750 RTE_FLOW_ERROR_TYPE_HANDLE
, NULL
,
4751 "Failed to destroy flow.");
4757 i40e_flow_destroy_ethertype_filter(struct i40e_pf
*pf
,
4758 struct i40e_ethertype_filter
*filter
)
4760 struct i40e_hw
*hw
= I40E_PF_TO_HW(pf
);
4761 struct i40e_ethertype_rule
*ethertype_rule
= &pf
->ethertype
;
4762 struct i40e_ethertype_filter
*node
;
4763 struct i40e_control_filter_stats stats
;
4767 if (!(filter
->flags
& RTE_ETHTYPE_FLAGS_MAC
))
4768 flags
|= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC
;
4769 if (filter
->flags
& RTE_ETHTYPE_FLAGS_DROP
)
4770 flags
|= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP
;
4771 flags
|= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE
;
4773 memset(&stats
, 0, sizeof(stats
));
4774 ret
= i40e_aq_add_rem_control_packet_filter(hw
,
4775 filter
->input
.mac_addr
.addr_bytes
,
4776 filter
->input
.ether_type
,
4777 flags
, pf
->main_vsi
->seid
,
4778 filter
->queue
, 0, &stats
, NULL
);
4782 node
= i40e_sw_ethertype_filter_lookup(ethertype_rule
, &filter
->input
);
4786 ret
= i40e_sw_ethertype_filter_del(pf
, &node
->input
);
4792 i40e_flow_destroy_tunnel_filter(struct i40e_pf
*pf
,
4793 struct i40e_tunnel_filter
*filter
)
4795 struct i40e_hw
*hw
= I40E_PF_TO_HW(pf
);
4796 struct i40e_vsi
*vsi
;
4797 struct i40e_pf_vf
*vf
;
4798 struct i40e_aqc_cloud_filters_element_bb cld_filter
;
4799 struct i40e_tunnel_rule
*tunnel_rule
= &pf
->tunnel
;
4800 struct i40e_tunnel_filter
*node
;
4801 bool big_buffer
= 0;
4804 memset(&cld_filter
, 0, sizeof(cld_filter
));
4805 ether_addr_copy((struct ether_addr
*)&filter
->input
.outer_mac
,
4806 (struct ether_addr
*)&cld_filter
.element
.outer_mac
);
4807 ether_addr_copy((struct ether_addr
*)&filter
->input
.inner_mac
,
4808 (struct ether_addr
*)&cld_filter
.element
.inner_mac
);
4809 cld_filter
.element
.inner_vlan
= filter
->input
.inner_vlan
;
4810 cld_filter
.element
.flags
= filter
->input
.flags
;
4811 cld_filter
.element
.tenant_id
= filter
->input
.tenant_id
;
4812 cld_filter
.element
.queue_number
= filter
->queue
;
4813 rte_memcpy(cld_filter
.general_fields
,
4814 filter
->input
.general_fields
,
4815 sizeof(cld_filter
.general_fields
));
4817 if (!filter
->is_to_vf
)
4820 vf
= &pf
->vfs
[filter
->vf_id
];
4824 if (((filter
->input
.flags
& I40E_AQC_ADD_CLOUD_FILTER_0X11
) ==
4825 I40E_AQC_ADD_CLOUD_FILTER_0X11
) ||
4826 ((filter
->input
.flags
& I40E_AQC_ADD_CLOUD_FILTER_0X12
) ==
4827 I40E_AQC_ADD_CLOUD_FILTER_0X12
) ||
4828 ((filter
->input
.flags
& I40E_AQC_ADD_CLOUD_FILTER_0X10
) ==
4829 I40E_AQC_ADD_CLOUD_FILTER_0X10
))
4833 ret
= i40e_aq_rem_cloud_filters_bb(hw
, vsi
->seid
,
4836 ret
= i40e_aq_rem_cloud_filters(hw
, vsi
->seid
,
4837 &cld_filter
.element
, 1);
4841 node
= i40e_sw_tunnel_filter_lookup(tunnel_rule
, &filter
->input
);
4845 ret
= i40e_sw_tunnel_filter_del(pf
, &node
->input
);
4851 i40e_flow_flush(struct rte_eth_dev
*dev
, struct rte_flow_error
*error
)
4853 struct i40e_pf
*pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
4856 ret
= i40e_flow_flush_fdir_filter(pf
);
4858 rte_flow_error_set(error
, -ret
,
4859 RTE_FLOW_ERROR_TYPE_HANDLE
, NULL
,
4860 "Failed to flush FDIR flows.");
4864 ret
= i40e_flow_flush_ethertype_filter(pf
);
4866 rte_flow_error_set(error
, -ret
,
4867 RTE_FLOW_ERROR_TYPE_HANDLE
, NULL
,
4868 "Failed to ethertype flush flows.");
4872 ret
= i40e_flow_flush_tunnel_filter(pf
);
4874 rte_flow_error_set(error
, -ret
,
4875 RTE_FLOW_ERROR_TYPE_HANDLE
, NULL
,
4876 "Failed to flush tunnel flows.");
4880 ret
= i40e_flow_flush_rss_filter(dev
);
4882 rte_flow_error_set(error
, -ret
,
4883 RTE_FLOW_ERROR_TYPE_HANDLE
, NULL
,
4884 "Failed to flush rss flows.");
4892 i40e_flow_flush_fdir_filter(struct i40e_pf
*pf
)
4894 struct rte_eth_dev
*dev
= pf
->adapter
->eth_dev
;
4895 struct i40e_fdir_info
*fdir_info
= &pf
->fdir
;
4896 struct i40e_fdir_filter
*fdir_filter
;
4897 enum i40e_filter_pctype pctype
;
4898 struct rte_flow
*flow
;
4902 ret
= i40e_fdir_flush(dev
);
4904 /* Delete FDIR filters in FDIR list. */
4905 while ((fdir_filter
= TAILQ_FIRST(&fdir_info
->fdir_list
))) {
4906 ret
= i40e_sw_fdir_filter_del(pf
,
4907 &fdir_filter
->fdir
.input
);
4912 /* Delete FDIR flows in flow list. */
4913 TAILQ_FOREACH_SAFE(flow
, &pf
->flow_list
, node
, temp
) {
4914 if (flow
->filter_type
== RTE_ETH_FILTER_FDIR
) {
4915 TAILQ_REMOVE(&pf
->flow_list
, flow
, node
);
4920 for (pctype
= I40E_FILTER_PCTYPE_NONF_IPV4_UDP
;
4921 pctype
<= I40E_FILTER_PCTYPE_L2_PAYLOAD
; pctype
++)
4922 pf
->fdir
.inset_flag
[pctype
] = 0;
4925 i40e_fdir_teardown(pf
);
4930 /* Flush all ethertype filters */
4932 i40e_flow_flush_ethertype_filter(struct i40e_pf
*pf
)
4934 struct i40e_ethertype_filter_list
4935 *ethertype_list
= &pf
->ethertype
.ethertype_list
;
4936 struct i40e_ethertype_filter
*filter
;
4937 struct rte_flow
*flow
;
4941 while ((filter
= TAILQ_FIRST(ethertype_list
))) {
4942 ret
= i40e_flow_destroy_ethertype_filter(pf
, filter
);
4947 /* Delete ethertype flows in flow list. */
4948 TAILQ_FOREACH_SAFE(flow
, &pf
->flow_list
, node
, temp
) {
4949 if (flow
->filter_type
== RTE_ETH_FILTER_ETHERTYPE
) {
4950 TAILQ_REMOVE(&pf
->flow_list
, flow
, node
);
4958 /* Flush all tunnel filters */
4960 i40e_flow_flush_tunnel_filter(struct i40e_pf
*pf
)
4962 struct i40e_tunnel_filter_list
4963 *tunnel_list
= &pf
->tunnel
.tunnel_list
;
4964 struct i40e_tunnel_filter
*filter
;
4965 struct rte_flow
*flow
;
4969 while ((filter
= TAILQ_FIRST(tunnel_list
))) {
4970 ret
= i40e_flow_destroy_tunnel_filter(pf
, filter
);
4975 /* Delete tunnel flows in flow list. */
4976 TAILQ_FOREACH_SAFE(flow
, &pf
->flow_list
, node
, temp
) {
4977 if (flow
->filter_type
== RTE_ETH_FILTER_TUNNEL
) {
4978 TAILQ_REMOVE(&pf
->flow_list
, flow
, node
);
4986 /* remove the rss filter */
4988 i40e_flow_flush_rss_filter(struct rte_eth_dev
*dev
)
4990 struct i40e_pf
*pf
= I40E_DEV_PRIVATE_TO_PF(dev
->data
->dev_private
);
4991 struct i40e_rte_flow_rss_conf
*rss_info
= &pf
->rss_info
;
4992 struct i40e_hw
*hw
= I40E_DEV_PRIVATE_TO_HW(dev
->data
->dev_private
);
4993 int32_t ret
= -EINVAL
;
4995 ret
= i40e_flush_queue_region_all_conf(dev
, hw
, pf
, 0);
4997 if (rss_info
->conf
.queue_num
)
4998 ret
= i40e_config_rss_filter(pf
, rss_info
, FALSE
);