]> git.proxmox.com Git - mirror_ovs.git/blob - include/sparse/rte_flow.h
a36ab45e7ca2f697b37110d4c2fe2e26e18346d6
[mirror_ovs.git] / include / sparse / rte_flow.h
1 /*-
2 * BSD LICENSE
3 *
4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #ifndef __CHECKER__
35 #error "Use this header only with sparse. It is not a correct implementation."
36 #endif
37
38 #ifndef RTE_FLOW_H_
39 #define RTE_FLOW_H_
40
41 /**
42 * @file
43 * RTE generic flow API
44 *
45 * This interface provides the ability to program packet matching and
46 * associated actions in hardware through flow rules.
47 */
48
49 #include <rte_arp.h>
50 #include <rte_ether.h>
51 #include <rte_icmp.h>
52 #include <rte_ip.h>
53 #include <rte_sctp.h>
54 #include <rte_tcp.h>
55 #include <rte_udp.h>
56 #include <rte_byteorder.h>
57 #include <rte_esp.h>
58
59 #ifdef __cplusplus
60 extern "C" {
61 #endif
62
/**
 * Flow rule attributes.
 *
 * Priorities are set on two levels: per group and per rule within groups.
 *
 * Lower values denote higher priority, the highest priority for both levels
 * is 0, so that a rule with priority 0 in group 8 is always matched after a
 * rule with priority 8 in group 0.
 *
 * Although optional, applications are encouraged to group similar rules as
 * much as possible to fully take advantage of hardware capabilities
 * (e.g. optimized matching) and work around limitations (e.g. a single
 * pattern type possibly allowed in a given group).
 *
 * Group and priority levels are arbitrary and up to the application, they
 * do not need to be contiguous nor start from 0, however the maximum number
 * varies between devices and may be affected by existing flow rules.
 *
 * If a packet is matched by several rules of a given group for a given
 * priority level, the outcome is undefined. It can take any path, may be
 * duplicated or even cause unrecoverable errors.
 *
 * Note that support for more than a single group and priority level is not
 * guaranteed.
 *
 * Flow rules can apply to inbound and/or outbound traffic (ingress/egress).
 *
 * Several pattern items and actions are valid and can be used in both
 * directions. Those valid for only one direction are described as such.
 *
 * At least one direction must be specified.
 *
 * Specifying both directions at once for a given rule is not recommended
 * but may be valid in a few cases (e.g. shared counter).
 */
struct rte_flow_attr {
    uint32_t group; /**< Priority group. */
    uint32_t priority; /**< Priority level within group (0 = highest). */
    uint32_t ingress:1; /**< Rule applies to ingress traffic. */
    uint32_t egress:1; /**< Rule applies to egress traffic. */
    uint32_t reserved:30; /**< Reserved, must be zero. */
};
105
/**
 * Matching pattern item types.
 *
 * Pattern items fall in two categories:
 *
 * - Matching protocol headers and packet data (ANY, RAW, ETH, VLAN, IPV4,
 *   IPV6, ICMP, UDP, TCP, SCTP, VXLAN and so on), usually associated with a
 *   specification structure. These must be stacked in the same order as the
 *   protocol layers to match, starting from the lowest.
 *
 * - Matching meta-data or affecting pattern processing (END, VOID, INVERT,
 *   PF, VF, PORT and so on), often without a specification structure. Since
 *   they do not match packet contents, these can be specified anywhere
 *   within item lists without affecting others.
 *
 * See the description of individual types for more information. Those
 * marked with [META] fall into the second category.
 */
enum rte_flow_item_type {
    /**
     * [META]
     *
     * End marker for item lists. Prevents further processing of items,
     * thereby ending the pattern.
     *
     * No associated specification structure.
     */
    RTE_FLOW_ITEM_TYPE_END,

    /**
     * [META]
     *
     * Used as a placeholder for convenience. It is ignored and simply
     * discarded by PMDs.
     *
     * No associated specification structure.
     */
    RTE_FLOW_ITEM_TYPE_VOID,

    /**
     * [META]
     *
     * Inverted matching, i.e. process packets that do not match the
     * pattern.
     *
     * No associated specification structure.
     */
    RTE_FLOW_ITEM_TYPE_INVERT,

    /**
     * Matches any protocol in place of the current layer, a single ANY
     * may also stand for several protocol layers.
     *
     * See struct rte_flow_item_any.
     */
    RTE_FLOW_ITEM_TYPE_ANY,

    /**
     * [META]
     *
     * Matches packets addressed to the physical function of the device.
     *
     * If the underlying device function differs from the one that would
     * normally receive the matched traffic, specifying this item
     * prevents it from reaching that device unless the flow rule
     * contains a PF action. Packets are not duplicated between device
     * instances by default.
     *
     * No associated specification structure.
     */
    RTE_FLOW_ITEM_TYPE_PF,

    /**
     * [META]
     *
     * Matches packets addressed to a virtual function ID of the device.
     *
     * If the underlying device function differs from the one that would
     * normally receive the matched traffic, specifying this item
     * prevents it from reaching that device unless the flow rule
     * contains a VF action. Packets are not duplicated between device
     * instances by default.
     *
     * See struct rte_flow_item_vf.
     */
    RTE_FLOW_ITEM_TYPE_VF,

    /**
     * [META]
     *
     * Matches packets coming from the specified physical port of the
     * underlying device.
     *
     * The first PORT item overrides the physical port normally
     * associated with the specified DPDK input port (port_id). This
     * item can be provided several times to match additional physical
     * ports.
     *
     * See struct rte_flow_item_port.
     */
    RTE_FLOW_ITEM_TYPE_PORT,

    /**
     * Matches a byte string of a given length at a given offset.
     *
     * See struct rte_flow_item_raw.
     */
    RTE_FLOW_ITEM_TYPE_RAW,

    /**
     * Matches an Ethernet header.
     *
     * See struct rte_flow_item_eth.
     */
    RTE_FLOW_ITEM_TYPE_ETH,

    /**
     * Matches an 802.1Q/ad VLAN tag.
     *
     * See struct rte_flow_item_vlan.
     */
    RTE_FLOW_ITEM_TYPE_VLAN,

    /**
     * Matches an IPv4 header.
     *
     * See struct rte_flow_item_ipv4.
     */
    RTE_FLOW_ITEM_TYPE_IPV4,

    /**
     * Matches an IPv6 header.
     *
     * See struct rte_flow_item_ipv6.
     */
    RTE_FLOW_ITEM_TYPE_IPV6,

    /**
     * Matches an ICMP header.
     *
     * See struct rte_flow_item_icmp.
     */
    RTE_FLOW_ITEM_TYPE_ICMP,

    /**
     * Matches a UDP header.
     *
     * See struct rte_flow_item_udp.
     */
    RTE_FLOW_ITEM_TYPE_UDP,

    /**
     * Matches a TCP header.
     *
     * See struct rte_flow_item_tcp.
     */
    RTE_FLOW_ITEM_TYPE_TCP,

    /**
     * Matches an SCTP header.
     *
     * See struct rte_flow_item_sctp.
     */
    RTE_FLOW_ITEM_TYPE_SCTP,

    /**
     * Matches a VXLAN header.
     *
     * See struct rte_flow_item_vxlan.
     */
    RTE_FLOW_ITEM_TYPE_VXLAN,

    /**
     * Matches an E-Tag header.
     *
     * See struct rte_flow_item_e_tag.
     */
    RTE_FLOW_ITEM_TYPE_E_TAG,

    /**
     * Matches an NVGRE header.
     *
     * See struct rte_flow_item_nvgre.
     */
    RTE_FLOW_ITEM_TYPE_NVGRE,

    /**
     * Matches an MPLS header.
     *
     * See struct rte_flow_item_mpls.
     */
    RTE_FLOW_ITEM_TYPE_MPLS,

    /**
     * Matches a GRE header.
     *
     * See struct rte_flow_item_gre.
     */
    RTE_FLOW_ITEM_TYPE_GRE,

    /**
     * [META]
     *
     * Fuzzy pattern match, expected to be faster than default (exact)
     * matching.
     *
     * This is for devices that support the fuzzy matching option.
     * Usually a fuzzy matching is fast but the cost is accuracy.
     *
     * See struct rte_flow_item_fuzzy.
     */
    RTE_FLOW_ITEM_TYPE_FUZZY,

    /**
     * Matches a GTP header.
     *
     * Configure flow for GTP packets.
     *
     * See struct rte_flow_item_gtp.
     */
    RTE_FLOW_ITEM_TYPE_GTP,

    /**
     * Matches a GTP header.
     *
     * Configure flow for GTP-C packets.
     *
     * See struct rte_flow_item_gtp.
     */
    RTE_FLOW_ITEM_TYPE_GTPC,

    /**
     * Matches a GTP header.
     *
     * Configure flow for GTP-U packets.
     *
     * See struct rte_flow_item_gtp.
     */
    RTE_FLOW_ITEM_TYPE_GTPU,

    /**
     * Matches an ESP header.
     *
     * See struct rte_flow_item_esp.
     */
    RTE_FLOW_ITEM_TYPE_ESP,
};
352
/**
 * RTE_FLOW_ITEM_TYPE_ANY
 *
 * Matches any protocol in place of the current layer, a single ANY may also
 * stand for several protocol layers.
 *
 * This is usually specified as the first pattern item when looking for a
 * protocol anywhere in a packet.
 *
 * A zeroed mask stands for any number of layers.
 */
struct rte_flow_item_any {
    uint32_t num; /**< Number of layers covered. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_ANY. */
#ifndef __cplusplus
static const struct rte_flow_item_any rte_flow_item_any_mask = {
    .num = 0x00000000, /* Zeroed: any number of layers by default. */
};
#endif
374
/**
 * RTE_FLOW_ITEM_TYPE_VF
 *
 * Matches packets addressed to a virtual function ID of the device.
 *
 * If the underlying device function differs from the one that would
 * normally receive the matched traffic, specifying this item prevents it
 * from reaching that device unless the flow rule contains a VF
 * action. Packets are not duplicated between device instances by default.
 *
 * - Likely to return an error or never match any traffic if this causes a
 *   VF device to match traffic addressed to a different VF.
 * - Can be specified multiple times to match traffic addressed to several
 *   VF IDs.
 * - Can be combined with a PF item to match both PF and VF traffic.
 *
 * A zeroed mask can be used to match any VF ID.
 */
struct rte_flow_item_vf {
    uint32_t id; /**< Destination VF ID. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_VF. */
#ifndef __cplusplus
static const struct rte_flow_item_vf rte_flow_item_vf_mask = {
    .id = 0x00000000, /* Zeroed: matches any VF ID by default. */
};
#endif
403
/**
 * RTE_FLOW_ITEM_TYPE_PORT
 *
 * Matches packets coming from the specified physical port of the underlying
 * device.
 *
 * The first PORT item overrides the physical port normally associated with
 * the specified DPDK input port (port_id). This item can be provided
 * several times to match additional physical ports.
 *
 * Note that physical ports are not necessarily tied to DPDK input ports
 * (port_id) when those are not under DPDK control. Possible values are
 * specific to each device, they are not necessarily indexed from zero and
 * may not be contiguous.
 *
 * As a device property, the list of allowed values as well as the value
 * associated with a port_id should be retrieved by other means.
 *
 * A zeroed mask can be used to match any port index.
 */
struct rte_flow_item_port {
    uint32_t index; /**< Physical port index. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_PORT. */
#ifndef __cplusplus
static const struct rte_flow_item_port rte_flow_item_port_mask = {
    .index = 0x00000000, /* Zeroed: matches any port index by default. */
};
#endif
434
/**
 * RTE_FLOW_ITEM_TYPE_RAW
 *
 * Matches a byte string of a given length at a given offset.
 *
 * Offset is either absolute (using the start of the packet) or relative to
 * the end of the previous matched item in the stack, in which case negative
 * values are allowed.
 *
 * If search is enabled, offset is used as the starting point. The search
 * area can be delimited by setting limit to a nonzero value, which is the
 * maximum number of bytes after offset where the pattern may start.
 *
 * Matching a zero-length pattern is allowed, doing so resets the relative
 * offset for subsequent items.
 *
 * This type does not support ranges (struct rte_flow_item.last).
 */
struct rte_flow_item_raw {
    uint32_t relative:1; /**< Look for pattern after the previous item. */
    uint32_t search:1; /**< Search pattern from offset (see also limit). */
    uint32_t reserved:30; /**< Reserved, must be set to zero. */
    int32_t offset; /**< Absolute or relative offset for pattern. */
    uint16_t limit; /**< Search area limit for start of pattern. */
    uint16_t length; /**< Pattern length. */
    uint8_t pattern[]; /**< Byte string to look for (flexible array). */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_RAW (all fields relevant). */
#ifndef __cplusplus
static const struct rte_flow_item_raw rte_flow_item_raw_mask = {
    .relative = 1,
    .search = 1,
    .reserved = 0x3fffffff,
    .offset = 0xffffffff, /* All-ones mask; note the field is signed. */
    .limit = 0xffff,
    .length = 0xffff,
};
#endif
474
/**
 * RTE_FLOW_ITEM_TYPE_ETH
 *
 * Matches an Ethernet header.
 */
struct rte_flow_item_eth {
    struct ether_addr dst; /**< Destination MAC. */
    struct ether_addr src; /**< Source MAC. */
    rte_be16_t type; /**< EtherType (network byte order). */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_ETH. */
#ifndef __cplusplus
static const struct rte_flow_item_eth rte_flow_item_eth_mask = {
    .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
    .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
    .type = RTE_BE16(0x0000), /* EtherType not matched by default. */
};
#endif
494
/**
 * RTE_FLOW_ITEM_TYPE_VLAN
 *
 * Matches an 802.1Q/ad VLAN tag.
 *
 * This type normally follows either RTE_FLOW_ITEM_TYPE_ETH or
 * RTE_FLOW_ITEM_TYPE_VLAN.
 */
struct rte_flow_item_vlan {
    rte_be16_t tpid; /**< Tag protocol identifier. */
    rte_be16_t tci; /**< Tag control information. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_VLAN. */
#ifndef __cplusplus
static const struct rte_flow_item_vlan rte_flow_item_vlan_mask = {
    .tpid = RTE_BE16(0x0000), /* TPID not matched by default. */
    .tci = RTE_BE16(0xffff), /* Full TCI matched by default. */
};
#endif
515
/**
 * RTE_FLOW_ITEM_TYPE_IPV4
 *
 * Matches an IPv4 header.
 *
 * Note: IPv4 options are handled by dedicated pattern items.
 */
struct rte_flow_item_ipv4 {
    struct ipv4_hdr hdr; /**< IPv4 header definition. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_IPV4 (source/destination only). */
#ifndef __cplusplus
static const struct rte_flow_item_ipv4 rte_flow_item_ipv4_mask = {
    .hdr = {
        .src_addr = RTE_BE32(0xffffffff),
        .dst_addr = RTE_BE32(0xffffffff),
    },
};
#endif
536
/**
 * RTE_FLOW_ITEM_TYPE_IPV6.
 *
 * Matches an IPv6 header.
 *
 * Note: IPv6 options are handled by dedicated pattern items.
 */
struct rte_flow_item_ipv6 {
    struct ipv6_hdr hdr; /**< IPv6 header definition. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_IPV6 (source/destination only). */
#ifndef __cplusplus
static const struct rte_flow_item_ipv6 rte_flow_item_ipv6_mask = {
    .hdr = {
        .src_addr =
            "\xff\xff\xff\xff\xff\xff\xff\xff"
            "\xff\xff\xff\xff\xff\xff\xff\xff",
        .dst_addr =
            "\xff\xff\xff\xff\xff\xff\xff\xff"
            "\xff\xff\xff\xff\xff\xff\xff\xff",
    },
};
#endif
561
/**
 * RTE_FLOW_ITEM_TYPE_ICMP.
 *
 * Matches an ICMP header.
 */
struct rte_flow_item_icmp {
    struct icmp_hdr hdr; /**< ICMP header definition. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP (type and code only). */
#ifndef __cplusplus
static const struct rte_flow_item_icmp rte_flow_item_icmp_mask = {
    .hdr = {
        .icmp_type = 0xff,
        .icmp_code = 0xff,
    },
};
#endif
580
/**
 * RTE_FLOW_ITEM_TYPE_UDP.
 *
 * Matches a UDP header.
 */
struct rte_flow_item_udp {
    struct udp_hdr hdr; /**< UDP header definition. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_UDP (source/destination ports). */
#ifndef __cplusplus
static const struct rte_flow_item_udp rte_flow_item_udp_mask = {
    .hdr = {
        .src_port = RTE_BE16(0xffff),
        .dst_port = RTE_BE16(0xffff),
    },
};
#endif
599
/**
 * RTE_FLOW_ITEM_TYPE_TCP.
 *
 * Matches a TCP header.
 */
struct rte_flow_item_tcp {
    struct tcp_hdr hdr; /**< TCP header definition. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_TCP (source/destination ports). */
#ifndef __cplusplus
static const struct rte_flow_item_tcp rte_flow_item_tcp_mask = {
    .hdr = {
        .src_port = RTE_BE16(0xffff),
        .dst_port = RTE_BE16(0xffff),
    },
};
#endif
618
/**
 * RTE_FLOW_ITEM_TYPE_SCTP.
 *
 * Matches an SCTP header.
 */
struct rte_flow_item_sctp {
    struct sctp_hdr hdr; /**< SCTP header definition. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_SCTP (source/destination ports). */
#ifndef __cplusplus
static const struct rte_flow_item_sctp rte_flow_item_sctp_mask = {
    .hdr = {
        .src_port = RTE_BE16(0xffff),
        .dst_port = RTE_BE16(0xffff),
    },
};
#endif
637
/**
 * RTE_FLOW_ITEM_TYPE_VXLAN.
 *
 * Matches a VXLAN header (RFC 7348).
 */
struct rte_flow_item_vxlan {
    uint8_t flags; /**< Normally 0x08 (I flag). */
    uint8_t rsvd0[3]; /**< Reserved, normally 0x000000. */
    uint8_t vni[3]; /**< VXLAN identifier. */
    uint8_t rsvd1; /**< Reserved, normally 0x00. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_VXLAN (VNI only). */
#ifndef __cplusplus
static const struct rte_flow_item_vxlan rte_flow_item_vxlan_mask = {
    .vni = "\xff\xff\xff",
};
#endif
656
/**
 * RTE_FLOW_ITEM_TYPE_E_TAG.
 *
 * Matches an E-Tag header (IEEE 802.1BR).
 */
struct rte_flow_item_e_tag {
    rte_be16_t tpid; /**< Tag protocol identifier (0x893F). */
    /**
     * E-Tag control information (E-TCI).
     * E-PCP (3b), E-DEI (1b), ingress E-CID base (12b).
     */
    rte_be16_t epcp_edei_in_ecid_b;
    /** Reserved (2b), GRP (2b), E-CID base (12b). */
    rte_be16_t rsvd_grp_ecid_b;
    uint8_t in_ecid_e; /**< Ingress E-CID ext. */
    uint8_t ecid_e; /**< E-CID ext. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_E_TAG. */
#ifndef __cplusplus
static const struct rte_flow_item_e_tag rte_flow_item_e_tag_mask = {
    /* 0x3fff: GRP (2b) and E-CID base (12b); reserved bits ignored. */
    .rsvd_grp_ecid_b = RTE_BE16(0x3fff),
};
#endif
681
/**
 * RTE_FLOW_ITEM_TYPE_NVGRE.
 *
 * Matches an NVGRE header.
 */
struct rte_flow_item_nvgre {
    /**
     * Checksum (1b), undefined (1b), key bit (1b), sequence number (1b),
     * reserved 0 (9b), version (3b).
     *
     * c_k_s_rsvd0_ver must have value 0x2000 according to RFC 7637.
     */
    rte_be16_t c_k_s_rsvd0_ver;
    rte_be16_t protocol; /**< Protocol type (0x6558). */
    uint8_t tni[3]; /**< Virtual subnet ID. */
    uint8_t flow_id; /**< Flow ID. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_NVGRE (TNI only). */
#ifndef __cplusplus
static const struct rte_flow_item_nvgre rte_flow_item_nvgre_mask = {
    .tni = "\xff\xff\xff",
};
#endif
706
/**
 * RTE_FLOW_ITEM_TYPE_MPLS.
 *
 * Matches an MPLS header.
 */
struct rte_flow_item_mpls {
    /**
     * Label (20b), TC (3b), Bottom of Stack (1b).
     */
    uint8_t label_tc_s[3];
    uint8_t ttl; /**< Time-to-Live. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_MPLS. */
#ifndef __cplusplus
static const struct rte_flow_item_mpls rte_flow_item_mpls_mask = {
    /* 0xfffff0: label (20b) only; TC, BoS and TTL ignored by default. */
    .label_tc_s = "\xff\xff\xf0",
};
#endif
726
/**
 * RTE_FLOW_ITEM_TYPE_GRE.
 *
 * Matches a GRE header.
 */
struct rte_flow_item_gre {
    /**
     * Checksum (1b), reserved 0 (12b), version (3b).
     * Refer to RFC 2784.
     */
    rte_be16_t c_rsvd0_ver;
    rte_be16_t protocol; /**< Protocol type. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_GRE (protocol type only). */
#ifndef __cplusplus
static const struct rte_flow_item_gre rte_flow_item_gre_mask = {
    .protocol = RTE_BE16(0xffff),
};
#endif
747
/**
 * RTE_FLOW_ITEM_TYPE_FUZZY
 *
 * Fuzzy pattern match, expected to be faster than default (exact) matching.
 *
 * This is for devices that support the fuzzy match option.
 * Usually a fuzzy match is fast but the cost is accuracy,
 * i.e. a signature match only matches a pattern's hash value, and it is
 * possible for two different patterns to have the same hash value.
 *
 * Matching accuracy level can be configured by threshold.
 * The driver can divide the range of threshold and map it to different
 * accuracy levels that the device supports.
 *
 * Threshold 0 means perfect match (no fuzziness), while threshold
 * 0xffffffff means fuzziest match.
 */
struct rte_flow_item_fuzzy {
    uint32_t thresh; /**< Accuracy threshold. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_FUZZY. */
#ifndef __cplusplus
static const struct rte_flow_item_fuzzy rte_flow_item_fuzzy_mask = {
    .thresh = 0xffffffff,
};
#endif
775
/**
 * RTE_FLOW_ITEM_TYPE_GTP.
 *
 * Matches a GTPv1 header.
 */
struct rte_flow_item_gtp {
    /**
     * Version (3b), protocol type (1b), reserved (1b),
     * Extension header flag (1b),
     * Sequence number flag (1b),
     * N-PDU number flag (1b).
     */
    uint8_t v_pt_rsv_flags;
    uint8_t msg_type; /**< Message type. */
    rte_be16_t msg_len; /**< Message length. */
    rte_be32_t teid; /**< Tunnel endpoint identifier. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_GTP (TEID only). */
#ifndef __cplusplus
static const struct rte_flow_item_gtp rte_flow_item_gtp_mask = {
    .teid = RTE_BE32(0xffffffff),
};
#endif
800
/**
 * RTE_FLOW_ITEM_TYPE_ESP
 *
 * Matches an ESP header.
 */
struct rte_flow_item_esp {
    struct esp_hdr hdr; /**< ESP header definition. */
};

/** Default mask for RTE_FLOW_ITEM_TYPE_ESP (SPI only). */
#ifndef __cplusplus
static const struct rte_flow_item_esp rte_flow_item_esp_mask = {
    .hdr = {
        /* NOTE(review): OVS_BE32_MAX rather than RTE_BE32(0xffffffff);
         * presumably spi is declared with OVS's sparse byte-order type
         * in the companion esp header -- confirm if touching this. */
        .spi = OVS_BE32_MAX,
    },
};
#endif
818
/**
 * Matching pattern item definition.
 *
 * A pattern is formed by stacking items starting from the lowest protocol
 * layer to match. This stacking restriction does not apply to meta items
 * which can be placed anywhere in the stack without affecting the meaning
 * of the resulting pattern.
 *
 * Patterns are terminated by END items.
 *
 * The spec field should be a valid pointer to a structure of the related
 * item type. It may remain unspecified (NULL) in many cases to request
 * broad (nonspecific) matching. In such cases, last and mask must also be
 * set to NULL.
 *
 * Optionally, last can point to a structure of the same type to define an
 * inclusive range. This is mostly supported by integer and address fields,
 * may cause errors otherwise. Fields that do not support ranges must be set
 * to 0 or to the same value as the corresponding fields in spec.
 *
 * Only the fields defined to nonzero values in the default masks (see
 * rte_flow_item_{name}_mask constants) are considered relevant by
 * default. This can be overridden by providing a mask structure of the
 * same type with applicable bits set to one. It can also be used to
 * partially filter out specific fields (e.g. as an alternate means to match
 * ranges of IP addresses).
 *
 * Mask is a simple bit-mask applied before interpreting the contents of
 * spec and last, which may yield unexpected results if not used
 * carefully. For example, if for an IPv4 address field, spec provides
 * 10.1.2.3, last provides 10.3.4.5 and mask provides 255.255.0.0, the
 * effective range becomes 10.1.0.0 to 10.3.255.255.
 */
struct rte_flow_item {
    enum rte_flow_item_type type; /**< Item type. */
    const void *spec; /**< Pointer to item specification structure. */
    const void *last; /**< Defines an inclusive range (spec to last). */
    const void *mask; /**< Bit-mask applied to spec and last. */
};
858
/**
 * Action types.
 *
 * Each possible action is represented by a type. Some have associated
 * configuration structures. Several actions combined in a list can be
 * assigned to a flow rule. That list is not ordered.
 *
 * They fall in three categories:
 *
 * - Terminating actions (such as QUEUE, DROP, RSS, PF, VF) that prevent
 *   processing matched packets by subsequent flow rules, unless overridden
 *   with PASSTHRU.
 *
 * - Non terminating actions (PASSTHRU, DUP) that leave matched packets up
 *   for additional processing by subsequent flow rules.
 *
 * - Other non terminating meta actions that do not affect the fate of
 *   packets (END, VOID, MARK, FLAG, COUNT).
 *
 * When several actions are combined in a flow rule, they should all have
 * different types (e.g. dropping a packet twice is not possible).
 *
 * Only the last action of a given type is taken into account. PMDs still
 * perform error checking on the entire list.
 *
 * Note that PASSTHRU is the only action able to override a terminating
 * rule.
 */
enum rte_flow_action_type {
    /**
     * [META]
     *
     * End marker for action lists. Prevents further processing of
     * actions, thereby ending the list.
     *
     * No associated configuration structure.
     */
    RTE_FLOW_ACTION_TYPE_END,

    /**
     * [META]
     *
     * Used as a placeholder for convenience. It is ignored and simply
     * discarded by PMDs.
     *
     * No associated configuration structure.
     */
    RTE_FLOW_ACTION_TYPE_VOID,

    /**
     * Leaves packets up for additional processing by subsequent flow
     * rules. This is the default when a rule does not contain a
     * terminating action, but can be specified to force a rule to
     * become non-terminating.
     *
     * No associated configuration structure.
     */
    RTE_FLOW_ACTION_TYPE_PASSTHRU,

    /**
     * [META]
     *
     * Attaches an integer value to packets and sets PKT_RX_FDIR and
     * PKT_RX_FDIR_ID mbuf flags.
     *
     * See struct rte_flow_action_mark.
     */
    RTE_FLOW_ACTION_TYPE_MARK,

    /**
     * [META]
     *
     * Flags packets. Similar to MARK without a specific value; only
     * sets the PKT_RX_FDIR mbuf flag.
     *
     * No associated configuration structure.
     */
    RTE_FLOW_ACTION_TYPE_FLAG,

    /**
     * Assigns packets to a given queue index.
     *
     * See struct rte_flow_action_queue.
     */
    RTE_FLOW_ACTION_TYPE_QUEUE,

    /**
     * Drops packets.
     *
     * PASSTHRU overrides this action if both are specified.
     *
     * No associated configuration structure.
     */
    RTE_FLOW_ACTION_TYPE_DROP,

    /**
     * [META]
     *
     * Enables counters for this rule.
     *
     * These counters can be retrieved and reset through rte_flow_query(),
     * see struct rte_flow_query_count.
     *
     * No associated configuration structure.
     */
    RTE_FLOW_ACTION_TYPE_COUNT,

    /**
     * Duplicates packets to a given queue index.
     *
     * This is normally combined with QUEUE, however when used alone, it
     * is actually similar to QUEUE + PASSTHRU.
     *
     * See struct rte_flow_action_dup.
     */
    RTE_FLOW_ACTION_TYPE_DUP,

    /**
     * Similar to QUEUE, except RSS is additionally performed on packets
     * to spread them among several queues according to the provided
     * parameters.
     *
     * See struct rte_flow_action_rss.
     */
    RTE_FLOW_ACTION_TYPE_RSS,

    /**
     * Redirects packets to the physical function (PF) of the current
     * device.
     *
     * No associated configuration structure.
     */
    RTE_FLOW_ACTION_TYPE_PF,

    /**
     * Redirects packets to the virtual function (VF) of the current
     * device with the specified ID.
     *
     * See struct rte_flow_action_vf.
     */
    RTE_FLOW_ACTION_TYPE_VF,

    /**
     * Traffic metering and policing (MTR).
     *
     * See struct rte_flow_action_meter.
     * See file rte_mtr.h for MTR object configuration.
     */
    RTE_FLOW_ACTION_TYPE_METER,

    /**
     * Redirects packets to security engine of current device for security
     * processing as specified by security session.
     *
     * See struct rte_flow_action_security.
     */
    RTE_FLOW_ACTION_TYPE_SECURITY
};
1017
/**
 * RTE_FLOW_ACTION_TYPE_MARK
 *
 * Attaches an integer value to packets and sets PKT_RX_FDIR and
 * PKT_RX_FDIR_ID mbuf flags.
 *
 * This value is arbitrary and application-defined. Maximum allowed value
 * depends on the underlying implementation. It is returned in the
 * hash.fdir.hi mbuf field.
 */
struct rte_flow_action_mark {
    uint32_t id; /**< Integer value to return with packets. */
};
1031
/**
 * RTE_FLOW_ACTION_TYPE_QUEUE
 *
 * Assign packets to a given queue index.
 *
 * Terminating by default.
 */
struct rte_flow_action_queue {
    uint16_t index; /**< Queue index to use. */
};
1042
/**
 * RTE_FLOW_ACTION_TYPE_COUNT (query)
 *
 * Query structure to retrieve and reset flow rule counters.
 */
struct rte_flow_query_count {
    uint32_t reset:1; /**< Reset counters after query [in]. */
    uint32_t hits_set:1; /**< hits field is set [out]. */
    uint32_t bytes_set:1; /**< bytes field is set [out]. */
    uint32_t reserved:29; /**< Reserved, must be zero [in, out]. */
    uint64_t hits; /**< Number of hits for this rule [out]. */
    uint64_t bytes; /**< Number of bytes through this rule [out]. */
};
1056
/**
 * RTE_FLOW_ACTION_TYPE_DUP
 *
 * Duplicates packets to a given queue index.
 *
 * This is normally combined with QUEUE, however when used alone, it is
 * actually similar to QUEUE + PASSTHRU.
 *
 * Non-terminating by default.
 */
struct rte_flow_action_dup {
    uint16_t index; /**< Queue index to duplicate packets to. */
};
1070
/**
 * RTE_FLOW_ACTION_TYPE_RSS
 *
 * Similar to QUEUE, except RSS is additionally performed on packets to
 * spread them among several queues according to the provided parameters.
 *
 * Note: RSS hash result is stored in the hash.rss mbuf field which overlaps
 * hash.fdir.lo. Since the MARK action sets the hash.fdir.hi field only,
 * both can be requested simultaneously.
 *
 * Terminating by default.
 */
struct rte_flow_action_rss {
    const struct rte_eth_rss_conf *rss_conf; /**< RSS parameters. */
    uint16_t num; /**< Number of entries in queue[]. */
    uint16_t queue[]; /**< Queue indices to use (flexible array). */
};
1088
/**
 * RTE_FLOW_ACTION_TYPE_VF
 *
 * Redirects packets to a virtual function (VF) of the current device.
 *
 * Packets matched by a VF pattern item can be redirected to their original
 * VF ID instead of the specified one. This parameter may not be available
 * and is not guaranteed to work properly if the VF part is matched by a
 * prior flow rule or if packets are not addressed to a VF in the first
 * place.
 *
 * Terminating by default.
 */
struct rte_flow_action_vf {
        uint32_t original:1; /**< Use original VF ID if possible. */
        uint32_t reserved:31; /**< Reserved, must be zero. */
        uint32_t id; /**< VF ID to redirect packets to. */
};
1107
/**
 * RTE_FLOW_ACTION_TYPE_METER
 *
 * Traffic metering and policing (MTR).
 *
 * Packets matched by items of this type can be either dropped or passed to the
 * next item with their color set by the MTR object.
 * (NOTE(review): "items of this type" presumably means "flow rules using
 * this action" — wording inherited from upstream DPDK.)
 *
 * Non-terminating by default.
 */
struct rte_flow_action_meter {
        uint32_t mtr_id; /**< MTR object ID created with rte_mtr_create(). */
};
1121
/**
 * RTE_FLOW_ACTION_TYPE_SECURITY
 *
 * Perform the security action on flows matched by the pattern items
 * according to the configuration of the security session.
 *
 * This action modifies the payload of matched flows. For INLINE_CRYPTO, the
 * security protocol headers and IV are fully provided by the application as
 * specified in the flow pattern. The payload of matching packets is
 * encrypted on egress, and decrypted and authenticated on ingress.
 * For INLINE_PROTOCOL, the security protocol is fully offloaded to HW,
 * providing full encapsulation and decapsulation of packets in security
 * protocols. The flow pattern specifies both the outer security header fields
 * and the inner packet fields. The security session specified in the action
 * must match the pattern parameters.
 *
 * The security session specified in the action must be created on the same
 * port as the flow action that is being specified.
 *
 * The ingress/egress flow attribute should match that specified in the
 * security session if the security session supports the definition of the
 * direction.
 *
 * Multiple flows can be configured to use the same security session.
 *
 * Non-terminating by default.
 */
struct rte_flow_action_security {
        /* Opaque handle; its concrete type is defined by the security API,
         * not by this header. */
        void *security_session; /**< Pointer to security session structure. */
};
1152
/**
 * Definition of a single action.
 *
 * A list of actions is terminated by an END action.
 *
 * For simple actions without a configuration structure, conf remains NULL.
 */
struct rte_flow_action {
        enum rte_flow_action_type type; /**< Action type. */
        const void *conf; /**< Pointer to action configuration structure. */
};
1164
/**
 * Opaque type returned after successfully creating a flow.
 *
 * This handle can be used to manage and query the related flow (e.g. to
 * destroy it or retrieve counters). Intentionally an incomplete type:
 * applications only ever hold pointers to it.
 */
struct rte_flow;
1172
/**
 * Verbose error types.
 *
 * Most of them provide the type of the object referenced by struct
 * rte_flow_error.cause.
 */
enum rte_flow_error_type {
        RTE_FLOW_ERROR_TYPE_NONE, /**< No error. */
        RTE_FLOW_ERROR_TYPE_UNSPECIFIED, /**< Cause unspecified. */
        RTE_FLOW_ERROR_TYPE_HANDLE, /**< Flow rule (handle). */
        RTE_FLOW_ERROR_TYPE_ATTR_GROUP, /**< Group field. */
        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, /**< Priority field. */
        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, /**< Ingress field. */
        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, /**< Egress field. */
        RTE_FLOW_ERROR_TYPE_ATTR, /**< Attributes structure. */
        RTE_FLOW_ERROR_TYPE_ITEM_NUM, /**< Pattern length. */
        RTE_FLOW_ERROR_TYPE_ITEM, /**< Specific pattern item. */
        RTE_FLOW_ERROR_TYPE_ACTION_NUM, /**< Number of actions. */
        RTE_FLOW_ERROR_TYPE_ACTION, /**< Specific action. */
};
1193
/**
 * Verbose error structure definition.
 *
 * This object is normally allocated by applications and set by PMDs, the
 * message points to a constant string which does not need to be freed by
 * the application, however its pointer can be considered valid only as long
 * as its associated DPDK port remains configured. Closing the underlying
 * device or unloading the PMD invalidates it.
 *
 * Both cause and message may be NULL regardless of the error type.
 */
struct rte_flow_error {
        enum rte_flow_error_type type; /**< Cause field and error types. */
        const void *cause; /**< Object responsible for the error. */
        const char *message; /**< Human-readable error message. */
};
1210
/**
 * Check whether a flow rule can be created on a given port.
 *
 * The flow rule is validated for correctness and whether it could be accepted
 * by the device given sufficient resources. The rule is checked against the
 * current device mode and queue configuration. The flow rule may also
 * optionally be validated against existing flow rules and device resources.
 * This function has no effect on the target device.
 *
 * The returned value is guaranteed to remain valid only as long as no
 * successful calls to rte_flow_create() or rte_flow_destroy() are made in
 * the meantime and no device parameters affecting flow rules in any way are
 * modified, due to possible collisions or resource limitations (although in
 * such cases EINVAL should not be returned).
 *
 * @param port_id
 *   Port identifier of Ethernet device.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 if flow rule is valid and can be created. A negative errno value
 *   otherwise (rte_errno is also set), the following errors are defined:
 *
 *   -ENOSYS: underlying device does not support this functionality.
 *
 *   -EINVAL: unknown or invalid rule specification.
 *
 *   -ENOTSUP: valid but unsupported rule specification (e.g. partial
 *   bit-masks are unsupported).
 *
 *   -EEXIST: collision with an existing rule. Only returned if device
 *   supports flow rule collision checking and there was a flow rule
 *   collision. Not receiving this return code is no guarantee that creating
 *   the rule will not fail due to a collision.
 *
 *   -ENOMEM: not enough memory to execute the function, or if the device
 *   supports resource validation, resource limitation on the device.
 *
 *   -EBUSY: action cannot be performed due to busy device resources, may
 *   succeed if the affected queues or even the entire port are in a stopped
 *   state (see rte_eth_dev_rx_queue_stop() and rte_eth_dev_stop()).
 */
int
rte_flow_validate(uint16_t port_id,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error);
1267
/**
 * Create a flow rule on a given port.
 *
 * @param port_id
 *   Port identifier of Ethernet device.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   A valid handle in case of success, NULL otherwise and rte_errno is set
 *   to the positive version of one of the error codes defined for
 *   rte_flow_validate(). The handle is owned by the port; release it with
 *   rte_flow_destroy() or rte_flow_flush().
 */
struct rte_flow *
rte_flow_create(uint16_t port_id,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);
1294
/**
 * Destroy a flow rule on a given port.
 *
 * Failure to destroy a flow rule handle may occur when other flow rules
 * depend on it, and destroying it would result in an inconsistent state.
 *
 * This function is only guaranteed to succeed if handles are destroyed in
 * reverse order of their creation.
 *
 * @param port_id
 *   Port identifier of Ethernet device.
 * @param flow
 *   Flow rule handle to destroy. Invalid after a successful call.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
rte_flow_destroy(uint16_t port_id,
                 struct rte_flow *flow,
                 struct rte_flow_error *error);
1319
/**
 * Destroy all flow rules associated with a port.
 *
 * In the unlikely event of failure, handles are still considered destroyed
 * and no longer valid but the port must be assumed to be in an inconsistent
 * state.
 *
 * @param port_id
 *   Port identifier of Ethernet device.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
rte_flow_flush(uint16_t port_id,
               struct rte_flow_error *error);
1339
/**
 * Query an existing flow rule.
 *
 * This function allows retrieving flow-specific data such as counters.
 * Data is gathered by special actions which must be present in the flow
 * rule definition.
 *
 * \see RTE_FLOW_ACTION_TYPE_COUNT
 *
 * @param port_id
 *   Port identifier of Ethernet device.
 * @param flow
 *   Flow rule handle to query.
 * @param action
 *   Action type to query.
 * @param[in, out] data
 *   Pointer to storage for the associated query data type. Must point to
 *   the query structure matching the action type (e.g. struct
 *   rte_flow_query_count for RTE_FLOW_ACTION_TYPE_COUNT); allocated and
 *   owned by the caller.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
rte_flow_query(uint16_t port_id,
               struct rte_flow *flow,
               enum rte_flow_action_type action,
               void *data,
               struct rte_flow_error *error);
1370
/**
 * Restrict ingress traffic to the defined flow rules.
 *
 * Isolated mode guarantees that all ingress traffic comes from defined flow
 * rules only (current and future).
 *
 * Besides making ingress more deterministic, it allows PMDs to safely reuse
 * resources otherwise assigned to handle the remaining traffic, such as
 * global RSS configuration settings, VLAN filters, MAC address entries,
 * legacy filter API rules and so on in order to expand the set of possible
 * flow rule types.
 *
 * Calling this function as soon as possible after device initialization,
 * ideally before the first call to rte_eth_dev_configure(), is recommended
 * to avoid possible failures due to conflicting settings.
 *
 * Once effective, leaving isolated mode may not be possible depending on
 * PMD implementation.
 *
 * Additionally, the following functionality has no effect on the underlying
 * port and may return errors such as ENOTSUP ("not supported"):
 *
 * - Toggling promiscuous mode.
 * - Toggling allmulticast mode.
 * - Configuring MAC addresses.
 * - Configuring multicast addresses.
 * - Configuring VLAN filters.
 * - Configuring Rx filters through the legacy API (e.g. FDIR).
 * - Configuring global RSS settings.
 *
 * @param port_id
 *   Port identifier of Ethernet device.
 * @param set
 *   Nonzero to enter isolated mode, attempt to leave it otherwise.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
rte_flow_isolate(uint16_t port_id, int set, struct rte_flow_error *error);
1414
/**
 * Initialize flow error structure.
 *
 * Helper for PMDs: fills *error (when non-NULL) and sets rte_errno in one
 * call, so error paths can simply `return rte_flow_error_set(...)`.
 *
 * @param[out] error
 *   Pointer to flow error structure (may be NULL).
 * @param code
 *   Related error code (rte_errno).
 * @param type
 *   Cause field and error types.
 * @param cause
 *   Object responsible for the error.
 * @param message
 *   Human-readable error message.
 *
 * @return
 *   Negative error code (errno value) and rte_errno is set.
 */
int
rte_flow_error_set(struct rte_flow_error *error,
                   int code,
                   enum rte_flow_error_type type,
                   const void *cause,
                   const char *message);
1438
/**
 * Generic flow representation.
 *
 * This form is sufficient to describe an rte_flow independently from any
 * PMD implementation and allows for replayability and identification.
 *
 * items and actions point into data[]; the whole object is self-contained
 * within the size bytes reported below (see rte_flow_copy()).
 */
struct rte_flow_desc {
        size_t size; /**< Allocated space including data[]. */
        struct rte_flow_attr attr; /**< Attributes. */
        struct rte_flow_item *items; /**< Items. */
        struct rte_flow_action *actions; /**< Actions. */
        uint8_t data[]; /**< Storage for items/actions. */
};
1452
/**
 * Copy an rte_flow rule description.
 *
 * @param[in] fd
 *   Flow rule description.
 * @param[in] len
 *   Total size of allocated data for the flow description.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 *
 * @return
 *   If len is greater or equal to the size of the flow, the total size of the
 *   flow description and its data.
 *   If len is lower than the size of the flow, the number of bytes that would
 *   have been written to fd had it been sufficient. Nothing is written.
 */
size_t
rte_flow_copy(struct rte_flow_desc *fd, size_t len,
              const struct rte_flow_attr *attr,
              const struct rte_flow_item *items,
              const struct rte_flow_action *actions);
1478
1479 #ifdef __cplusplus
1480 }
1481 #endif
1482
1483 #endif /* RTE_FLOW_H_ */