1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
11 #include <rte_common.h>
12 #include <rte_errno.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_string_fns.h>
16 #include <rte_mbuf_dyn.h>
17 #include "rte_ethdev.h"
18 #include "rte_flow_driver.h"
/* Mbuf dynamic field offset for metadata; -1 until registered. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic flag mask for metadata; 0 until registered. */
uint64_t rte_flow_dynf_metadata_mask;
/**
 * Flow elements description tables.
 *
 * One entry per known pattern item / action type: its printable name and
 * the size of its configuration structure (0 when there is none).
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
};
/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
42 /** Information about known flow pattern items. */
43 static const struct rte_flow_desc_data rte_flow_desc_item
[] = {
45 MK_FLOW_ITEM(VOID
, 0),
46 MK_FLOW_ITEM(INVERT
, 0),
47 MK_FLOW_ITEM(ANY
, sizeof(struct rte_flow_item_any
)),
49 MK_FLOW_ITEM(VF
, sizeof(struct rte_flow_item_vf
)),
50 MK_FLOW_ITEM(PHY_PORT
, sizeof(struct rte_flow_item_phy_port
)),
51 MK_FLOW_ITEM(PORT_ID
, sizeof(struct rte_flow_item_port_id
)),
52 MK_FLOW_ITEM(RAW
, sizeof(struct rte_flow_item_raw
)),
53 MK_FLOW_ITEM(ETH
, sizeof(struct rte_flow_item_eth
)),
54 MK_FLOW_ITEM(VLAN
, sizeof(struct rte_flow_item_vlan
)),
55 MK_FLOW_ITEM(IPV4
, sizeof(struct rte_flow_item_ipv4
)),
56 MK_FLOW_ITEM(IPV6
, sizeof(struct rte_flow_item_ipv6
)),
57 MK_FLOW_ITEM(ICMP
, sizeof(struct rte_flow_item_icmp
)),
58 MK_FLOW_ITEM(UDP
, sizeof(struct rte_flow_item_udp
)),
59 MK_FLOW_ITEM(TCP
, sizeof(struct rte_flow_item_tcp
)),
60 MK_FLOW_ITEM(SCTP
, sizeof(struct rte_flow_item_sctp
)),
61 MK_FLOW_ITEM(VXLAN
, sizeof(struct rte_flow_item_vxlan
)),
62 MK_FLOW_ITEM(E_TAG
, sizeof(struct rte_flow_item_e_tag
)),
63 MK_FLOW_ITEM(NVGRE
, sizeof(struct rte_flow_item_nvgre
)),
64 MK_FLOW_ITEM(MPLS
, sizeof(struct rte_flow_item_mpls
)),
65 MK_FLOW_ITEM(GRE
, sizeof(struct rte_flow_item_gre
)),
66 MK_FLOW_ITEM(FUZZY
, sizeof(struct rte_flow_item_fuzzy
)),
67 MK_FLOW_ITEM(GTP
, sizeof(struct rte_flow_item_gtp
)),
68 MK_FLOW_ITEM(GTPC
, sizeof(struct rte_flow_item_gtp
)),
69 MK_FLOW_ITEM(GTPU
, sizeof(struct rte_flow_item_gtp
)),
70 MK_FLOW_ITEM(ESP
, sizeof(struct rte_flow_item_esp
)),
71 MK_FLOW_ITEM(GENEVE
, sizeof(struct rte_flow_item_geneve
)),
72 MK_FLOW_ITEM(VXLAN_GPE
, sizeof(struct rte_flow_item_vxlan_gpe
)),
73 MK_FLOW_ITEM(ARP_ETH_IPV4
, sizeof(struct rte_flow_item_arp_eth_ipv4
)),
74 MK_FLOW_ITEM(IPV6_EXT
, sizeof(struct rte_flow_item_ipv6_ext
)),
75 MK_FLOW_ITEM(ICMP6
, sizeof(struct rte_flow_item_icmp6
)),
76 MK_FLOW_ITEM(ICMP6_ND_NS
, sizeof(struct rte_flow_item_icmp6_nd_ns
)),
77 MK_FLOW_ITEM(ICMP6_ND_NA
, sizeof(struct rte_flow_item_icmp6_nd_na
)),
78 MK_FLOW_ITEM(ICMP6_ND_OPT
, sizeof(struct rte_flow_item_icmp6_nd_opt
)),
79 MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH
,
80 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth
)),
81 MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH
,
82 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth
)),
83 MK_FLOW_ITEM(MARK
, sizeof(struct rte_flow_item_mark
)),
84 MK_FLOW_ITEM(META
, sizeof(struct rte_flow_item_meta
)),
85 MK_FLOW_ITEM(TAG
, sizeof(struct rte_flow_item_tag
)),
86 MK_FLOW_ITEM(GRE_KEY
, sizeof(rte_be32_t
)),
87 MK_FLOW_ITEM(GTP_PSC
, sizeof(struct rte_flow_item_gtp_psc
)),
88 MK_FLOW_ITEM(PPPOES
, sizeof(struct rte_flow_item_pppoe
)),
89 MK_FLOW_ITEM(PPPOED
, sizeof(struct rte_flow_item_pppoe
)),
90 MK_FLOW_ITEM(PPPOE_PROTO_ID
,
91 sizeof(struct rte_flow_item_pppoe_proto_id
)),
92 MK_FLOW_ITEM(NSH
, sizeof(struct rte_flow_item_nsh
)),
93 MK_FLOW_ITEM(IGMP
, sizeof(struct rte_flow_item_igmp
)),
94 MK_FLOW_ITEM(AH
, sizeof(struct rte_flow_item_ah
)),
95 MK_FLOW_ITEM(HIGIG2
, sizeof(struct rte_flow_item_higig2_hdr
)),
96 MK_FLOW_ITEM(L2TPV3OIP
, sizeof(struct rte_flow_item_l2tpv3oip
)),
97 MK_FLOW_ITEM(PFCP
, sizeof(struct rte_flow_item_pfcp
)),
/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
107 /** Information about known flow actions. */
108 static const struct rte_flow_desc_data rte_flow_desc_action
[] = {
109 MK_FLOW_ACTION(END
, 0),
110 MK_FLOW_ACTION(VOID
, 0),
111 MK_FLOW_ACTION(PASSTHRU
, 0),
112 MK_FLOW_ACTION(JUMP
, sizeof(struct rte_flow_action_jump
)),
113 MK_FLOW_ACTION(MARK
, sizeof(struct rte_flow_action_mark
)),
114 MK_FLOW_ACTION(FLAG
, 0),
115 MK_FLOW_ACTION(QUEUE
, sizeof(struct rte_flow_action_queue
)),
116 MK_FLOW_ACTION(DROP
, 0),
117 MK_FLOW_ACTION(COUNT
, sizeof(struct rte_flow_action_count
)),
118 MK_FLOW_ACTION(RSS
, sizeof(struct rte_flow_action_rss
)),
119 MK_FLOW_ACTION(PF
, 0),
120 MK_FLOW_ACTION(VF
, sizeof(struct rte_flow_action_vf
)),
121 MK_FLOW_ACTION(PHY_PORT
, sizeof(struct rte_flow_action_phy_port
)),
122 MK_FLOW_ACTION(PORT_ID
, sizeof(struct rte_flow_action_port_id
)),
123 MK_FLOW_ACTION(METER
, sizeof(struct rte_flow_action_meter
)),
124 MK_FLOW_ACTION(SECURITY
, sizeof(struct rte_flow_action_security
)),
125 MK_FLOW_ACTION(OF_SET_MPLS_TTL
,
126 sizeof(struct rte_flow_action_of_set_mpls_ttl
)),
127 MK_FLOW_ACTION(OF_DEC_MPLS_TTL
, 0),
128 MK_FLOW_ACTION(OF_SET_NW_TTL
,
129 sizeof(struct rte_flow_action_of_set_nw_ttl
)),
130 MK_FLOW_ACTION(OF_DEC_NW_TTL
, 0),
131 MK_FLOW_ACTION(OF_COPY_TTL_OUT
, 0),
132 MK_FLOW_ACTION(OF_COPY_TTL_IN
, 0),
133 MK_FLOW_ACTION(OF_POP_VLAN
, 0),
134 MK_FLOW_ACTION(OF_PUSH_VLAN
,
135 sizeof(struct rte_flow_action_of_push_vlan
)),
136 MK_FLOW_ACTION(OF_SET_VLAN_VID
,
137 sizeof(struct rte_flow_action_of_set_vlan_vid
)),
138 MK_FLOW_ACTION(OF_SET_VLAN_PCP
,
139 sizeof(struct rte_flow_action_of_set_vlan_pcp
)),
140 MK_FLOW_ACTION(OF_POP_MPLS
,
141 sizeof(struct rte_flow_action_of_pop_mpls
)),
142 MK_FLOW_ACTION(OF_PUSH_MPLS
,
143 sizeof(struct rte_flow_action_of_push_mpls
)),
144 MK_FLOW_ACTION(VXLAN_ENCAP
, sizeof(struct rte_flow_action_vxlan_encap
)),
145 MK_FLOW_ACTION(VXLAN_DECAP
, 0),
146 MK_FLOW_ACTION(NVGRE_ENCAP
, sizeof(struct rte_flow_action_vxlan_encap
)),
147 MK_FLOW_ACTION(NVGRE_DECAP
, 0),
148 MK_FLOW_ACTION(RAW_ENCAP
, sizeof(struct rte_flow_action_raw_encap
)),
149 MK_FLOW_ACTION(RAW_DECAP
, sizeof(struct rte_flow_action_raw_decap
)),
150 MK_FLOW_ACTION(SET_IPV4_SRC
,
151 sizeof(struct rte_flow_action_set_ipv4
)),
152 MK_FLOW_ACTION(SET_IPV4_DST
,
153 sizeof(struct rte_flow_action_set_ipv4
)),
154 MK_FLOW_ACTION(SET_IPV6_SRC
,
155 sizeof(struct rte_flow_action_set_ipv6
)),
156 MK_FLOW_ACTION(SET_IPV6_DST
,
157 sizeof(struct rte_flow_action_set_ipv6
)),
158 MK_FLOW_ACTION(SET_TP_SRC
,
159 sizeof(struct rte_flow_action_set_tp
)),
160 MK_FLOW_ACTION(SET_TP_DST
,
161 sizeof(struct rte_flow_action_set_tp
)),
162 MK_FLOW_ACTION(MAC_SWAP
, 0),
163 MK_FLOW_ACTION(DEC_TTL
, 0),
164 MK_FLOW_ACTION(SET_TTL
, sizeof(struct rte_flow_action_set_ttl
)),
165 MK_FLOW_ACTION(SET_MAC_SRC
, sizeof(struct rte_flow_action_set_mac
)),
166 MK_FLOW_ACTION(SET_MAC_DST
, sizeof(struct rte_flow_action_set_mac
)),
167 MK_FLOW_ACTION(INC_TCP_SEQ
, sizeof(rte_be32_t
)),
168 MK_FLOW_ACTION(DEC_TCP_SEQ
, sizeof(rte_be32_t
)),
169 MK_FLOW_ACTION(INC_TCP_ACK
, sizeof(rte_be32_t
)),
170 MK_FLOW_ACTION(DEC_TCP_ACK
, sizeof(rte_be32_t
)),
171 MK_FLOW_ACTION(SET_TAG
, sizeof(struct rte_flow_action_set_tag
)),
172 MK_FLOW_ACTION(SET_META
, sizeof(struct rte_flow_action_set_meta
)),
173 MK_FLOW_ACTION(SET_IPV4_DSCP
, sizeof(struct rte_flow_action_set_dscp
)),
174 MK_FLOW_ACTION(SET_IPV6_DSCP
, sizeof(struct rte_flow_action_set_dscp
)),
175 MK_FLOW_ACTION(AGE
, sizeof(struct rte_flow_action_age
)),
179 rte_flow_dynf_metadata_register(void)
184 static const struct rte_mbuf_dynfield desc_offs
= {
185 .name
= RTE_MBUF_DYNFIELD_METADATA_NAME
,
186 .size
= sizeof(uint32_t),
187 .align
= __alignof__(uint32_t),
189 static const struct rte_mbuf_dynflag desc_flag
= {
190 .name
= RTE_MBUF_DYNFLAG_METADATA_NAME
,
193 offset
= rte_mbuf_dynfield_register(&desc_offs
);
196 flag
= rte_mbuf_dynflag_register(&desc_flag
);
199 rte_flow_dynf_metadata_offs
= offset
;
200 rte_flow_dynf_metadata_mask
= (1ULL << flag
);
204 rte_flow_dynf_metadata_offs
= -1;
205 rte_flow_dynf_metadata_mask
= 0ULL;
210 flow_err(uint16_t port_id
, int ret
, struct rte_flow_error
*error
)
214 if (rte_eth_dev_is_removed(port_id
))
215 return rte_flow_error_set(error
, EIO
,
216 RTE_FLOW_ERROR_TYPE_UNSPECIFIED
,
217 NULL
, rte_strerror(EIO
));
221 static enum rte_flow_item_type
222 rte_flow_expand_rss_item_complete(const struct rte_flow_item
*item
)
224 enum rte_flow_item_type ret
= RTE_FLOW_ITEM_TYPE_VOID
;
225 uint16_t ether_type
= 0;
226 uint16_t ether_type_m
;
227 uint8_t ip_next_proto
= 0;
228 uint8_t ip_next_proto_m
;
230 if (item
== NULL
|| item
->spec
== NULL
)
232 switch (item
->type
) {
233 case RTE_FLOW_ITEM_TYPE_ETH
:
235 ether_type_m
= ((const struct rte_flow_item_eth
*)
238 ether_type_m
= rte_flow_item_eth_mask
.type
;
239 if (ether_type_m
!= RTE_BE16(0xFFFF))
241 ether_type
= ((const struct rte_flow_item_eth
*)
243 if (rte_be_to_cpu_16(ether_type
) == RTE_ETHER_TYPE_IPV4
)
244 ret
= RTE_FLOW_ITEM_TYPE_IPV4
;
245 else if (rte_be_to_cpu_16(ether_type
) == RTE_ETHER_TYPE_IPV6
)
246 ret
= RTE_FLOW_ITEM_TYPE_IPV6
;
247 else if (rte_be_to_cpu_16(ether_type
) == RTE_ETHER_TYPE_VLAN
)
248 ret
= RTE_FLOW_ITEM_TYPE_VLAN
;
250 case RTE_FLOW_ITEM_TYPE_VLAN
:
252 ether_type_m
= ((const struct rte_flow_item_vlan
*)
253 (item
->mask
))->inner_type
;
255 ether_type_m
= rte_flow_item_vlan_mask
.inner_type
;
256 if (ether_type_m
!= RTE_BE16(0xFFFF))
258 ether_type
= ((const struct rte_flow_item_vlan
*)
259 (item
->spec
))->inner_type
;
260 if (rte_be_to_cpu_16(ether_type
) == RTE_ETHER_TYPE_IPV4
)
261 ret
= RTE_FLOW_ITEM_TYPE_IPV4
;
262 else if (rte_be_to_cpu_16(ether_type
) == RTE_ETHER_TYPE_IPV6
)
263 ret
= RTE_FLOW_ITEM_TYPE_IPV6
;
264 else if (rte_be_to_cpu_16(ether_type
) == RTE_ETHER_TYPE_VLAN
)
265 ret
= RTE_FLOW_ITEM_TYPE_VLAN
;
267 case RTE_FLOW_ITEM_TYPE_IPV4
:
269 ip_next_proto_m
= ((const struct rte_flow_item_ipv4
*)
270 (item
->mask
))->hdr
.next_proto_id
;
273 rte_flow_item_ipv4_mask
.hdr
.next_proto_id
;
274 if (ip_next_proto_m
!= 0xFF)
276 ip_next_proto
= ((const struct rte_flow_item_ipv4
*)
277 (item
->spec
))->hdr
.next_proto_id
;
278 if (ip_next_proto
== IPPROTO_UDP
)
279 ret
= RTE_FLOW_ITEM_TYPE_UDP
;
280 else if (ip_next_proto
== IPPROTO_TCP
)
281 ret
= RTE_FLOW_ITEM_TYPE_TCP
;
282 else if (ip_next_proto
== IPPROTO_IP
)
283 ret
= RTE_FLOW_ITEM_TYPE_IPV4
;
284 else if (ip_next_proto
== IPPROTO_IPV6
)
285 ret
= RTE_FLOW_ITEM_TYPE_IPV6
;
287 case RTE_FLOW_ITEM_TYPE_IPV6
:
289 ip_next_proto_m
= ((const struct rte_flow_item_ipv6
*)
290 (item
->mask
))->hdr
.proto
;
293 rte_flow_item_ipv6_mask
.hdr
.proto
;
294 if (ip_next_proto_m
!= 0xFF)
296 ip_next_proto
= ((const struct rte_flow_item_ipv6
*)
297 (item
->spec
))->hdr
.proto
;
298 if (ip_next_proto
== IPPROTO_UDP
)
299 ret
= RTE_FLOW_ITEM_TYPE_UDP
;
300 else if (ip_next_proto
== IPPROTO_TCP
)
301 ret
= RTE_FLOW_ITEM_TYPE_TCP
;
302 else if (ip_next_proto
== IPPROTO_IP
)
303 ret
= RTE_FLOW_ITEM_TYPE_IPV4
;
304 else if (ip_next_proto
== IPPROTO_IPV6
)
305 ret
= RTE_FLOW_ITEM_TYPE_IPV6
;
308 ret
= RTE_FLOW_ITEM_TYPE_VOID
;
314 /* Get generic flow operations structure from a port. */
315 const struct rte_flow_ops
*
316 rte_flow_ops_get(uint16_t port_id
, struct rte_flow_error
*error
)
318 struct rte_eth_dev
*dev
= &rte_eth_devices
[port_id
];
319 const struct rte_flow_ops
*ops
;
322 if (unlikely(!rte_eth_dev_is_valid_port(port_id
)))
324 else if (unlikely(!dev
->dev_ops
->filter_ctrl
||
325 dev
->dev_ops
->filter_ctrl(dev
,
326 RTE_ETH_FILTER_GENERIC
,
333 rte_flow_error_set(error
, code
, RTE_FLOW_ERROR_TYPE_UNSPECIFIED
,
334 NULL
, rte_strerror(code
));
338 /* Check whether a flow rule can be created on a given port. */
340 rte_flow_validate(uint16_t port_id
,
341 const struct rte_flow_attr
*attr
,
342 const struct rte_flow_item pattern
[],
343 const struct rte_flow_action actions
[],
344 struct rte_flow_error
*error
)
346 const struct rte_flow_ops
*ops
= rte_flow_ops_get(port_id
, error
);
347 struct rte_eth_dev
*dev
= &rte_eth_devices
[port_id
];
351 if (likely(!!ops
->validate
))
352 return flow_err(port_id
, ops
->validate(dev
, attr
, pattern
,
353 actions
, error
), error
);
354 return rte_flow_error_set(error
, ENOSYS
,
355 RTE_FLOW_ERROR_TYPE_UNSPECIFIED
,
356 NULL
, rte_strerror(ENOSYS
));
359 /* Create a flow rule on a given port. */
361 rte_flow_create(uint16_t port_id
,
362 const struct rte_flow_attr
*attr
,
363 const struct rte_flow_item pattern
[],
364 const struct rte_flow_action actions
[],
365 struct rte_flow_error
*error
)
367 struct rte_eth_dev
*dev
= &rte_eth_devices
[port_id
];
368 struct rte_flow
*flow
;
369 const struct rte_flow_ops
*ops
= rte_flow_ops_get(port_id
, error
);
373 if (likely(!!ops
->create
)) {
374 flow
= ops
->create(dev
, attr
, pattern
, actions
, error
);
376 flow_err(port_id
, -rte_errno
, error
);
379 rte_flow_error_set(error
, ENOSYS
, RTE_FLOW_ERROR_TYPE_UNSPECIFIED
,
380 NULL
, rte_strerror(ENOSYS
));
384 /* Destroy a flow rule on a given port. */
386 rte_flow_destroy(uint16_t port_id
,
387 struct rte_flow
*flow
,
388 struct rte_flow_error
*error
)
390 struct rte_eth_dev
*dev
= &rte_eth_devices
[port_id
];
391 const struct rte_flow_ops
*ops
= rte_flow_ops_get(port_id
, error
);
395 if (likely(!!ops
->destroy
))
396 return flow_err(port_id
, ops
->destroy(dev
, flow
, error
),
398 return rte_flow_error_set(error
, ENOSYS
,
399 RTE_FLOW_ERROR_TYPE_UNSPECIFIED
,
400 NULL
, rte_strerror(ENOSYS
));
403 /* Destroy all flow rules associated with a port. */
405 rte_flow_flush(uint16_t port_id
,
406 struct rte_flow_error
*error
)
408 struct rte_eth_dev
*dev
= &rte_eth_devices
[port_id
];
409 const struct rte_flow_ops
*ops
= rte_flow_ops_get(port_id
, error
);
413 if (likely(!!ops
->flush
))
414 return flow_err(port_id
, ops
->flush(dev
, error
), error
);
415 return rte_flow_error_set(error
, ENOSYS
,
416 RTE_FLOW_ERROR_TYPE_UNSPECIFIED
,
417 NULL
, rte_strerror(ENOSYS
));
420 /* Query an existing flow rule. */
422 rte_flow_query(uint16_t port_id
,
423 struct rte_flow
*flow
,
424 const struct rte_flow_action
*action
,
426 struct rte_flow_error
*error
)
428 struct rte_eth_dev
*dev
= &rte_eth_devices
[port_id
];
429 const struct rte_flow_ops
*ops
= rte_flow_ops_get(port_id
, error
);
433 if (likely(!!ops
->query
))
434 return flow_err(port_id
, ops
->query(dev
, flow
, action
, data
,
436 return rte_flow_error_set(error
, ENOSYS
,
437 RTE_FLOW_ERROR_TYPE_UNSPECIFIED
,
438 NULL
, rte_strerror(ENOSYS
));
441 /* Restrict ingress traffic to the defined flow rules. */
443 rte_flow_isolate(uint16_t port_id
,
445 struct rte_flow_error
*error
)
447 struct rte_eth_dev
*dev
= &rte_eth_devices
[port_id
];
448 const struct rte_flow_ops
*ops
= rte_flow_ops_get(port_id
, error
);
452 if (likely(!!ops
->isolate
))
453 return flow_err(port_id
, ops
->isolate(dev
, set
, error
), error
);
454 return rte_flow_error_set(error
, ENOSYS
,
455 RTE_FLOW_ERROR_TYPE_UNSPECIFIED
,
456 NULL
, rte_strerror(ENOSYS
));
459 /* Initialize flow error structure. */
461 rte_flow_error_set(struct rte_flow_error
*error
,
463 enum rte_flow_error_type type
,
468 *error
= (struct rte_flow_error
){
/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};
486 * Copy pattern item specification.
489 * Output buffer. Can be NULL if @p size is zero.
491 * Size of @p buf in bytes.
493 * Pattern item to copy specification from.
495 * Specification selector for either @p spec, @p last or @p mask.
498 * Number of bytes needed to store pattern item specification regardless
499 * of @p size. @p buf contents are truncated to @p size if not large
503 rte_flow_conv_item_spec(void *buf
, const size_t size
,
504 const struct rte_flow_item
*item
,
505 enum rte_flow_conv_item_spec_type type
)
509 type
== RTE_FLOW_CONV_ITEM_SPEC
? item
->spec
:
510 type
== RTE_FLOW_CONV_ITEM_LAST
? item
->last
:
511 type
== RTE_FLOW_CONV_ITEM_MASK
? item
->mask
:
514 switch (item
->type
) {
516 const struct rte_flow_item_raw
*raw
;
519 const struct rte_flow_item_raw
*raw
;
522 const struct rte_flow_item_raw
*raw
;
525 const struct rte_flow_item_raw
*raw
;
528 struct rte_flow_item_raw
*raw
;
532 case RTE_FLOW_ITEM_TYPE_RAW
:
533 spec
.raw
= item
->spec
;
534 last
.raw
= item
->last
? item
->last
: item
->spec
;
535 mask
.raw
= item
->mask
? item
->mask
: &rte_flow_item_raw_mask
;
539 (&(struct rte_flow_item_raw
){
540 .relative
= src
.raw
->relative
,
541 .search
= src
.raw
->search
,
542 .reserved
= src
.raw
->reserved
,
543 .offset
= src
.raw
->offset
,
544 .limit
= src
.raw
->limit
,
545 .length
= src
.raw
->length
,
547 size
> sizeof(*dst
.raw
) ? sizeof(*dst
.raw
) : size
);
548 off
= sizeof(*dst
.raw
);
549 if (type
== RTE_FLOW_CONV_ITEM_SPEC
||
550 (type
== RTE_FLOW_CONV_ITEM_MASK
&&
551 ((spec
.raw
->length
& mask
.raw
->length
) >=
552 (last
.raw
->length
& mask
.raw
->length
))))
553 tmp
= spec
.raw
->length
& mask
.raw
->length
;
555 tmp
= last
.raw
->length
& mask
.raw
->length
;
557 off
= RTE_ALIGN_CEIL(off
, sizeof(*dst
.raw
->pattern
));
558 if (size
>= off
+ tmp
)
559 dst
.raw
->pattern
= rte_memcpy
560 ((void *)((uintptr_t)dst
.raw
+ off
),
561 src
.raw
->pattern
, tmp
);
566 off
= rte_flow_desc_item
[item
->type
].size
;
567 rte_memcpy(buf
, data
, (size
> off
? off
: size
));
574 * Copy action configuration.
577 * Output buffer. Can be NULL if @p size is zero.
579 * Size of @p buf in bytes.
581 * Action to copy configuration from.
584 * Number of bytes needed to store pattern item specification regardless
585 * of @p size. @p buf contents are truncated to @p size if not large
589 rte_flow_conv_action_conf(void *buf
, const size_t size
,
590 const struct rte_flow_action
*action
)
594 switch (action
->type
) {
596 const struct rte_flow_action_rss
*rss
;
597 const struct rte_flow_action_vxlan_encap
*vxlan_encap
;
598 const struct rte_flow_action_nvgre_encap
*nvgre_encap
;
601 struct rte_flow_action_rss
*rss
;
602 struct rte_flow_action_vxlan_encap
*vxlan_encap
;
603 struct rte_flow_action_nvgre_encap
*nvgre_encap
;
608 case RTE_FLOW_ACTION_TYPE_RSS
:
609 src
.rss
= action
->conf
;
612 (&(struct rte_flow_action_rss
){
613 .func
= src
.rss
->func
,
614 .level
= src
.rss
->level
,
615 .types
= src
.rss
->types
,
616 .key_len
= src
.rss
->key_len
,
617 .queue_num
= src
.rss
->queue_num
,
619 size
> sizeof(*dst
.rss
) ? sizeof(*dst
.rss
) : size
);
620 off
= sizeof(*dst
.rss
);
621 if (src
.rss
->key_len
) {
622 off
= RTE_ALIGN_CEIL(off
, sizeof(*dst
.rss
->key
));
623 tmp
= sizeof(*src
.rss
->key
) * src
.rss
->key_len
;
624 if (size
>= off
+ tmp
)
625 dst
.rss
->key
= rte_memcpy
626 ((void *)((uintptr_t)dst
.rss
+ off
),
630 if (src
.rss
->queue_num
) {
631 off
= RTE_ALIGN_CEIL(off
, sizeof(*dst
.rss
->queue
));
632 tmp
= sizeof(*src
.rss
->queue
) * src
.rss
->queue_num
;
633 if (size
>= off
+ tmp
)
634 dst
.rss
->queue
= rte_memcpy
635 ((void *)((uintptr_t)dst
.rss
+ off
),
636 src
.rss
->queue
, tmp
);
640 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
:
641 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP
:
642 src
.vxlan_encap
= action
->conf
;
643 dst
.vxlan_encap
= buf
;
644 RTE_BUILD_BUG_ON(sizeof(*src
.vxlan_encap
) !=
645 sizeof(*src
.nvgre_encap
) ||
646 offsetof(struct rte_flow_action_vxlan_encap
,
648 offsetof(struct rte_flow_action_nvgre_encap
,
650 off
= sizeof(*dst
.vxlan_encap
);
651 if (src
.vxlan_encap
->definition
) {
653 (off
, sizeof(*dst
.vxlan_encap
->definition
));
655 (RTE_FLOW_CONV_OP_PATTERN
,
656 (void *)((uintptr_t)dst
.vxlan_encap
+ off
),
657 size
> off
? size
- off
: 0,
658 src
.vxlan_encap
->definition
, NULL
);
661 if (size
>= off
+ ret
)
662 dst
.vxlan_encap
->definition
=
663 (void *)((uintptr_t)dst
.vxlan_encap
+
669 off
= rte_flow_desc_action
[action
->type
].size
;
670 rte_memcpy(buf
, action
->conf
, (size
> off
? off
: size
));
677 * Copy a list of pattern items.
680 * Destination buffer. Can be NULL if @p size is zero.
682 * Size of @p dst in bytes.
684 * Source pattern items.
686 * Maximum number of pattern items to process from @p src or 0 to process
687 * the entire list. In both cases, processing stops after
688 * RTE_FLOW_ITEM_TYPE_END is encountered.
690 * Perform verbose error reporting if not NULL.
693 * A positive value representing the number of bytes needed to store
694 * pattern items regardless of @p size on success (@p buf contents are
695 * truncated to @p size if not large enough), a negative errno value
696 * otherwise and rte_errno is set.
699 rte_flow_conv_pattern(struct rte_flow_item
*dst
,
701 const struct rte_flow_item
*src
,
703 struct rte_flow_error
*error
)
705 uintptr_t data
= (uintptr_t)dst
;
710 for (i
= 0, off
= 0; !num
|| i
!= num
; ++i
, ++src
, ++dst
) {
711 if ((size_t)src
->type
>= RTE_DIM(rte_flow_desc_item
) ||
712 !rte_flow_desc_item
[src
->type
].name
)
713 return rte_flow_error_set
714 (error
, ENOTSUP
, RTE_FLOW_ERROR_TYPE_ITEM
, src
,
715 "cannot convert unknown item type");
716 if (size
>= off
+ sizeof(*dst
))
717 *dst
= (struct rte_flow_item
){
729 off
= RTE_ALIGN_CEIL(off
, sizeof(double));
730 ret
= rte_flow_conv_item_spec
731 ((void *)(data
+ off
),
732 size
> off
? size
- off
: 0, src
,
733 RTE_FLOW_CONV_ITEM_SPEC
);
734 if (size
&& size
>= off
+ ret
)
735 dst
->spec
= (void *)(data
+ off
);
740 off
= RTE_ALIGN_CEIL(off
, sizeof(double));
741 ret
= rte_flow_conv_item_spec
742 ((void *)(data
+ off
),
743 size
> off
? size
- off
: 0, src
,
744 RTE_FLOW_CONV_ITEM_LAST
);
745 if (size
&& size
>= off
+ ret
)
746 dst
->last
= (void *)(data
+ off
);
750 off
= RTE_ALIGN_CEIL(off
, sizeof(double));
751 ret
= rte_flow_conv_item_spec
752 ((void *)(data
+ off
),
753 size
> off
? size
- off
: 0, src
,
754 RTE_FLOW_CONV_ITEM_MASK
);
755 if (size
&& size
>= off
+ ret
)
756 dst
->mask
= (void *)(data
+ off
);
766 * Copy a list of actions.
769 * Destination buffer. Can be NULL if @p size is zero.
771 * Size of @p dst in bytes.
775 * Maximum number of actions to process from @p src or 0 to process the
776 * entire list. In both cases, processing stops after
777 * RTE_FLOW_ACTION_TYPE_END is encountered.
779 * Perform verbose error reporting if not NULL.
782 * A positive value representing the number of bytes needed to store
783 * actions regardless of @p size on success (@p buf contents are truncated
784 * to @p size if not large enough), a negative errno value otherwise and
788 rte_flow_conv_actions(struct rte_flow_action
*dst
,
790 const struct rte_flow_action
*src
,
792 struct rte_flow_error
*error
)
794 uintptr_t data
= (uintptr_t)dst
;
799 for (i
= 0, off
= 0; !num
|| i
!= num
; ++i
, ++src
, ++dst
) {
800 if ((size_t)src
->type
>= RTE_DIM(rte_flow_desc_action
) ||
801 !rte_flow_desc_action
[src
->type
].name
)
802 return rte_flow_error_set
803 (error
, ENOTSUP
, RTE_FLOW_ERROR_TYPE_ACTION
,
804 src
, "cannot convert unknown action type");
805 if (size
>= off
+ sizeof(*dst
))
806 *dst
= (struct rte_flow_action
){
818 off
= RTE_ALIGN_CEIL(off
, sizeof(double));
819 ret
= rte_flow_conv_action_conf
820 ((void *)(data
+ off
),
821 size
> off
? size
- off
: 0, src
);
822 if (size
&& size
>= off
+ ret
)
823 dst
->conf
= (void *)(data
+ off
);
833 * Copy flow rule components.
835 * This comprises the flow rule descriptor itself, attributes, pattern and
836 * actions list. NULL components in @p src are skipped.
839 * Destination buffer. Can be NULL if @p size is zero.
841 * Size of @p dst in bytes.
843 * Source flow rule descriptor.
845 * Perform verbose error reporting if not NULL.
848 * A positive value representing the number of bytes needed to store all
849 * components including the descriptor regardless of @p size on success
850 * (@p buf contents are truncated to @p size if not large enough), a
851 * negative errno value otherwise and rte_errno is set.
854 rte_flow_conv_rule(struct rte_flow_conv_rule
*dst
,
856 const struct rte_flow_conv_rule
*src
,
857 struct rte_flow_error
*error
)
863 (&(struct rte_flow_conv_rule
){
868 size
> sizeof(*dst
) ? sizeof(*dst
) : size
);
871 off
= RTE_ALIGN_CEIL(off
, sizeof(double));
872 if (size
&& size
>= off
+ sizeof(*dst
->attr
))
873 dst
->attr
= rte_memcpy
874 ((void *)((uintptr_t)dst
+ off
),
875 src
->attr_ro
, sizeof(*dst
->attr
));
876 off
+= sizeof(*dst
->attr
);
878 if (src
->pattern_ro
) {
879 off
= RTE_ALIGN_CEIL(off
, sizeof(double));
880 ret
= rte_flow_conv_pattern((void *)((uintptr_t)dst
+ off
),
881 size
> off
? size
- off
: 0,
882 src
->pattern_ro
, 0, error
);
885 if (size
&& size
>= off
+ (size_t)ret
)
886 dst
->pattern
= (void *)((uintptr_t)dst
+ off
);
889 if (src
->actions_ro
) {
890 off
= RTE_ALIGN_CEIL(off
, sizeof(double));
891 ret
= rte_flow_conv_actions((void *)((uintptr_t)dst
+ off
),
892 size
> off
? size
- off
: 0,
893 src
->actions_ro
, 0, error
);
896 if (size
>= off
+ (size_t)ret
)
897 dst
->actions
= (void *)((uintptr_t)dst
+ off
);
904 * Retrieve the name of a pattern item/action type.
907 * Nonzero when @p src represents an action type instead of a pattern item
910 * Nonzero to write string address instead of contents into @p dst.
912 * Destination buffer. Can be NULL if @p size is zero.
914 * Size of @p dst in bytes.
916 * Depending on @p is_action, source pattern item or action type cast as a
919 * Perform verbose error reporting if not NULL.
922 * A positive value representing the number of bytes needed to store the
923 * name or its address regardless of @p size on success (@p buf contents
924 * are truncated to @p size if not large enough), a negative errno value
925 * otherwise and rte_errno is set.
928 rte_flow_conv_name(int is_action
,
933 struct rte_flow_error
*error
)
936 const struct rte_flow_desc_data
*data
;
939 static const struct desc_info info_rep
[2] = {
940 { rte_flow_desc_item
, RTE_DIM(rte_flow_desc_item
), },
941 { rte_flow_desc_action
, RTE_DIM(rte_flow_desc_action
), },
943 const struct desc_info
*const info
= &info_rep
[!!is_action
];
944 unsigned int type
= (uintptr_t)src
;
946 if (type
>= info
->num
)
947 return rte_flow_error_set
948 (error
, EINVAL
, RTE_FLOW_ERROR_TYPE_UNSPECIFIED
, NULL
,
949 "unknown object type to retrieve the name of");
951 return strlcpy(dst
, info
->data
[type
].name
, size
);
952 if (size
>= sizeof(const char **))
953 *((const char **)dst
) = info
->data
[type
].name
;
954 return sizeof(const char **);
957 /** Helper function to convert flow API objects. */
959 rte_flow_conv(enum rte_flow_conv_op op
,
963 struct rte_flow_error
*error
)
966 const struct rte_flow_attr
*attr
;
968 case RTE_FLOW_CONV_OP_NONE
:
970 case RTE_FLOW_CONV_OP_ATTR
:
972 if (size
> sizeof(*attr
))
973 size
= sizeof(*attr
);
974 rte_memcpy(dst
, attr
, size
);
975 return sizeof(*attr
);
976 case RTE_FLOW_CONV_OP_ITEM
:
977 return rte_flow_conv_pattern(dst
, size
, src
, 1, error
);
978 case RTE_FLOW_CONV_OP_ACTION
:
979 return rte_flow_conv_actions(dst
, size
, src
, 1, error
);
980 case RTE_FLOW_CONV_OP_PATTERN
:
981 return rte_flow_conv_pattern(dst
, size
, src
, 0, error
);
982 case RTE_FLOW_CONV_OP_ACTIONS
:
983 return rte_flow_conv_actions(dst
, size
, src
, 0, error
);
984 case RTE_FLOW_CONV_OP_RULE
:
985 return rte_flow_conv_rule(dst
, size
, src
, error
);
986 case RTE_FLOW_CONV_OP_ITEM_NAME
:
987 return rte_flow_conv_name(0, 0, dst
, size
, src
, error
);
988 case RTE_FLOW_CONV_OP_ACTION_NAME
:
989 return rte_flow_conv_name(1, 0, dst
, size
, src
, error
);
990 case RTE_FLOW_CONV_OP_ITEM_NAME_PTR
:
991 return rte_flow_conv_name(0, 1, dst
, size
, src
, error
);
992 case RTE_FLOW_CONV_OP_ACTION_NAME_PTR
:
993 return rte_flow_conv_name(1, 1, dst
, size
, src
, error
);
995 return rte_flow_error_set
996 (error
, ENOTSUP
, RTE_FLOW_ERROR_TYPE_UNSPECIFIED
, NULL
,
997 "unknown object conversion operation");
1000 /** Store a full rte_flow description. */
1002 rte_flow_copy(struct rte_flow_desc
*desc
, size_t len
,
1003 const struct rte_flow_attr
*attr
,
1004 const struct rte_flow_item
*items
,
1005 const struct rte_flow_action
*actions
)
1008 * Overlap struct rte_flow_conv with struct rte_flow_desc in order
1009 * to convert the former to the latter without wasting space.
1011 struct rte_flow_conv_rule
*dst
=
1013 (void *)((uintptr_t)desc
+
1014 (offsetof(struct rte_flow_desc
, actions
) -
1015 offsetof(struct rte_flow_conv_rule
, actions
))) :
1018 len
> sizeof(*desc
) - sizeof(*dst
) ?
1019 len
- (sizeof(*desc
) - sizeof(*dst
)) :
1021 struct rte_flow_conv_rule src
= {
1023 .pattern_ro
= items
,
1024 .actions_ro
= actions
,
1028 RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc
) <
1029 sizeof(struct rte_flow_conv_rule
));
1031 (&dst
->pattern
!= &desc
->items
||
1032 &dst
->actions
!= &desc
->actions
||
1033 (uintptr_t)(dst
+ 1) != (uintptr_t)(desc
+ 1))) {
1037 ret
= rte_flow_conv(RTE_FLOW_CONV_OP_RULE
, dst
, dst_size
, &src
, NULL
);
1040 ret
+= sizeof(*desc
) - sizeof(*dst
);
1042 (&(struct rte_flow_desc
){
1045 .items
= dst_size
? dst
->pattern
: NULL
,
1046 .actions
= dst_size
? dst
->actions
: NULL
,
1048 len
> sizeof(*desc
) ? sizeof(*desc
) : len
);
1053 * Expand RSS flows into several possible flows according to the RSS hash
1054 * fields requested and the driver capabilities.
1057 rte_flow_expand_rss(struct rte_flow_expand_rss
*buf
, size_t size
,
1058 const struct rte_flow_item
*pattern
, uint64_t types
,
1059 const struct rte_flow_expand_node graph
[],
1060 int graph_root_index
)
1062 const int elt_n
= 8;
1063 const struct rte_flow_item
*item
;
1064 const struct rte_flow_expand_node
*node
= &graph
[graph_root_index
];
1065 const int *next_node
;
1066 const int *stack
[elt_n
];
1068 struct rte_flow_item flow_items
[elt_n
];
1071 size_t user_pattern_size
= 0;
1073 const struct rte_flow_expand_node
*next
= NULL
;
1074 struct rte_flow_item missed_item
;
1077 const struct rte_flow_item
*last_item
= NULL
;
1079 memset(&missed_item
, 0, sizeof(missed_item
));
1080 lsize
= offsetof(struct rte_flow_expand_rss
, entry
) +
1081 elt_n
* sizeof(buf
->entry
[0]);
1082 if (lsize
<= size
) {
1083 buf
->entry
[0].priority
= 0;
1084 buf
->entry
[0].pattern
= (void *)&buf
->entry
[elt_n
];
1086 addr
= buf
->entry
[0].pattern
;
1088 for (item
= pattern
; item
->type
!= RTE_FLOW_ITEM_TYPE_END
; item
++) {
1089 if (item
->type
!= RTE_FLOW_ITEM_TYPE_VOID
)
1091 for (i
= 0; node
->next
&& node
->next
[i
]; ++i
) {
1092 next
= &graph
[node
->next
[i
]];
1093 if (next
->type
== item
->type
)
1098 user_pattern_size
+= sizeof(*item
);
1100 user_pattern_size
+= sizeof(*item
); /* Handle END item. */
1101 lsize
+= user_pattern_size
;
1102 /* Copy the user pattern in the first entry of the buffer. */
1103 if (lsize
<= size
) {
1104 rte_memcpy(addr
, pattern
, user_pattern_size
);
1105 addr
= (void *)(((uintptr_t)addr
) + user_pattern_size
);
1108 /* Start expanding. */
1109 memset(flow_items
, 0, sizeof(flow_items
));
1110 user_pattern_size
-= sizeof(*item
);
1112 * Check if the last valid item has spec set
1113 * and need complete pattern.
1115 missed_item
.type
= rte_flow_expand_rss_item_complete(last_item
);
1116 if (missed_item
.type
!= RTE_FLOW_ITEM_TYPE_VOID
) {
1119 for (i
= 0; node
->next
&& node
->next
[i
]; ++i
) {
1120 next
= &graph
[node
->next
[i
]];
1121 if (next
->type
== missed_item
.type
) {
1122 flow_items
[0].type
= missed_item
.type
;
1123 flow_items
[1].type
= RTE_FLOW_ITEM_TYPE_END
;
1129 if (next
&& missed
) {
1130 elt
= 2; /* missed item + item end. */
1132 lsize
+= elt
* sizeof(*item
) + user_pattern_size
;
1133 if ((node
->rss_types
& types
) && lsize
<= size
) {
1134 buf
->entry
[buf
->entries
].priority
= 1;
1135 buf
->entry
[buf
->entries
].pattern
= addr
;
1137 rte_memcpy(addr
, buf
->entry
[0].pattern
,
1139 addr
= (void *)(((uintptr_t)addr
) + user_pattern_size
);
1140 rte_memcpy(addr
, flow_items
, elt
* sizeof(*item
));
1141 addr
= (void *)(((uintptr_t)addr
) +
1142 elt
* sizeof(*item
));
1145 memset(flow_items
, 0, sizeof(flow_items
));
1146 next_node
= node
->next
;
1147 stack
[stack_pos
] = next_node
;
1148 node
= next_node
? &graph
[*next_node
] : NULL
;
1150 flow_items
[stack_pos
].type
= node
->type
;
1151 if (node
->rss_types
& types
) {
1153 * compute the number of items to copy from the
1154 * expansion and copy it.
1155 * When the stack_pos is 0, there are 1 element in it,
1156 * plus the addition END item.
1158 elt
= stack_pos
+ 2;
1159 flow_items
[stack_pos
+ 1].type
= RTE_FLOW_ITEM_TYPE_END
;
1160 lsize
+= elt
* sizeof(*item
) + user_pattern_size
;
1161 if (lsize
<= size
) {
1162 size_t n
= elt
* sizeof(*item
);
1164 buf
->entry
[buf
->entries
].priority
=
1165 stack_pos
+ 1 + missed
;
1166 buf
->entry
[buf
->entries
].pattern
= addr
;
1168 rte_memcpy(addr
, buf
->entry
[0].pattern
,
1170 addr
= (void *)(((uintptr_t)addr
) +
1172 rte_memcpy(addr
, &missed_item
,
1173 missed
* sizeof(*item
));
1174 addr
= (void *)(((uintptr_t)addr
) +
1175 missed
* sizeof(*item
));
1176 rte_memcpy(addr
, flow_items
, n
);
1177 addr
= (void *)(((uintptr_t)addr
) + n
);
1182 next_node
= node
->next
;
1183 if (stack_pos
++ == elt_n
) {
1187 stack
[stack_pos
] = next_node
;
1188 } else if (*(next_node
+ 1)) {
1189 /* Follow up with the next possibility. */
1192 /* Move to the next path. */
1194 next_node
= stack
[--stack_pos
];
1196 stack
[stack_pos
] = next_node
;
1198 node
= *next_node
? &graph
[*next_node
] : NULL
;
1200 /* no expanded flows but we have missed item, create one rule for it */
1201 if (buf
->entries
== 1 && missed
!= 0) {
1203 lsize
+= elt
* sizeof(*item
) + user_pattern_size
;
1204 if (lsize
<= size
) {
1205 buf
->entry
[buf
->entries
].priority
= 1;
1206 buf
->entry
[buf
->entries
].pattern
= addr
;
1208 flow_items
[0].type
= missed_item
.type
;
1209 flow_items
[1].type
= RTE_FLOW_ITEM_TYPE_END
;
1210 rte_memcpy(addr
, buf
->entry
[0].pattern
,
1212 addr
= (void *)(((uintptr_t)addr
) + user_pattern_size
);
1213 rte_memcpy(addr
, flow_items
, elt
* sizeof(*item
));
1214 addr
= (void *)(((uintptr_t)addr
) +
1215 elt
* sizeof(*item
));
1222 rte_flow_dev_dump(uint16_t port_id
, FILE *file
, struct rte_flow_error
*error
)
1224 struct rte_eth_dev
*dev
= &rte_eth_devices
[port_id
];
1225 const struct rte_flow_ops
*ops
= rte_flow_ops_get(port_id
, error
);
1229 if (likely(!!ops
->dev_dump
))
1230 return flow_err(port_id
, ops
->dev_dump(dev
, file
, error
),
1232 return rte_flow_error_set(error
, ENOSYS
,
1233 RTE_FLOW_ERROR_TYPE_UNSPECIFIED
,
1234 NULL
, rte_strerror(ENOSYS
));
1238 rte_flow_get_aged_flows(uint16_t port_id
, void **contexts
,
1239 uint32_t nb_contexts
, struct rte_flow_error
*error
)
1241 struct rte_eth_dev
*dev
= &rte_eth_devices
[port_id
];
1242 const struct rte_flow_ops
*ops
= rte_flow_ops_get(port_id
, error
);
1246 if (likely(!!ops
->get_aged_flows
))
1247 return flow_err(port_id
, ops
->get_aged_flows(dev
, contexts
,
1248 nb_contexts
, error
), error
);
1249 return rte_flow_error_set(error
, ENOTSUP
,
1250 RTE_FLOW_ERROR_TYPE_UNSPECIFIED
,
1251 NULL
, rte_strerror(ENOTSUP
));