/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_flow_classify.h>
#include "rte_flow_classify_parse.h"
#include <rte_flow_driver.h>
struct classify_valid_pattern {
	enum rte_flow_item_type *items;
	parse_filter_t parse_filter;
};

static struct classify_action action;
static enum rte_flow_classify_table_type table_type
	= RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE;
/* Pattern for IPv4 5-tuple UDP filter */
static enum rte_flow_item_type pattern_ntuple_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple TCP filter */
static enum rte_flow_item_type pattern_ntuple_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple SCTP filter */
static enum rte_flow_item_type pattern_ntuple_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error);
static struct classify_valid_pattern classify_supported_patterns[] = {
	{ pattern_ntuple_1, classify_parse_ntuple_filter },
	{ pattern_ntuple_2, classify_parse_ntuple_filter },
	{ pattern_ntuple_3, classify_parse_ntuple_filter },
};
struct classify_action *
classify_get_flow_action(void)
{
	return &action;
}
/* Find the first VOID or non-VOID item pointer */
const struct rte_flow_item *
classify_find_first_item(const struct rte_flow_item *item, bool is_void)
{
	bool is_find;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (is_void)
			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
		else
			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
		if (is_find)
			break;
		item++;
	}
	return item;
}
/* Skip all VOID items of the pattern */
void
classify_pattern_skip_void_item(struct rte_flow_item *items,
			const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = classify_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = classify_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
			pb = pe;
			break;
		}

		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
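/*
 * Example (illustrative, not part of the library): given the input
 * pattern { ETH, VOID, IPV4, VOID, UDP, END }, the compacted copy
 * written to items is { ETH, IPV4, UDP, END }; the END item is always
 * copied last so the output stays properly terminated.
 */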
/* Check if the pattern matches a supported item type array */
static bool
classify_match_pattern(enum rte_flow_item_type *item_array,
			struct rte_flow_item *pattern)
{
	struct rte_flow_item *item = pattern;

	while ((*item_array == item->type) &&
	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
		item_array++;
		item++;
	}

	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
		item->type == RTE_FLOW_ITEM_TYPE_END);
}
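/*
 * Note (illustrative): the final check requires both arrays to reach
 * END together, so { ETH, IPV4, UDP, END } matches pattern_ntuple_1,
 * while the shorter { ETH, IPV4, END } does not.
 */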
/* Find if there's a matching parse filter function */
parse_filter_t
classify_find_parse_filter_func(struct rte_flow_item *pattern)
{
	parse_filter_t parse_filter = NULL;
	uint8_t i = 0;

	for (; i < RTE_DIM(classify_supported_patterns); i++) {
		if (classify_match_pattern(classify_supported_patterns[i].items,
				pattern)) {
			parse_filter =
				classify_supported_patterns[i].parse_filter;
			break;
		}
	}

	return parse_filter;
}
#define FLOW_RULE_MIN_PRIORITY 8
#define FLOW_RULE_MAX_PRIORITY 0
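/*
 * Note: rte_flow priorities are inverted with respect to their numeric
 * value (0 is the highest priority), which is why the MIN/MAX names
 * above look reversed.
 */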
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
	do {\
		item = pattern + index;\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
			index++;\
			item = pattern + index;\
		} \
	} while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)\
	do {\
		act = actions + index;\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			index++;\
			act = actions + index;\
		} \
	} while (0)
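/*
 * Usage sketch (illustrative, mirroring the parser below): with index
 * starting at 0, NEXT_ITEM_OF_PATTERN(item, pattern, index) leaves
 * item on the first non-VOID entry and index at its position; callers
 * then increment index before the next invocation to advance.
 */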
/**
 * Please be aware there's an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets should normally use network order.
 */
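/*
 * Example (illustrative): a caller matching UDP destination port 80
 * must supply the value in network order in the item spec, e.g.
 *
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(80),
 *	};
 *
 * while attr->priority and the action configurations stay in CPU order.
 */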
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info as well.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action must be COUNT or MARK.
 * The next not void action must be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * All other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
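/*
 * Hypothetical caller sketch (not part of this file): a rule this
 * parser accepts could be assembled as
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ipv4_spec, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * where ipv4_spec/ipv4_mask, udp_spec/udp_mask and count_conf are
 * filled in per the table above.
 */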
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_action_count *count;
	const struct rte_flow_action_mark *mark_spec;
	uint32_t index;

	/* parse pattern */
	index = 0;
	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -EINVAL;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item,
				"Not supported last point for range");
			return -EINVAL;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"Not supported by ntuple filter");
			return -EINVAL;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"Not supported by ntuple filter");
			return -EINVAL;
		}
	}
	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -EINVAL;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -EINVAL;
	}
	ipv4_mask = item->mask;
	/**
	 * Only support src & dst addresses and protocol;
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -EINVAL;
	}
	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;
	/* check if the next not void item is TCP, UDP or SCTP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -EINVAL;
	}
	/* get the TCP/UDP/SCTP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -EINVAL;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -EINVAL;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = item->mask;

		/**
		 * Only support src & dst ports and tcp flags;
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		tcp_spec = item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else {
		sctp_mask = item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}
	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -EINVAL;
	}

	/* remember the table type for this 5-tuple rule */
	table_type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -EINVAL;
	}

	/* egress is not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -EINVAL;
	}
	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -EINVAL;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority > FLOW_RULE_MIN_PRIORITY)
		filter->priority = FLOW_RULE_MAX_PRIORITY;
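	/*
	 * Note (clarifying the clamp above): priorities numerically above
	 * FLOW_RULE_MIN_PRIORITY (8) are clamped to FLOW_RULE_MAX_PRIORITY
	 * (0), i.e. promoted to the highest rte_flow priority.
	 */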
	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports COUNT and MARK;
	 * check if the first not void action is COUNT or MARK.
	 */
	memset(&action, 0, sizeof(action));
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
		count = act->conf;
		memcpy(&action.act.counter, count, sizeof(action.act.counter));
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
		mark_spec = act->conf;
		memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
		break;
	default:
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, act,
			"Invalid action.");
		return -EINVAL;
	}
	/* check if the next not void action is MARK, COUNT or END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
		count = act->conf;
		memcpy(&action.act.counter, count, sizeof(action.act.counter));
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
		mark_spec = act->conf;
		memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
		break;
	case RTE_FLOW_ACTION_TYPE_END:
		return 0;
	default:
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, act,
			"Invalid action.");
		return -EINVAL;
	}
	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, act,
			"Invalid action.");
		return -EINVAL;
	}

	return 0;
}