/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
5 #include <rte_string_fns.h>
6 #include <rte_compat.h>
7 #include <rte_flow_classify.h>
8 #include "rte_flow_classify_parse.h"
9 #include <rte_flow_driver.h>
10 #include <rte_table_acl.h>
13 int librte_flow_classify_logtype
;
15 static uint32_t unique_id
= 1;
17 enum rte_flow_classify_table_type table_type
18 = RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE
;
20 struct rte_flow_classify_table_entry
{
21 /* meta-data for classify rule */
25 struct classify_action action
;
28 struct rte_cls_table
{
29 /* Input parameters */
30 struct rte_table_ops ops
;
32 enum rte_flow_classify_table_type type
;
34 /* Handle to the low-level table object */
38 #define RTE_FLOW_CLASSIFIER_MAX_NAME_SZ 256
40 struct rte_flow_classifier
{
41 /* Input parameters */
42 char name
[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ
];
47 struct rte_eth_ntuple_filter ntuple_filter
;
49 /* classifier tables */
50 struct rte_cls_table tables
[RTE_FLOW_CLASSIFY_TABLE_MAX
];
55 struct rte_flow_classify_table_entry
56 *entries
[RTE_PORT_IN_BURST_SIZE_MAX
];
57 } __rte_cache_aligned
;
69 struct rte_table_acl_rule_add_params key_add
; /* add key */
70 struct rte_table_acl_rule_delete_params key_del
; /* delete key */
73 struct classify_rules
{
74 enum rte_flow_classify_rule_type type
;
76 struct rte_flow_classify_ipv4_5tuple ipv4_5tuple
;
80 struct rte_flow_classify_rule
{
81 uint32_t id
; /* unique ID of classify rule */
82 enum rte_flow_classify_table_type tbl_type
; /* rule table */
83 struct classify_rules rules
; /* union of rules */
87 int key_found
; /* rule key found in table */
88 struct rte_flow_classify_table_entry entry
; /* rule meta data */
89 void *entry_ptr
; /* handle to the table entry for rule meta data */
92 int __rte_experimental
93 rte_flow_classify_validate(
94 struct rte_flow_classifier
*cls
,
95 const struct rte_flow_attr
*attr
,
96 const struct rte_flow_item pattern
[],
97 const struct rte_flow_action actions
[],
98 struct rte_flow_error
*error
)
100 struct rte_flow_item
*items
;
101 parse_filter_t parse_filter
;
102 uint32_t item_num
= 0;
110 RTE_FLOW_CLASSIFY_LOG(ERR
,
111 "%s: rte_flow_classifier parameter is NULL\n",
117 rte_flow_error_set(error
, EINVAL
,
118 RTE_FLOW_ERROR_TYPE_ATTR
,
119 NULL
, "NULL attribute.");
124 rte_flow_error_set(error
,
125 EINVAL
, RTE_FLOW_ERROR_TYPE_ITEM_NUM
,
126 NULL
, "NULL pattern.");
131 rte_flow_error_set(error
, EINVAL
,
132 RTE_FLOW_ERROR_TYPE_ACTION_NUM
,
133 NULL
, "NULL action.");
137 memset(&cls
->ntuple_filter
, 0, sizeof(cls
->ntuple_filter
));
139 /* Get the non-void item number of pattern */
140 while ((pattern
+ i
)->type
!= RTE_FLOW_ITEM_TYPE_END
) {
141 if ((pattern
+ i
)->type
!= RTE_FLOW_ITEM_TYPE_VOID
)
147 items
= malloc(item_num
* sizeof(struct rte_flow_item
));
149 rte_flow_error_set(error
, ENOMEM
,
150 RTE_FLOW_ERROR_TYPE_ITEM_NUM
,
151 NULL
, "No memory for pattern items.");
155 memset(items
, 0, item_num
* sizeof(struct rte_flow_item
));
156 classify_pattern_skip_void_item(items
, pattern
);
158 parse_filter
= classify_find_parse_filter_func(items
);
160 rte_flow_error_set(error
, EINVAL
,
161 RTE_FLOW_ERROR_TYPE_ITEM
,
162 pattern
, "Unsupported pattern");
167 ret
= parse_filter(attr
, items
, actions
, &cls
->ntuple_filter
, error
);
/* Split an IPv4 address (host byte order) into its four octets,
 * most-significant first; a..d are pointers to unsigned char.
 */
#define uint32_t_to_char(ip, a, b, c, d) do {\
		*a = (unsigned char)(ip >> 24 & 0xff);\
		*b = (unsigned char)(ip >> 16 & 0xff);\
		*c = (unsigned char)(ip >> 8 & 0xff);\
		*d = (unsigned char)(ip & 0xff);\
	} while (0)
181 print_acl_ipv4_key_add(struct rte_table_acl_rule_add_params
*key
)
183 unsigned char a
, b
, c
, d
;
185 printf("%s: 0x%02hhx/0x%hhx ", __func__
,
186 key
->field_value
[PROTO_FIELD_IPV4
].value
.u8
,
187 key
->field_value
[PROTO_FIELD_IPV4
].mask_range
.u8
);
189 uint32_t_to_char(key
->field_value
[SRC_FIELD_IPV4
].value
.u32
,
191 printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a
, b
, c
, d
,
192 key
->field_value
[SRC_FIELD_IPV4
].mask_range
.u32
);
194 uint32_t_to_char(key
->field_value
[DST_FIELD_IPV4
].value
.u32
,
196 printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a
, b
, c
, d
,
197 key
->field_value
[DST_FIELD_IPV4
].mask_range
.u32
);
199 printf("%hu : 0x%x %hu : 0x%x",
200 key
->field_value
[SRCP_FIELD_IPV4
].value
.u16
,
201 key
->field_value
[SRCP_FIELD_IPV4
].mask_range
.u16
,
202 key
->field_value
[DSTP_FIELD_IPV4
].value
.u16
,
203 key
->field_value
[DSTP_FIELD_IPV4
].mask_range
.u16
);
205 printf(" priority: 0x%x\n", key
->priority
);
209 print_acl_ipv4_key_delete(struct rte_table_acl_rule_delete_params
*key
)
211 unsigned char a
, b
, c
, d
;
213 printf("%s: 0x%02hhx/0x%hhx ", __func__
,
214 key
->field_value
[PROTO_FIELD_IPV4
].value
.u8
,
215 key
->field_value
[PROTO_FIELD_IPV4
].mask_range
.u8
);
217 uint32_t_to_char(key
->field_value
[SRC_FIELD_IPV4
].value
.u32
,
219 printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a
, b
, c
, d
,
220 key
->field_value
[SRC_FIELD_IPV4
].mask_range
.u32
);
222 uint32_t_to_char(key
->field_value
[DST_FIELD_IPV4
].value
.u32
,
224 printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a
, b
, c
, d
,
225 key
->field_value
[DST_FIELD_IPV4
].mask_range
.u32
);
227 printf("%hu : 0x%x %hu : 0x%x\n",
228 key
->field_value
[SRCP_FIELD_IPV4
].value
.u16
,
229 key
->field_value
[SRCP_FIELD_IPV4
].mask_range
.u16
,
230 key
->field_value
[DSTP_FIELD_IPV4
].value
.u16
,
231 key
->field_value
[DSTP_FIELD_IPV4
].mask_range
.u16
);
235 rte_flow_classifier_check_params(struct rte_flow_classifier_params
*params
)
237 if (params
== NULL
) {
238 RTE_FLOW_CLASSIFY_LOG(ERR
,
239 "%s: Incorrect value for parameter params\n", __func__
);
244 if (params
->name
== NULL
) {
245 RTE_FLOW_CLASSIFY_LOG(ERR
,
246 "%s: Incorrect value for parameter name\n", __func__
);
251 if (params
->socket_id
< 0) {
252 RTE_FLOW_CLASSIFY_LOG(ERR
,
253 "%s: Incorrect value for parameter socket_id\n",
261 struct rte_flow_classifier
* __rte_experimental
262 rte_flow_classifier_create(struct rte_flow_classifier_params
*params
)
264 struct rte_flow_classifier
*cls
;
267 /* Check input parameters */
268 ret
= rte_flow_classifier_check_params(params
);
270 RTE_FLOW_CLASSIFY_LOG(ERR
,
271 "%s: flow classifier params check failed (%d)\n",
276 /* Allocate memory for the flow classifier */
277 cls
= rte_zmalloc_socket("FLOW_CLASSIFIER",
278 sizeof(struct rte_flow_classifier
),
279 RTE_CACHE_LINE_SIZE
, params
->socket_id
);
282 RTE_FLOW_CLASSIFY_LOG(ERR
,
283 "%s: flow classifier memory allocation failed\n",
288 /* Save input parameters */
289 strlcpy(cls
->name
, params
->name
, RTE_FLOW_CLASSIFIER_MAX_NAME_SZ
);
291 cls
->socket_id
= params
->socket_id
;
297 rte_flow_classify_table_free(struct rte_cls_table
*table
)
299 if (table
->ops
.f_free
!= NULL
)
300 table
->ops
.f_free(table
->h_table
);
303 int __rte_experimental
304 rte_flow_classifier_free(struct rte_flow_classifier
*cls
)
308 /* Check input parameters */
310 RTE_FLOW_CLASSIFY_LOG(ERR
,
311 "%s: rte_flow_classifier parameter is NULL\n",
317 for (i
= 0; i
< cls
->num_tables
; i
++) {
318 struct rte_cls_table
*table
= &cls
->tables
[i
];
320 rte_flow_classify_table_free(table
);
323 /* Free flow classifier memory */
330 rte_table_check_params(struct rte_flow_classifier
*cls
,
331 struct rte_flow_classify_table_params
*params
)
334 RTE_FLOW_CLASSIFY_LOG(ERR
,
335 "%s: flow classifier parameter is NULL\n",
339 if (params
== NULL
) {
340 RTE_FLOW_CLASSIFY_LOG(ERR
, "%s: params parameter is NULL\n",
346 if (params
->ops
== NULL
) {
347 RTE_FLOW_CLASSIFY_LOG(ERR
, "%s: params->ops is NULL\n",
352 if (params
->ops
->f_create
== NULL
) {
353 RTE_FLOW_CLASSIFY_LOG(ERR
,
354 "%s: f_create function pointer is NULL\n", __func__
);
358 if (params
->ops
->f_lookup
== NULL
) {
359 RTE_FLOW_CLASSIFY_LOG(ERR
,
360 "%s: f_lookup function pointer is NULL\n", __func__
);
364 /* De we have room for one more table? */
365 if (cls
->num_tables
== RTE_FLOW_CLASSIFY_TABLE_MAX
) {
366 RTE_FLOW_CLASSIFY_LOG(ERR
,
367 "%s: Incorrect value for num_tables parameter\n",
375 int __rte_experimental
376 rte_flow_classify_table_create(struct rte_flow_classifier
*cls
,
377 struct rte_flow_classify_table_params
*params
)
379 struct rte_cls_table
*table
;
384 /* Check input arguments */
385 ret
= rte_table_check_params(cls
, params
);
389 /* calculate table entry size */
390 entry_size
= sizeof(struct rte_flow_classify_table_entry
);
392 /* Create the table */
393 h_table
= params
->ops
->f_create(params
->arg_create
, cls
->socket_id
,
395 if (h_table
== NULL
) {
396 RTE_FLOW_CLASSIFY_LOG(ERR
, "%s: Table creation failed\n",
401 /* Commit current table to the classifier */
402 table
= &cls
->tables
[cls
->num_tables
];
403 table
->type
= params
->type
;
406 /* Save input parameters */
407 memcpy(&table
->ops
, params
->ops
, sizeof(struct rte_table_ops
));
409 /* Initialize table internal data structure */
410 table
->entry_size
= entry_size
;
411 table
->h_table
= h_table
;
416 static struct rte_flow_classify_rule
*
417 allocate_acl_ipv4_5tuple_rule(struct rte_flow_classifier
*cls
)
419 struct rte_flow_classify_rule
*rule
;
422 rule
= malloc(sizeof(struct rte_flow_classify_rule
));
426 memset(rule
, 0, sizeof(struct rte_flow_classify_rule
));
427 rule
->id
= unique_id
++;
428 rule
->rules
.type
= RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE
;
431 rule
->u
.key
.key_add
.priority
= cls
->ntuple_filter
.priority
;
432 rule
->u
.key
.key_add
.field_value
[PROTO_FIELD_IPV4
].mask_range
.u8
=
433 cls
->ntuple_filter
.proto_mask
;
434 rule
->u
.key
.key_add
.field_value
[PROTO_FIELD_IPV4
].value
.u8
=
435 cls
->ntuple_filter
.proto
;
436 rule
->rules
.u
.ipv4_5tuple
.proto
= cls
->ntuple_filter
.proto
;
437 rule
->rules
.u
.ipv4_5tuple
.proto_mask
= cls
->ntuple_filter
.proto_mask
;
439 rule
->u
.key
.key_add
.field_value
[SRC_FIELD_IPV4
].mask_range
.u32
=
440 cls
->ntuple_filter
.src_ip_mask
;
441 rule
->u
.key
.key_add
.field_value
[SRC_FIELD_IPV4
].value
.u32
=
442 cls
->ntuple_filter
.src_ip
;
443 rule
->rules
.u
.ipv4_5tuple
.src_ip_mask
= cls
->ntuple_filter
.src_ip_mask
;
444 rule
->rules
.u
.ipv4_5tuple
.src_ip
= cls
->ntuple_filter
.src_ip
;
446 rule
->u
.key
.key_add
.field_value
[DST_FIELD_IPV4
].mask_range
.u32
=
447 cls
->ntuple_filter
.dst_ip_mask
;
448 rule
->u
.key
.key_add
.field_value
[DST_FIELD_IPV4
].value
.u32
=
449 cls
->ntuple_filter
.dst_ip
;
450 rule
->rules
.u
.ipv4_5tuple
.dst_ip_mask
= cls
->ntuple_filter
.dst_ip_mask
;
451 rule
->rules
.u
.ipv4_5tuple
.dst_ip
= cls
->ntuple_filter
.dst_ip
;
453 rule
->u
.key
.key_add
.field_value
[SRCP_FIELD_IPV4
].mask_range
.u16
=
454 cls
->ntuple_filter
.src_port_mask
;
455 rule
->u
.key
.key_add
.field_value
[SRCP_FIELD_IPV4
].value
.u16
=
456 cls
->ntuple_filter
.src_port
;
457 rule
->rules
.u
.ipv4_5tuple
.src_port_mask
=
458 cls
->ntuple_filter
.src_port_mask
;
459 rule
->rules
.u
.ipv4_5tuple
.src_port
= cls
->ntuple_filter
.src_port
;
461 rule
->u
.key
.key_add
.field_value
[DSTP_FIELD_IPV4
].mask_range
.u16
=
462 cls
->ntuple_filter
.dst_port_mask
;
463 rule
->u
.key
.key_add
.field_value
[DSTP_FIELD_IPV4
].value
.u16
=
464 cls
->ntuple_filter
.dst_port
;
465 rule
->rules
.u
.ipv4_5tuple
.dst_port_mask
=
466 cls
->ntuple_filter
.dst_port_mask
;
467 rule
->rules
.u
.ipv4_5tuple
.dst_port
= cls
->ntuple_filter
.dst_port
;
469 log_level
= rte_log_get_level(librte_flow_classify_logtype
);
471 if (log_level
== RTE_LOG_DEBUG
)
472 print_acl_ipv4_key_add(&rule
->u
.key
.key_add
);
474 /* key delete values */
475 memcpy(&rule
->u
.key
.key_del
.field_value
[PROTO_FIELD_IPV4
],
476 &rule
->u
.key
.key_add
.field_value
[PROTO_FIELD_IPV4
],
477 NUM_FIELDS_IPV4
* sizeof(struct rte_acl_field
));
479 if (log_level
== RTE_LOG_DEBUG
)
480 print_acl_ipv4_key_delete(&rule
->u
.key
.key_del
);
485 struct rte_flow_classify_rule
* __rte_experimental
486 rte_flow_classify_table_entry_add(struct rte_flow_classifier
*cls
,
487 const struct rte_flow_attr
*attr
,
488 const struct rte_flow_item pattern
[],
489 const struct rte_flow_action actions
[],
491 struct rte_flow_error
*error
)
493 struct rte_flow_classify_rule
*rule
;
494 struct rte_flow_classify_table_entry
*table_entry
;
495 struct classify_action
*action
;
502 if (key_found
== NULL
) {
503 rte_flow_error_set(error
, EINVAL
,
504 RTE_FLOW_ERROR_TYPE_UNSPECIFIED
,
505 NULL
, "NULL key_found.");
509 /* parse attr, pattern and actions */
510 ret
= rte_flow_classify_validate(cls
, attr
, pattern
, actions
, error
);
514 switch (table_type
) {
515 case RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE
:
516 rule
= allocate_acl_ipv4_5tuple_rule(cls
);
519 rule
->tbl_type
= table_type
;
520 cls
->table_mask
|= table_type
;
526 action
= classify_get_flow_action();
527 table_entry
= &rule
->entry
;
528 table_entry
->rule_id
= rule
->id
;
529 table_entry
->action
.action_mask
= action
->action_mask
;
532 if (action
->action_mask
& (1LLU << RTE_FLOW_ACTION_TYPE_COUNT
)) {
533 memcpy(&table_entry
->action
.act
.counter
, &action
->act
.counter
,
534 sizeof(table_entry
->action
.act
.counter
));
536 if (action
->action_mask
& (1LLU << RTE_FLOW_ACTION_TYPE_MARK
)) {
537 memcpy(&table_entry
->action
.act
.mark
, &action
->act
.mark
,
538 sizeof(table_entry
->action
.act
.mark
));
541 for (i
= 0; i
< cls
->num_tables
; i
++) {
542 struct rte_cls_table
*table
= &cls
->tables
[i
];
544 if (table
->type
== table_type
) {
545 if (table
->ops
.f_add
!= NULL
) {
546 ret
= table
->ops
.f_add(
548 &rule
->u
.key
.key_add
,
557 *key_found
= rule
->key_found
;
567 int __rte_experimental
568 rte_flow_classify_table_entry_delete(struct rte_flow_classifier
*cls
,
569 struct rte_flow_classify_rule
*rule
)
576 enum rte_flow_classify_table_type tbl_type
= rule
->tbl_type
;
578 for (i
= 0; i
< cls
->num_tables
; i
++) {
579 struct rte_cls_table
*table
= &cls
->tables
[i
];
581 if (table
->type
== tbl_type
) {
582 if (table
->ops
.f_delete
!= NULL
) {
583 ret
= table
->ops
.f_delete(table
->h_table
,
584 &rule
->u
.key
.key_del
,
597 flow_classifier_lookup(struct rte_flow_classifier
*cls
,
598 struct rte_cls_table
*table
,
599 struct rte_mbuf
**pkts
,
600 const uint16_t nb_pkts
)
604 uint64_t lookup_hit_mask
;
606 pkts_mask
= RTE_LEN2MASK(nb_pkts
, uint64_t);
607 ret
= table
->ops
.f_lookup(table
->h_table
,
608 pkts
, pkts_mask
, &lookup_hit_mask
,
609 (void **)cls
->entries
);
611 if (!ret
&& lookup_hit_mask
)
612 cls
->nb_pkts
= nb_pkts
;
620 action_apply(struct rte_flow_classifier
*cls
,
621 struct rte_flow_classify_rule
*rule
,
622 struct rte_flow_classify_stats
*stats
)
624 struct rte_flow_classify_ipv4_5tuple_stats
*ntuple_stats
;
625 struct rte_flow_classify_table_entry
*entry
= &rule
->entry
;
627 uint32_t action_mask
= entry
->action
.action_mask
;
628 int i
, ret
= -EINVAL
;
630 if (action_mask
& (1LLU << RTE_FLOW_ACTION_TYPE_COUNT
)) {
631 for (i
= 0; i
< cls
->nb_pkts
; i
++) {
632 if (rule
->id
== cls
->entries
[i
]->rule_id
)
637 ntuple_stats
= stats
->stats
;
638 ntuple_stats
->counter1
= count
;
639 ntuple_stats
->ipv4_5tuple
= rule
->rules
.u
.ipv4_5tuple
;
645 int __rte_experimental
646 rte_flow_classifier_query(struct rte_flow_classifier
*cls
,
647 struct rte_mbuf
**pkts
,
648 const uint16_t nb_pkts
,
649 struct rte_flow_classify_rule
*rule
,
650 struct rte_flow_classify_stats
*stats
)
652 enum rte_flow_classify_table_type tbl_type
;
656 if (!cls
|| !rule
|| !stats
|| !pkts
|| nb_pkts
== 0)
659 tbl_type
= rule
->tbl_type
;
660 for (i
= 0; i
< cls
->num_tables
; i
++) {
661 struct rte_cls_table
*table
= &cls
->tables
[i
];
663 if (table
->type
== tbl_type
) {
664 ret
= flow_classifier_lookup(cls
, table
,
667 ret
= action_apply(cls
, rule
, stats
);
675 RTE_INIT(librte_flow_classify_init_log
)
677 librte_flow_classify_logtype
=
678 rte_log_register("lib.flow_classify");
679 if (librte_flow_classify_logtype
>= 0)
680 rte_log_set_level(librte_flow_classify_logtype
, RTE_LOG_INFO
);